-rw-r--r--  V8Binding/v8/AUTHORS  1
-rw-r--r--  V8Binding/v8/ChangeLog  45
-rw-r--r--  V8Binding/v8/SConstruct  8
-rw-r--r--  V8Binding/v8/benchmarks/revisions.html  2
-rw-r--r--  V8Binding/v8/benchmarks/run.html  37
-rw-r--r--  V8Binding/v8/benchmarks/style.css  38
-rw-r--r--  V8Binding/v8/include/v8.h  22
-rwxr-xr-x  V8Binding/v8/src/SConscript  3
-rw-r--r--  V8Binding/v8/src/accessors.cc  5
-rw-r--r--  V8Binding/v8/src/api.cc  25
-rw-r--r--  V8Binding/v8/src/arm/assembler-arm-inl.h  2
-rw-r--r--  V8Binding/v8/src/arm/assembler-arm.cc  32
-rw-r--r--  V8Binding/v8/src/arm/builtins-arm.cc  48
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm-inl.h  10
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm.cc  1563
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm.h  69
-rw-r--r--  V8Binding/v8/src/arm/constants-arm.h  51
-rw-r--r--  V8Binding/v8/src/arm/cpu-arm.cc  4
-rw-r--r--  V8Binding/v8/src/arm/disasm-arm.cc  43
-rw-r--r--  V8Binding/v8/src/arm/frames-arm.h  242
-rw-r--r--  V8Binding/v8/src/arm/ic-arm.cc  38
-rw-r--r--  V8Binding/v8/src/arm/jump-target-arm.cc  4
-rw-r--r--  V8Binding/v8/src/arm/macro-assembler-arm.cc  134
-rw-r--r--  V8Binding/v8/src/arm/macro-assembler-arm.h  35
-rw-r--r--  V8Binding/v8/src/arm/simulator-arm.cc  222
-rw-r--r--  V8Binding/v8/src/arm/simulator-arm.h  10
-rw-r--r--  V8Binding/v8/src/arm/stub-cache-arm.cc  164
-rw-r--r--  V8Binding/v8/src/arm/virtual-frame-arm.cc  8
-rw-r--r--  V8Binding/v8/src/arm/virtual-frame-arm.h  4
-rw-r--r--  V8Binding/v8/src/array.js  63
-rw-r--r--  V8Binding/v8/src/assembler.cc  43
-rw-r--r--  V8Binding/v8/src/assembler.h  53
-rw-r--r--  V8Binding/v8/src/ast.cc  2
-rw-r--r--  V8Binding/v8/src/ast.h  16
-rw-r--r--  V8Binding/v8/src/bootstrapper.cc  21
-rw-r--r--  V8Binding/v8/src/builtins.cc  3
-rw-r--r--  V8Binding/v8/src/builtins.h  2
-rw-r--r--  V8Binding/v8/src/code-stubs.cc  6
-rw-r--r--  V8Binding/v8/src/code-stubs.h  2
-rw-r--r--  V8Binding/v8/src/codegen.cc  138
-rw-r--r--  V8Binding/v8/src/codegen.h  56
-rw-r--r--  V8Binding/v8/src/compilation-cache.cc  360
-rw-r--r--  V8Binding/v8/src/compilation-cache.h  17
-rw-r--r--  V8Binding/v8/src/compiler.cc  44
-rw-r--r--  V8Binding/v8/src/contexts.cc  2
-rw-r--r--  V8Binding/v8/src/conversions.cc  2
-rw-r--r--  V8Binding/v8/src/d8.cc  20
-rw-r--r--  V8Binding/v8/src/d8.js  123
-rw-r--r--  V8Binding/v8/src/date-delay.js  241
-rw-r--r--  V8Binding/v8/src/debug-delay.js  83
-rw-r--r--  V8Binding/v8/src/debug.cc  14
-rw-r--r--  V8Binding/v8/src/disassembler.cc  7
-rw-r--r--  V8Binding/v8/src/dtoa-config.c  5
-rw-r--r--  V8Binding/v8/src/execution.cc  2
-rw-r--r--  V8Binding/v8/src/factory.cc  10
-rw-r--r--  V8Binding/v8/src/factory.h  4
-rw-r--r--  V8Binding/v8/src/flag-definitions.h  3
-rw-r--r--  V8Binding/v8/src/frame-element.h  49
-rw-r--r--  V8Binding/v8/src/frames-inl.h  27
-rw-r--r--  V8Binding/v8/src/frames.cc  14
-rw-r--r--  V8Binding/v8/src/frames.h  8
-rw-r--r--  V8Binding/v8/src/globals.h  2
-rw-r--r--  V8Binding/v8/src/heap-inl.h  22
-rw-r--r--  V8Binding/v8/src/heap.cc  312
-rw-r--r--  V8Binding/v8/src/heap.h  139
-rw-r--r--  V8Binding/v8/src/ia32/assembler-ia32-inl.h  2
-rw-r--r--  V8Binding/v8/src/ia32/assembler-ia32.cc  42
-rw-r--r--  V8Binding/v8/src/ia32/assembler-ia32.h  27
-rw-r--r--  V8Binding/v8/src/ia32/builtins-ia32.cc  33
-rw-r--r--  V8Binding/v8/src/ia32/codegen-ia32-inl.h  10
-rw-r--r--  V8Binding/v8/src/ia32/codegen-ia32.cc  1456
-rw-r--r--  V8Binding/v8/src/ia32/codegen-ia32.h  91
-rw-r--r--  V8Binding/v8/src/ia32/frames-ia32.h  173
-rw-r--r--  V8Binding/v8/src/ia32/ic-ia32.cc  36
-rw-r--r--  V8Binding/v8/src/ia32/jump-target-ia32.cc  6
-rw-r--r--  V8Binding/v8/src/ia32/macro-assembler-ia32.cc  24
-rw-r--r--  V8Binding/v8/src/ia32/macro-assembler-ia32.h  7
-rw-r--r--  V8Binding/v8/src/ia32/stub-cache-ia32.cc  150
-rw-r--r--  V8Binding/v8/src/ia32/virtual-frame-ia32.cc  40
-rw-r--r--  V8Binding/v8/src/ia32/virtual-frame-ia32.h  77
-rw-r--r--  V8Binding/v8/src/ic.cc  132
-rw-r--r--  V8Binding/v8/src/ic.h  13
-rw-r--r--  V8Binding/v8/src/jsregexp.cc  67
-rw-r--r--  V8Binding/v8/src/jump-target.cc  197
-rw-r--r--  V8Binding/v8/src/jump-target.h  51
-rw-r--r--  V8Binding/v8/src/log-inl.h  126
-rw-r--r--  V8Binding/v8/src/log-utils.cc  193
-rw-r--r--  V8Binding/v8/src/log-utils.h  67
-rw-r--r--  V8Binding/v8/src/log.cc  326
-rw-r--r--  V8Binding/v8/src/log.h  83
-rw-r--r--  V8Binding/v8/src/macro-assembler.h  2
-rw-r--r--  V8Binding/v8/src/macros.py  22
-rw-r--r--  V8Binding/v8/src/mark-compact.cc  19
-rw-r--r--  V8Binding/v8/src/math.js  92
-rw-r--r--  V8Binding/v8/src/messages.js  338
-rw-r--r--  V8Binding/v8/src/mirror-delay.js  200
-rw-r--r--  V8Binding/v8/src/objects-debug.cc  22
-rw-r--r--  V8Binding/v8/src/objects-inl.h  52
-rw-r--r--  V8Binding/v8/src/objects.cc  465
-rw-r--r--  V8Binding/v8/src/objects.h  161
-rw-r--r--  V8Binding/v8/src/oprofile-agent.cc  4
-rw-r--r--  V8Binding/v8/src/parser.cc  23
-rw-r--r--  V8Binding/v8/src/platform-linux.cc  27
-rw-r--r--  V8Binding/v8/src/platform-macos.cc  25
-rw-r--r--  V8Binding/v8/src/platform.h  23
-rw-r--r--  V8Binding/v8/src/property.h  13
-rw-r--r--  V8Binding/v8/src/regexp-delay.js  2
-rw-r--r--  V8Binding/v8/src/regexp-macro-assembler-irregexp.cc  1
-rw-r--r--  V8Binding/v8/src/register-allocator.cc  13
-rw-r--r--  V8Binding/v8/src/register-allocator.h  94
-rw-r--r--  V8Binding/v8/src/rewriter.cc  5
-rw-r--r--  V8Binding/v8/src/runtime.cc  846
-rw-r--r--  V8Binding/v8/src/runtime.h  14
-rw-r--r--  V8Binding/v8/src/runtime.js  125
-rw-r--r--  V8Binding/v8/src/scopeinfo.cc  86
-rw-r--r--  V8Binding/v8/src/scopeinfo.h  68
-rw-r--r--  V8Binding/v8/src/scopes.cc  23
-rw-r--r--  V8Binding/v8/src/scopes.h  2
-rw-r--r--  V8Binding/v8/src/serialize.cc  54
-rw-r--r--  V8Binding/v8/src/spaces.cc  2
-rw-r--r--  V8Binding/v8/src/spaces.h  6
-rw-r--r--  V8Binding/v8/src/string.js  54
-rw-r--r--  V8Binding/v8/src/stub-cache.cc  137
-rw-r--r--  V8Binding/v8/src/stub-cache.h  40
-rw-r--r--  V8Binding/v8/src/top.cc  5
-rw-r--r--  V8Binding/v8/src/top.h  1
-rw-r--r--  V8Binding/v8/src/utils.h  5
-rw-r--r--  V8Binding/v8/src/v8-counters.h  8
-rw-r--r--  V8Binding/v8/src/v8.cc  37
-rw-r--r--  V8Binding/v8/src/v8.h  14
-rw-r--r--  V8Binding/v8/src/v8natives.js  24
-rw-r--r--  V8Binding/v8/src/variables.cc  4
-rw-r--r--  V8Binding/v8/src/variables.h  13
-rw-r--r--  V8Binding/v8/src/version.cc  4
-rw-r--r--  V8Binding/v8/src/virtual-frame.cc  18
-rw-r--r--  V8Binding/v8/src/virtual-frame.h  2
-rw-r--r--  V8Binding/v8/src/x64/assembler-x64-inl.h  86
-rw-r--r--  V8Binding/v8/src/x64/assembler-x64.cc  1101
-rw-r--r--  V8Binding/v8/src/x64/assembler-x64.h  310
-rw-r--r--  V8Binding/v8/src/x64/builtins-x64.cc  668
-rw-r--r--  V8Binding/v8/src/x64/codegen-x64-inl.h  18
-rw-r--r--  V8Binding/v8/src/x64/codegen-x64.cc  6678
-rw-r--r--  V8Binding/v8/src/x64/codegen-x64.h  29
-rw-r--r--  V8Binding/v8/src/x64/frames-x64.cc  86
-rw-r--r--  V8Binding/v8/src/x64/frames-x64.h  55
-rw-r--r--  V8Binding/v8/src/x64/ic-x64.cc  169
-rw-r--r--  V8Binding/v8/src/x64/jump-target-x64.cc  341
-rw-r--r--  V8Binding/v8/src/x64/macro-assembler-x64.cc  728
-rw-r--r--  V8Binding/v8/src/x64/macro-assembler-x64.h  40
-rw-r--r--  V8Binding/v8/src/x64/register-allocator-x64-inl.h  37
-rw-r--r--  V8Binding/v8/src/x64/register-allocator-x64.cc  57
-rw-r--r--  V8Binding/v8/src/x64/register-allocator-x64.h  2
-rw-r--r--  V8Binding/v8/src/x64/simulator-x64.h  1
-rw-r--r--  V8Binding/v8/src/x64/stub-cache-x64.cc  151
-rw-r--r--  V8Binding/v8/src/x64/virtual-frame-x64.cc  1025
-rw-r--r--  V8Binding/v8/src/x64/virtual-frame-x64.h  21
-rw-r--r--  V8Binding/v8/test/cctest/cctest.status  4
-rw-r--r--  V8Binding/v8/test/cctest/test-api.cc  139
-rw-r--r--  V8Binding/v8/test/cctest/test-assembler-x64.cc  49
-rw-r--r--  V8Binding/v8/test/cctest/test-debug.cc  177
-rw-r--r--  V8Binding/v8/test/cctest/test-func-name-inference.cc  14
-rw-r--r--  V8Binding/v8/test/cctest/test-heap.cc  4
-rw-r--r--  V8Binding/v8/test/cctest/test-log-utils.cc  183
-rw-r--r--  V8Binding/v8/test/cctest/test-mark-compact.cc  7
-rw-r--r--  V8Binding/v8/test/message/overwritten-builtins.js  31
-rw-r--r--  V8Binding/v8/test/message/overwritten-builtins.out  30
-rw-r--r--  V8Binding/v8/test/mjsunit/arguments-apply.js  134
-rw-r--r--  V8Binding/v8/test/mjsunit/arguments-lazy.js  47
-rw-r--r--  V8Binding/v8/test/mjsunit/array-sort.js  38
-rw-r--r--  V8Binding/v8/test/mjsunit/big-object-literal.js  2
-rw-r--r--  V8Binding/v8/test/mjsunit/compare-nan.js  24
-rw-r--r--  V8Binding/v8/test/mjsunit/date-parse.js  2
-rw-r--r--  V8Binding/v8/test/mjsunit/debug-scopes.js  660
-rw-r--r--  V8Binding/v8/test/mjsunit/debug-sourceinfo.js  628
-rw-r--r--  V8Binding/v8/test/mjsunit/html-comments.js  16
-rw-r--r--  V8Binding/v8/test/mjsunit/regexp-captures.js  31
-rw-r--r--  V8Binding/v8/test/mjsunit/regress/regress-1919169.js  40
-rw-r--r--  V8Binding/v8/test/mjsunit/regress/regress-386.js  47
-rw-r--r--  V8Binding/v8/test/mjsunit/regress/regress-392.js  34
-rw-r--r--  V8Binding/v8/test/mjsunit/regress/regress-6-9-regexp.js  30
-rw-r--r--  V8Binding/v8/test/mjsunit/sin-cos.js  45
-rw-r--r--  V8Binding/v8/test/mjsunit/smi-ops.js  55
-rw-r--r--  V8Binding/v8/test/mjsunit/stack-traces.js  160
-rw-r--r--  V8Binding/v8/test/mjsunit/toint32.js  16
-rw-r--r--  V8Binding/v8/test/mjsunit/tools/logreader.js  82
-rw-r--r--  V8Binding/v8/test/mozilla/mozilla.status  34
-rw-r--r--  V8Binding/v8/tools/codemap.js  4
-rw-r--r--  V8Binding/v8/tools/gyp/v8.gyp  1
-rw-r--r--  V8Binding/v8/tools/linux-tick-processor  12
-rw-r--r--  V8Binding/v8/tools/logreader.js  317
-rw-r--r--  V8Binding/v8/tools/oprofile/annotate  7
-rw-r--r--  V8Binding/v8/tools/oprofile/common  19
-rw-r--r--  V8Binding/v8/tools/oprofile/dump  7
-rw-r--r--  V8Binding/v8/tools/oprofile/report  7
-rw-r--r--  V8Binding/v8/tools/oprofile/reset  7
-rw-r--r--  V8Binding/v8/tools/oprofile/run  14
-rw-r--r--  V8Binding/v8/tools/oprofile/shutdown  7
-rw-r--r--  V8Binding/v8/tools/oprofile/start  7
-rwxr-xr-x  V8Binding/v8/tools/test.py  4
-rw-r--r--  V8Binding/v8/tools/tickprocessor.js  163
-rwxr-xr-x  V8Binding/v8/tools/v8.xcodeproj/project.pbxproj  2
-rw-r--r--  V8Binding/v8/tools/visual_studio/v8_base.vcproj  4
-rw-r--r--  V8Binding/v8/tools/visual_studio/v8_base_arm.vcproj  4
-rw-r--r--  V8Binding/v8/tools/windows-tick-processor.bat  2
-rw-r--r--  WEBKIT_MERGE_REVISION  2
205 files changed, 21621 insertions, 4743 deletions
diff --git a/V8Binding/v8/AUTHORS b/V8Binding/v8/AUTHORS
index 9b198d0..bfe58a2 100644
--- a/V8Binding/v8/AUTHORS
+++ b/V8Binding/v8/AUTHORS
@@ -6,6 +6,7 @@
Google Inc.
Alexander Botero-Lowry <alexbl@FreeBSD.org>
+Alexandre Vassalotti <avassalotti@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel James <dnljms@gmail.com>
diff --git a/V8Binding/v8/ChangeLog b/V8Binding/v8/ChangeLog
index 3df6885..1306112 100644
--- a/V8Binding/v8/ChangeLog
+++ b/V8Binding/v8/ChangeLog
@@ -1,3 +1,48 @@
+2009-06-29: Version 1.2.10
+
+ Improved debugger support.
+
+ Fixed bug in exception message reporting (issue 390).
+
+ Improved overall performance.
+
+
+2009-06-23: Version 1.2.9
+
+ Improved math performance on ARM.
+
+ Fixed profiler name-inference bug.
+
+ Fixed handling of shared libraries in the profiler tick processor
+ scripts.
+
+ Fixed handling of tests that time out in the test scripts.
+
+ Fixed compilation on MacOS X version 10.4.
+
+ Fixed two bugs in the regular expression engine.
+
+ Fixed a bug in the string type inference.
+
+ Fixed a bug in the handling of 'constant function' properties.
+
+ Improved overall performance.
+
+
+2009-06-16: Version 1.2.8
+
+ Optimized math on ARM platforms.
+
+ Fixed two crash bugs in the handling of getters and setters.
+
+ Improved the debugger support by adding scope chain information.
+
+ Improved the profiler support by compressing log data transmitted
+ to clients.
+
+ Improved overall performance.
+
+
2009-06-08: Version 1.2.7
Improved debugger and profiler support.
diff --git a/V8Binding/v8/SConstruct b/V8Binding/v8/SConstruct
index 3b14eea..0baf71b 100644
--- a/V8Binding/v8/SConstruct
+++ b/V8Binding/v8/SConstruct
@@ -125,7 +125,7 @@ LIBRARY_FLAGS = {
}
},
'os:macos': {
- 'CCFLAGS': ['-ansi'],
+ 'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
},
'os:freebsd': {
'CPPPATH' : ['/usr/local/include'],
@@ -641,7 +641,7 @@ def GetVersionComponents():
def GetVersion():
version_components = GetVersionComponents()
-
+
if version_components[len(version_components) - 1] == '0':
version_components.pop()
return '.'.join(version_components)
@@ -649,10 +649,10 @@ def GetVersion():
def GetSpecificSONAME():
SONAME_PATTERN = re.compile(r"#define\s+SONAME\s+\"(.*)\"")
-
+
source = open(join(root_dir, 'src', 'version.cc')).read()
match = SONAME_PATTERN.search(source)
-
+
if match:
return match.group(1).strip()
else:
diff --git a/V8Binding/v8/benchmarks/revisions.html b/V8Binding/v8/benchmarks/revisions.html
index 458f8db..b86c876 100644
--- a/V8Binding/v8/benchmarks/revisions.html
+++ b/V8Binding/v8/benchmarks/revisions.html
@@ -1,7 +1,7 @@
<html>
<head>
<title>V8 Benchmark Suite Revisions</title>
-<link type="text/css" rel="stylesheet" href="style.css"></link>
+<link type="text/css" rel="stylesheet" href="style.css" />
</head>
<body>
<div>
diff --git a/V8Binding/v8/benchmarks/run.html b/V8Binding/v8/benchmarks/run.html
index 6adb6d2..050764e 100644
--- a/V8Binding/v8/benchmarks/run.html
+++ b/V8Binding/v8/benchmarks/run.html
@@ -1,5 +1,10 @@
-<html>
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<html lang="en">
<head>
+<meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<meta http-equiv="Content-Script-Type" content="text/javascript">
+<meta http-equiv="Content-Style-Type" content="text/css">
<title>V8 Benchmark Suite</title>
<script type="text/javascript" src="base.js"></script>
<script type="text/javascript" src="richards.js"></script>
@@ -9,7 +14,7 @@
<script type="text/javascript" src="earley-boyer.js"></script>
<script type="text/javascript" src="regexp.js"></script>
<script type="text/javascript" src="splay.js"></script>
-<link type="text/css" rel="stylesheet" href="style.css"></link>
+<link type="text/css" rel="stylesheet" href="style.css" />
<script type="text/javascript">
var completed = 0;
var benchmarks = BenchmarkSuite.CountBenchmarks();
@@ -25,12 +30,12 @@ function ShowProgress(name) {
function AddResult(name, result) {
var text = name + ': ' + result;
var results = document.getElementById("results");
- results.innerHTML += (text + "<br/>");
+ results.innerHTML += (text + "<br>");
}
function AddError(name, error) {
- AddResult(name, '<b>error</b>');
+ AddResult(name, '<b>error<\/b>');
success = false;
}
@@ -53,11 +58,11 @@ function Run() {
function Load() {
var version = BenchmarkSuite.version;
document.getElementById("version").innerHTML = version;
- window.setTimeout(Run, 200);
+ setTimeout(Run, 200);
}
</script>
</head>
-<body onLoad="Load()">
+<body onload="Load()">
<div>
<div class="title"><h1>V8 Benchmark Suite - version <span id="version">?</span></h1></div>
<table>
@@ -71,15 +76,15 @@ the individual benchmarks and of a reference system (score
higher scores means better performance: <em>Bigger is better!</em>
<ul>
-<li><b>Richards</b><br/>OS kernel simulation benchmark, originally written in BCPL by Martin Richards (<i>539 lines</i>).</li>
-<li><b>DeltaBlue</b><br/>One-way constraint solver, originally written in Smalltalk by John Maloney and Mario Wolczko (<i>880 lines</i>).</li>
-<li><b>Crypto</b><br/>Encryption and decryption benchmark based on code by Tom Wu (<i>1698 lines</i>).</li>
-<li><b>RayTrace</b><br/>Ray tracer benchmark based on code by <a href="http://flog.co.nz/">Adam Burmister</a> (<i>935 lines</i>).</li>
-<li><b>EarleyBoyer</b><br/>Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (<i>4685 lines</i>).</li>
-<li><b>RegExp</b><br/>Regular expression benchmark generated by extracting regular expression operations from 50 of the most popular web pages
+<li><b>Richards</b><br>OS kernel simulation benchmark, originally written in BCPL by Martin Richards (<i>539 lines</i>).</li>
+<li><b>DeltaBlue</b><br>One-way constraint solver, originally written in Smalltalk by John Maloney and Mario Wolczko (<i>880 lines</i>).</li>
+<li><b>Crypto</b><br>Encryption and decryption benchmark based on code by Tom Wu (<i>1698 lines</i>).</li>
+<li><b>RayTrace</b><br>Ray tracer benchmark based on code by <a href="http://flog.co.nz/">Adam Burmister</a> (<i>935 lines</i>).</li>
+<li><b>EarleyBoyer</b><br>Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (<i>4685 lines</i>).</li>
+<li><b>RegExp</b><br>Regular expression benchmark generated by extracting regular expression operations from 50 of the most popular web pages
(<i>1614 lines</i>).
</li>
-<li><b>Splay</b><br/>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>378 lines</i>).</li>
+<li><b>Splay</b><br>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>378 lines</i>).</li>
</ul>
<p>
@@ -92,9 +97,9 @@ the <a href="http://v8.googlecode.com/svn/data/benchmarks/current/revisions.html
</td><td style="text-align: center">
<div class="run">
- <div id="status" style="text-align: center; margin-top: 50px; font-size: 120%; font-weight: bold;">Starting...</div>
- <div style="text-align: left; margin: 30px 0 0 90px;" id="results">
- <div>
+ <div id="status">Starting...</div>
+ <div id="results">
+ </div>
</div>
</td></tr></table>
diff --git a/V8Binding/v8/benchmarks/style.css b/V8Binding/v8/benchmarks/style.css
index d976cdd..46320c1 100644
--- a/V8Binding/v8/benchmarks/style.css
+++ b/V8Binding/v8/benchmarks/style.css
@@ -1,11 +1,7 @@
-body {
- font-family: sans-serif;
-}
-
-hr{
+hr {
border: 1px solid;
border-color: #36C;
- margin: 1em 0
+ margin: 1em 0;
}
h1, h2, h3, h4 {
@@ -14,27 +10,17 @@ h1, h2, h3, h4 {
}
h1 {
- font-size: 190%;
- height: 1.2em;
-}
-
-
-h2{
- font-size: 140%;
+ font-size: 154%;
height: 1.2em;
}
-h3{
- font-size: 100%;
-}
-li{
+li {
margin: .3em 0 1em 0;
}
-body{
+body {
font-family: Helvetica,Arial,sans-serif;
- font-size: small;
color: #000;
background-color: #fff;
}
@@ -54,7 +40,7 @@ div.subtitle {
}
td.contents {
- text-align: start;
+ text-align: left;
}
div.run {
@@ -68,3 +54,15 @@ div.run {
background-repeat: no-repeat;
border: 1px solid rgb(51, 102, 204);
}
+
+#status {
+ text-align: center;
+ margin-top: 50px;
+ font-size: 120%;
+ font-weight: bold;
+}
+
+#results {
+ text-align: left;
+ margin: 30px 0 0 90px;
+}
diff --git a/V8Binding/v8/include/v8.h b/V8Binding/v8/include/v8.h
index 87ce2a2..8f22c81 100644
--- a/V8Binding/v8/include/v8.h
+++ b/V8Binding/v8/include/v8.h
@@ -212,9 +212,9 @@ template <class T> class V8EXPORT_INLINE Handle {
*/
bool IsEmpty() const { return val_ == 0; }
- T* operator->() const;
+ T* operator->() const { return val_; }
- T* operator*() const;
+ T* operator*() const { return val_; }
/**
* Sets the handle to be empty. IsEmpty() will then return true.
@@ -1176,6 +1176,12 @@ class V8EXPORT Array : public Object {
public:
uint32_t Length() const;
+ /**
+ * Clones an element at index |index|. Returns an empty
+ * handle if cloning fails (for any reason).
+ */
+ Local<Object> CloneElementAt(uint32_t index);
+
static Local<Array> New(int length = 0);
static Array* Cast(Value* obj);
private:
@@ -2509,18 +2515,6 @@ void Persistent<T>::ClearWeak() {
V8::ClearWeak(reinterpret_cast<void**>(**this));
}
-template <class T>
-T* Handle<T>::operator->() const {
- return val_;
-}
-
-
-template <class T>
-T* Handle<T>::operator*() const {
- return val_;
-}
-
-
Local<Value> Arguments::operator[](int i) const {
if (i < 0 || length_ <= i) return Local<Value>(*Undefined());
return Local<Value>(reinterpret_cast<Value*>(values_ - i));
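The CloneElementAt addition above is the only new public API in this header change; the operator->/operator* change is purely mechanical (the definitions move inline into the class body, so the out-of-line template definitions near the bottom of the file are deleted). Since CloneElementAt signals every failure mode by returning an empty handle, callers must test IsEmpty() rather than assume success. A minimal usage sketch against this era's pre-Isolate API, assuming a context has already been entered (error handling reduced to the empty-handle check):

    #include <v8.h>

    using namespace v8;

    // Clone element 0 of an array. Per the new header comment, an empty
    // handle comes back if the array has slow elements, the element is
    // not an object, or the copy throws.
    Local<Object> CloneFirstElement(Local<Array> array) {
      HandleScope scope;
      Local<Object> clone = array->CloneElementAt(0);
      if (clone.IsEmpty()) return Local<Object>();
      return scope.Close(clone);
    }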
diff --git a/V8Binding/v8/src/SConscript b/V8Binding/v8/src/SConscript
index 64d2063..f1ca875 100755
--- a/V8Binding/v8/src/SConscript
+++ b/V8Binding/v8/src/SConscript
@@ -77,7 +77,8 @@ SOURCES = {
'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
# 'x64/regexp-macro-assembler-x64.cc',
- 'x64/stub-cache-x64.cc'
+ 'x64/register-allocator-x64.cc',
+ 'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc'
],
'simulator:arm': ['arm/simulator-arm.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
diff --git a/V8Binding/v8/src/accessors.cc b/V8Binding/v8/src/accessors.cc
index ac6cdf9..82ae702 100644
--- a/V8Binding/v8/src/accessors.cc
+++ b/V8Binding/v8/src/accessors.cc
@@ -511,7 +511,10 @@ Object* Accessors::FunctionGetArguments(Object* object, void*) {
// If there is an arguments variable in the stack, we return that.
int index = ScopeInfo<>::StackSlotIndex(frame->code(),
Heap::arguments_symbol());
- if (index >= 0) return frame->GetExpression(index);
+ if (index >= 0) {
+ Handle<Object> arguments = Handle<Object>(frame->GetExpression(index));
+ if (!arguments->IsTheHole()) return *arguments;
+ }
// If there isn't an arguments variable in the stack, we need to
// find the frame that holds the actual arguments passed to the
diff --git a/V8Binding/v8/src/api.cc b/V8Binding/v8/src/api.cc
index 7b7f290..b9e0cec 100644
--- a/V8Binding/v8/src/api.cc
+++ b/V8Binding/v8/src/api.cc
@@ -2124,7 +2124,9 @@ int v8::Object::GetIdentityHash() {
} else {
int attempts = 0;
do {
- hash_value = random() & i::Smi::kMaxValue; // Limit range to fit a smi.
+ // Generate a random 32-bit hash value but limit range to fit
+ // within a smi.
+ hash_value = i::V8::Random() & i::Smi::kMaxValue;
attempts++;
} while (hash_value == 0 && attempts < 30);
hash_value = hash_value != 0 ? hash_value : 1; // never return 0
@@ -3010,6 +3012,26 @@ uint32_t v8::Array::Length() const {
}
+Local<Object> Array::CloneElementAt(uint32_t index) {
+ ON_BAILOUT("v8::Array::CloneElementAt()", return Local<Object>());
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ if (!self->HasFastElements()) {
+ return Local<Object>();
+ }
+ i::FixedArray* elms = self->elements();
+ i::Object* paragon = elms->get(index);
+ if (!paragon->IsJSObject()) {
+ return Local<Object>();
+ }
+ i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
+ EXCEPTION_PREAMBLE();
+ i::Handle<i::JSObject> result = i::Copy(paragon_handle);
+ has_pending_exception = result.is_null();
+ EXCEPTION_BAILOUT_CHECK(Local<Object>());
+ return Utils::ToLocal(result);
+}
+
+
Local<String> v8::String::NewSymbol(const char* data, int length) {
EnsureInitialized("v8::String::NewSymbol()");
LOG_API("String::NewSymbol(char)");
@@ -3382,6 +3404,7 @@ void Debug::SetMessageHandler(v8::Debug::MessageHandler handler,
void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
EnsureInitialized("v8::Debug::SetMessageHandler");
ENTER_V8;
+ HandleScope scope;
i::Debugger::SetMessageHandler(handler);
}
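Two behavioral notes on the api.cc hunks above: GetIdentityHash now draws randomness from V8's own generator instead of libc random(), and SetMessageHandler2 opens a HandleScope before touching the debugger. The hash loop keeps its original shape: mask a random word into Smi range, retry a zero result a bounded number of times, then force a nonzero fallback so 0 is never returned. A self-contained model of that pattern (Random32 is a toy stand-in for i::V8::Random(), and the 0x3fffffff mask only illustrates a 31-bit Smi payload):

    #include <cstdint>

    uint32_t Random32() {  // toy xorshift; stand-in for i::V8::Random()
      static uint32_t state = 2463534242u;
      state ^= state << 13; state ^= state >> 17; state ^= state << 5;
      return state;
    }

    int ComputeIdentityHash() {
      const uint32_t kSmiMaxValue = 0x3fffffff;  // illustrative Smi range
      int hash;
      int attempts = 0;
      do {
        // Mask so the hash always fits within a Smi.
        hash = static_cast<int>(Random32() & kSmiMaxValue);
        attempts++;
      } while (hash == 0 && attempts < 30);
      return hash != 0 ? hash : 1;  // never return 0
    }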
diff --git a/V8Binding/v8/src/arm/assembler-arm-inl.h b/V8Binding/v8/src/arm/assembler-arm-inl.h
index 824a5fd..4dda7ec 100644
--- a/V8Binding/v8/src/arm/assembler-arm-inl.h
+++ b/V8Binding/v8/src/arm/assembler-arm-inl.h
@@ -50,7 +50,7 @@ Condition NegateCondition(Condition cc) {
}
-void RelocInfo::apply(int delta) {
+void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
diff --git a/V8Binding/v8/src/arm/assembler-arm.cc b/V8Binding/v8/src/arm/assembler-arm.cc
index 6ec8f46..d168577 100644
--- a/V8Binding/v8/src/arm/assembler-arm.cc
+++ b/V8Binding/v8/src/arm/assembler-arm.cc
@@ -491,6 +491,20 @@ static bool fits_shifter(uint32_t imm32,
}
+// We have to use the temporary register for things that can be relocated even
+// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
+// space. There is no guarantee that the relocated location can be similarly
+// encoded.
+static bool MustUseIp(RelocInfo::Mode rmode) {
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+ return Serializer::enabled();
+ } else if (rmode == RelocInfo::NONE) {
+ return false;
+ }
+ return true;
+}
+
+
void Assembler::addrmod1(Instr instr,
Register rn,
Register rd,
@@ -501,8 +515,7 @@ void Assembler::addrmod1(Instr instr,
// immediate
uint32_t rotate_imm;
uint32_t immed_8;
- if ((x.rmode_ != RelocInfo::NONE &&
- x.rmode_ != RelocInfo::EXTERNAL_REFERENCE) ||
+ if (MustUseIp(x.rmode_) ||
!fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
@@ -816,7 +829,6 @@ void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
SBit s, Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
- ASSERT(!dst.is(src1));
emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -825,7 +837,6 @@ void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
void Assembler::mul(Register dst, Register src1, Register src2,
SBit s, Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dst.is(src1));
emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -837,7 +848,7 @@ void Assembler::smlal(Register dstL,
SBit s,
Condition cond) {
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ ASSERT(!dstL.is(dstH));
emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -850,7 +861,7 @@ void Assembler::smull(Register dstL,
SBit s,
Condition cond) {
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ ASSERT(!dstL.is(dstH));
emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -863,7 +874,7 @@ void Assembler::umlal(Register dstL,
SBit s,
Condition cond) {
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ ASSERT(!dstL.is(dstH));
emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -876,8 +887,8 @@ void Assembler::umull(Register dstL,
SBit s,
Condition cond) {
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
- emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
+ ASSERT(!dstL.is(dstH));
+ emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -906,8 +917,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
// immediate
uint32_t rotate_imm;
uint32_t immed_8;
- if ((src.rmode_ != RelocInfo::NONE &&
- src.rmode_ != RelocInfo::EXTERNAL_REFERENCE)||
+ if (MustUseIp(src.rmode_) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// immediate operand cannot be encoded, load it first to register ip
RecordRelocInfo(src.rmode_, src.imm32_);
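The new MustUseIp predicate above exists because ARM data-processing instructions can only encode an immediate as an 8-bit value rotated right by an even amount; a constant that happens to fit today may not fit after relocation rewrites it, so relocatable operands are routed through the ip scratch register regardless. The encodability test itself is what fits_shifter performs; a standalone model of that check (the function name and out-parameters here are illustrative, not V8's):

    #include <cstdint>

    // True if imm32 is encodable as an ARM operand2 immediate: an 8-bit
    // value rotated right by 2 * rotate_field, rotate_field in [0, 15].
    bool FitsArmImmediate(uint32_t imm32, uint32_t* rotate_field,
                          uint32_t* imm8) {
      for (uint32_t rot = 0; rot < 32; rot += 2) {
        // Rotating left by rot undoes a right-rotation by rot; the
        // "& 31" keeps the rot == 0 case a well-defined shift.
        uint32_t v = (imm32 << rot) | (imm32 >> ((32 - rot) & 31));
        if (v <= 0xff) {
          *rotate_field = rot >> 1;  // the instruction stores rot / 2
          *imm8 = v;
          return true;
        }
      }
      return false;
    }

For instance, 0x000003fc is encodable (0xff rotated right by 30) while 0x00000101 is not; and as the comment above explains, even an encodable constant goes through ip when its relocation mode says the value may later change.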
diff --git a/V8Binding/v8/src/arm/builtins-arm.cc b/V8Binding/v8/src/arm/builtins-arm.cc
index 588798b..b5332ec 100644
--- a/V8Binding/v8/src/arm/builtins-arm.cc
+++ b/V8Binding/v8/src/arm/builtins-arm.cc
@@ -64,11 +64,27 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &non_function_call);
// Check that the function is a JSFunction.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &non_function_call);
+ // Jump to the function-specific construct stub.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
+ __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // r0: number of arguments
+ // r1: called object
+ __ bind(&non_function_call);
+
+ // Set expected number of arguments to zero (not changing r0).
+ __ mov(r2, Operand(0));
+ __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Enter a construct frame.
__ EnterConstructFrame();
@@ -159,9 +175,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// If the type of the result (stored in its map) is less than
// FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
- __ cmp(r3, Operand(FIRST_JS_OBJECT_TYPE));
+ __ CompareObjectType(r0, r3, r3, FIRST_JS_OBJECT_TYPE);
__ b(ge, &exit);
// Throw away the result of the constructor invocation and use the
@@ -181,16 +195,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize));
__ Jump(lr);
-
- // r0: number of arguments
- // r1: called object
- __ bind(&non_function_call);
-
- // Set expected number of arguments to zero (not changing r0).
- __ mov(r2, Operand(0));
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
}
@@ -290,9 +294,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &non_function);
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(eq, &function);
// Non-function called: Clear the function to force exception.
@@ -328,9 +330,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ cmp(r2, r3);
__ b(eq, &use_global_receiver);
- __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
- __ cmp(r3, Operand(FIRST_JS_OBJECT_TYPE));
+ __ CompareObjectType(r2, r3, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, &call_to_object);
__ cmp(r3, Operand(LAST_JS_OBJECT_TYPE));
__ b(le, &done);
@@ -501,9 +501,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Check if the receiver is already a JavaScript object.
// r0: receiver
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
__ b(lt, &call_to_object);
__ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
__ b(le, &push_receiver);
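Every three-instruction pattern removed in this file (load the object's map, load the map's instance-type byte, compare against a type constant) collapses into the new CompareObjectType macro-assembler helper, which sets the condition flags the same way, so the surrounding b(eq/ne/ge/lt) branches are unchanged. Inferred from the replaced sequences, the helper presumably expands to something like this sketch (register roles match the call sites above; the real body lives in macro-assembler-arm.cc, which this change also touches):

    // Sketch: leaves the map of `object` in `map`, its instance type in
    // `type_reg`, and the flags set by comparing that type to `type`.
    void MacroAssembler::CompareObjectType(Register object,
                                           Register map,
                                           Register type_reg,
                                           InstanceType type) {
      ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
      ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
      cmp(type_reg, Operand(type));
    }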
diff --git a/V8Binding/v8/src/arm/codegen-arm-inl.h b/V8Binding/v8/src/arm/codegen-arm-inl.h
index 544331a..5a29a45 100644
--- a/V8Binding/v8/src/arm/codegen-arm-inl.h
+++ b/V8Binding/v8/src/arm/codegen-arm-inl.h
@@ -39,6 +39,16 @@ namespace internal {
void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+ GenerateFastMathOp(SIN, args);
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+ GenerateFastMathOp(COS, args);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/codegen-arm.cc b/V8Binding/v8/src/arm/codegen-arm.cc
index 7428d3b..989a09c 100644
--- a/V8Binding/v8/src/arm/codegen-arm.cc
+++ b/V8Binding/v8/src/arm/codegen-arm.cc
@@ -41,6 +41,18 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Label* rhs_not_nan,
+ Label* slow,
+ bool strict);
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
+
+
+
// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.
@@ -289,7 +301,6 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
// r0: result
// sp: stack pointer
// fp: frame pointer
- // pp: parameter pointer
// cp: callee's context
__ mov(r0, Operand(Factory::undefined_value()));
@@ -703,6 +714,7 @@ class GenericBinaryOpStub : public CodeStub {
}
void Generate(MacroAssembler* masm);
+ void HandleNonSmiBitwiseOp(MacroAssembler* masm);
const char* GetName() {
switch (op_) {
@@ -1002,7 +1014,13 @@ void CodeGenerator::SmiOperation(Token::Value op,
}
-void CodeGenerator::Comparison(Condition cc, bool strict) {
+void CodeGenerator::Comparison(Condition cc,
+ Expression* left,
+ Expression* right,
+ bool strict) {
+ if (left != NULL) LoadAndSpill(left);
+ if (right != NULL) LoadAndSpill(right);
+
VirtualFrame::SpilledScope spilled_scope;
// sp[0] : y
// sp[1] : x
@@ -1026,43 +1044,19 @@ void CodeGenerator::Comparison(Condition cc, bool strict) {
__ tst(r2, Operand(kSmiTagMask));
smi.Branch(eq);
- // Perform non-smi comparison by runtime call.
- frame_->EmitPush(r1);
-
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript native;
- int arg_count = 1;
- if (cc == eq) {
- native = strict ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- native = Builtins::COMPARE;
- int ncr; // NaN compare result
- if (cc == lt || cc == le) {
- ncr = GREATER;
- } else {
- ASSERT(cc == gt || cc == ge); // remaining cases
- ncr = LESS;
- }
- frame_->EmitPush(r0);
- arg_count++;
- __ mov(r0, Operand(Smi::FromInt(ncr)));
- }
+ // Perform non-smi comparison by stub.
+ // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
+ // We call with 0 args because there are 0 on the stack.
+ CompareStub stub(cc, strict);
+ frame_->CallStub(&stub, 0);
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- frame_->EmitPush(r0);
- Result arg_count_register = allocator_->Allocate(r0);
- ASSERT(arg_count_register.is_valid());
- __ mov(arg_count_register.reg(), Operand(arg_count));
- Result result = frame_->InvokeBuiltin(native,
- CALL_JS,
- &arg_count_register,
- arg_count + 1);
+ Result result = allocator_->Allocate(r0);
+ ASSERT(result.is_valid());
__ cmp(result.reg(), Operand(0));
result.Unuse();
exit.Jump();
- // test smi equality by pointer comparison.
+ // Do smi comparisons by pointer comparison.
smi.Bind();
__ cmp(r1, Operand(r0));
@@ -1471,87 +1465,6 @@ void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
}
-int CodeGenerator::FastCaseSwitchMaxOverheadFactor() {
- return kFastSwitchMaxOverheadFactor;
-}
-
-int CodeGenerator::FastCaseSwitchMinCaseCount() {
- return kFastSwitchMinCaseCount;
-}
-
-
-void CodeGenerator::GenerateFastCaseSwitchJumpTable(
- SwitchStatement* node,
- int min_index,
- int range,
- Label* default_label,
- Vector<Label*> case_targets,
- Vector<Label> case_labels) {
- VirtualFrame::SpilledScope spilled_scope;
- JumpTarget setup_default;
- JumpTarget is_smi;
-
- // A non-null default label pointer indicates a default case among
- // the case labels. Otherwise we use the break target as a
- // "default" for failure to hit the jump table.
- JumpTarget* default_target =
- (default_label == NULL) ? node->break_target() : &setup_default;
-
- ASSERT(kSmiTag == 0 && kSmiTagSize <= 2);
- frame_->EmitPop(r0);
-
- // Test for a Smi value in a HeapNumber.
- __ tst(r0, Operand(kSmiTagMask));
- is_smi.Branch(eq);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
- default_target->Branch(ne);
- frame_->EmitPush(r0);
- frame_->CallRuntime(Runtime::kNumberToSmi, 1);
- is_smi.Bind();
-
- if (min_index != 0) {
- // Small positive numbers can be immediate operands.
- if (min_index < 0) {
- // If min_index is Smi::kMinValue, -min_index is not a Smi.
- if (Smi::IsValid(-min_index)) {
- __ add(r0, r0, Operand(Smi::FromInt(-min_index)));
- } else {
- __ add(r0, r0, Operand(Smi::FromInt(-min_index - 1)));
- __ add(r0, r0, Operand(Smi::FromInt(1)));
- }
- } else {
- __ sub(r0, r0, Operand(Smi::FromInt(min_index)));
- }
- }
- __ tst(r0, Operand(0x80000000 | kSmiTagMask));
- default_target->Branch(ne);
- __ cmp(r0, Operand(Smi::FromInt(range)));
- default_target->Branch(ge);
- VirtualFrame* start_frame = new VirtualFrame(frame_);
- __ SmiJumpTable(r0, case_targets);
-
- GenerateFastCaseSwitchCases(node, case_labels, start_frame);
-
- // If there was a default case among the case labels, we need to
- // emit code to jump to it from the default target used for failure
- // to hit the jump table.
- if (default_label != NULL) {
- if (has_valid_frame()) {
- node->break_target()->Jump();
- }
- setup_default.Bind();
- frame_->MergeTo(start_frame);
- __ b(default_label);
- DeleteFrame();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
-}
-
-
void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
@@ -1562,10 +1475,6 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
LoadAndSpill(node->tag());
- if (TryGenerateFastCaseSwitchStatement(node)) {
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
- return;
- }
JumpTarget next_test;
JumpTarget fall_through;
@@ -1590,8 +1499,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
// Duplicate TOS.
__ ldr(r0, frame_->Top());
frame_->EmitPush(r0);
- LoadAndSpill(clause->label());
- Comparison(eq, true);
+ Comparison(eq, NULL, clause->label(), true);
Branch(false, &next_test);
// Before entering the body from the test, remove the switch value from
@@ -1872,9 +1780,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Check if enumerable is already a JSObject
__ tst(r0, Operand(kSmiTagMask));
primitive.Branch(eq);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
jsobject.Branch(hs);
primitive.Bind();
@@ -2107,14 +2013,16 @@ void CodeGenerator::VisitTryCatch(TryCatch* node) {
// Get an external reference to the handler address.
ExternalReference handler_address(Top::k_handler_address);
- // The next handler address is at kNextIndex in the stack.
- const int kNextIndex = StackHandlerConstants::kNextOffset / kPointerSize;
// If we can fall off the end of the try block, unlink from try chain.
if (has_valid_frame()) {
- __ ldr(r1, frame_->ElementAt(kNextIndex));
+ // The next handler address is on top of the frame. Unlink from
+ // the handler list and drop the rest of this handler from the
+ // frame.
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ frame_->EmitPop(r1);
__ mov(r3, Operand(handler_address));
__ str(r1, MemOperand(r3));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize);
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
if (has_unlinks) {
exit.Jump();
}
@@ -2134,15 +2042,11 @@ void CodeGenerator::VisitTryCatch(TryCatch* node) {
// break from (eg, for...in) may have left stuff on the stack.
__ mov(r3, Operand(handler_address));
__ ldr(sp, MemOperand(r3));
- // The stack pointer was restored to just below the code slot
- // (the topmost slot) in the handler.
- frame_->Forget(frame_->height() - handler_height + 1);
+ frame_->Forget(frame_->height() - handler_height);
- // kNextIndex is off by one because the code slot has already
- // been dropped.
- __ ldr(r1, frame_->ElementAt(kNextIndex - 1));
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ frame_->EmitPop(r1);
__ str(r1, MemOperand(r3));
- // The code slot has already been dropped from the handler.
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
@@ -2223,15 +2127,15 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
// Get an external reference to the handler address.
ExternalReference handler_address(Top::k_handler_address);
- // The next handler address is at kNextIndex in the stack.
- const int kNextIndex = StackHandlerConstants::kNextOffset / kPointerSize;
// If we can fall off the end of the try block, unlink from the try
// chain and set the state on the frame to FALLING.
if (has_valid_frame()) {
- __ ldr(r1, frame_->ElementAt(kNextIndex));
+ // The next handler address is on top of the frame.
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ frame_->EmitPop(r1);
__ mov(r3, Operand(handler_address));
__ str(r1, MemOperand(r3));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize);
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
// Fake a top of stack value (unneeded when FALLING) and set the
// state in r2, then jump around the unlink blocks if any.
@@ -2262,17 +2166,14 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
// stack.
__ mov(r3, Operand(handler_address));
__ ldr(sp, MemOperand(r3));
- // The stack pointer was restored to the address slot in the handler.
- ASSERT(StackHandlerConstants::kNextOffset == 1 * kPointerSize);
- frame_->Forget(frame_->height() - handler_height + 1);
+ frame_->Forget(frame_->height() - handler_height);
// Unlink this handler and drop it from the frame. The next
- // handler address is now on top of the frame.
+ // handler address is currently on top of the frame.
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(r1);
__ str(r1, MemOperand(r3));
- // The top (code) and the second (handler) slot have both been
- // dropped already.
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 2);
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
if (i == kReturnShadowIndex) {
// If this label shadowed the function return, materialize the
@@ -3272,6 +3173,15 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
}
+void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ LoadAndSpill(args->at(0)); // Load the object.
+ frame_->CallRuntime(Runtime::kClassOf, 1);
+ frame_->EmitPush(r0);
+}
+
+
void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);
@@ -3281,11 +3191,8 @@ void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
// if (object->IsSmi()) return the object.
__ tst(r0, Operand(kSmiTagMask));
leave.Branch(eq);
- // It is a heap object - get map.
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- // if (!object->IsJSValue()) return the object.
- __ cmp(r1, Operand(JS_VALUE_TYPE));
+ // It is a heap object - get map. If (!object->IsJSValue()) return the object.
+ __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
leave.Branch(ne);
// Load the value.
__ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
@@ -3305,11 +3212,8 @@ void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
// if (object->IsSmi()) return object.
__ tst(r1, Operand(kSmiTagMask));
leave.Branch(eq);
- // It is a heap object - get map.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- // if (!object->IsJSValue()) return object.
- __ cmp(r2, Operand(JS_VALUE_TYPE));
+ // It is a heap object - get map. If (!object->IsJSValue()) return the object.
+ __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
leave.Branch(ne);
// Store the value.
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
@@ -3381,16 +3285,21 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
__ and_(r1, r0, Operand(kSmiTagMask));
__ eor(r1, r1, Operand(kSmiTagMask), SetCC);
answer.Branch(ne);
- // It is a heap object - get the map.
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- // Check if the object is a JS array or not.
- __ cmp(r1, Operand(JS_ARRAY_TYPE));
+ // It is a heap object - get the map. Check if the object is a JS array.
+ __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
answer.Bind();
cc_reg_ = eq;
}
+void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 0);
+ frame_->CallRuntime(Runtime::kIsConstructCall, 0);
+ frame_->EmitPush(r0);
+}
+
+
void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 0);
@@ -3423,6 +3332,30 @@ void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 0);
+ __ Call(ExternalReference::random_positive_smi_function().address(),
+ RelocInfo::RUNTIME_ENTRY);
+ frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ LoadAndSpill(args->at(0));
+ switch (op) {
+ case SIN:
+ frame_->CallRuntime(Runtime::kMath_sin, 1);
+ break;
+ case COS:
+ frame_->CallRuntime(Runtime::kMath_cos, 1);
+ break;
+ }
+ frame_->EmitPush(r0);
+}
+
+
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
@@ -3571,7 +3504,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break;
case Token::SUB: {
- UnarySubStub stub;
+ bool overwrite =
+ (node->expression()->AsBinaryOperation() != NULL &&
+ node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ UnarySubStub stub(overwrite);
frame_->CallStub(&stub, 0);
break;
}
@@ -4001,9 +3937,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
} else if (check->Equals(Heap::function_symbol())) {
__ tst(r1, Operand(kSmiTagMask));
false_target()->Branch(eq);
- __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(JS_FUNCTION_TYPE));
+ __ CompareObjectType(r1, r1, r1, JS_FUNCTION_TYPE);
cc_reg_ = eq;
} else if (check->Equals(Heap::object_symbol())) {
@@ -4036,34 +3970,34 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
return;
}
- LoadAndSpill(left);
- LoadAndSpill(right);
switch (op) {
case Token::EQ:
- Comparison(eq, false);
+ Comparison(eq, left, right, false);
break;
case Token::LT:
- Comparison(lt);
+ Comparison(lt, left, right);
break;
case Token::GT:
- Comparison(gt);
+ Comparison(gt, left, right);
break;
case Token::LTE:
- Comparison(le);
+ Comparison(le, left, right);
break;
case Token::GTE:
- Comparison(ge);
+ Comparison(ge, left, right);
break;
case Token::EQ_STRICT:
- Comparison(eq, true);
+ Comparison(eq, left, right, true);
break;
case Token::IN: {
+ LoadAndSpill(left);
+ LoadAndSpill(right);
Result arg_count = allocator_->Allocate(r0);
ASSERT(arg_count.is_valid());
__ mov(arg_count.reg(), Operand(1)); // not counting receiver
@@ -4076,13 +4010,11 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
case Token::INSTANCEOF: {
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(1)); // not counting receiver
- Result result = frame_->InvokeBuiltin(Builtins::INSTANCE_OF,
- CALL_JS,
- &arg_count,
- 2);
+ LoadAndSpill(left);
+ LoadAndSpill(right);
+ InstanceofStub stub;
+ Result result = frame_->CallStub(&stub, 2);
+ // At this point if instanceof succeeded then r0 == 0.
__ tst(result.reg(), Operand(result.reg()));
cc_reg_ = eq;
break;
@@ -4341,6 +4273,631 @@ void Reference::SetValue(InitState init_state) {
}
+// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
+// instruction. On pre-ARM5 hardware this routine gives the wrong answer for 0
+// (31 instead of 32).
+static void CountLeadingZeros(
+ MacroAssembler* masm,
+ Register source,
+ Register scratch,
+ Register zeros) {
+#ifdef __ARM_ARCH_5__
+ __ clz(zeros, source); // This instruction is only supported after ARM5.
+#else
+ __ mov(zeros, Operand(0));
+ __ mov(scratch, source);
+ // Top 16.
+ __ tst(scratch, Operand(0xffff0000));
+ __ add(zeros, zeros, Operand(16), LeaveCC, eq);
+ __ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
+ // Top 8.
+ __ tst(scratch, Operand(0xff000000));
+ __ add(zeros, zeros, Operand(8), LeaveCC, eq);
+ __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
+ // Top 4.
+ __ tst(scratch, Operand(0xf0000000));
+ __ add(zeros, zeros, Operand(4), LeaveCC, eq);
+ __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
+ // Top 2.
+ __ tst(scratch, Operand(0xc0000000));
+ __ add(zeros, zeros, Operand(2), LeaveCC, eq);
+ __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
+ // Top bit.
+ __ tst(scratch, Operand(0x80000000));
+ __ add(zeros, zeros, Operand(1), LeaveCC, eq);
+#endif
+}
+
+
+// Takes a Smi and converts to an IEEE 64 bit floating point value in two
+// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
+// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
+// scratch register. Destroys the source register. No GC occurs during this
+// stub so you don't have to set up the frame.
+class ConvertToDoubleStub : public CodeStub {
+ public:
+ ConvertToDoubleStub(Register result_reg_1,
+ Register result_reg_2,
+ Register source_reg,
+ Register scratch_reg)
+ : result1_(result_reg_1),
+ result2_(result_reg_2),
+ source_(source_reg),
+ zeros_(scratch_reg) { }
+
+ private:
+ Register result1_;
+ Register result2_;
+ Register source_;
+ Register zeros_;
+
+ // Minor key encoding in 16 bits.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 14> {};
+
+ Major MajorKey() { return ConvertToDouble; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return result1_.code() +
+ (result2_.code() << 4) +
+ (source_.code() << 8) +
+ (zeros_.code() << 12);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "ConvertToDoubleStub"; }
+
+#ifdef DEBUG
+ void Print() { PrintF("ConvertToDoubleStub\n"); }
+#endif
+};
+
+
+void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
+#ifndef BIG_ENDIAN_FLOATING_POINT
+ Register exponent = result1_;
+ Register mantissa = result2_;
+#else
+ Register exponent = result2_;
+ Register mantissa = result1_;
+#endif
+ Label not_special;
+ // Convert from Smi to integer.
+ __ mov(source_, Operand(source_, ASR, kSmiTagSize));
+ // Move sign bit from source to destination. This works because the sign bit
+ // in the exponent word of the double has the same position and polarity as
+ // the 2's complement sign bit in a Smi.
+ ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
+ // Subtract from 0 if source was negative.
+ __ rsb(source_, source_, Operand(0), LeaveCC, ne);
+ __ cmp(source_, Operand(1));
+ __ b(gt, &not_special);
+
+ // We have -1, 0 or 1, which we treat specially.
+ __ cmp(source_, Operand(0));
+ // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
+ static const uint32_t exponent_word_for_1 =
+ HeapNumber::kExponentBias << HeapNumber::kExponentShift;
+ __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, ne);
+ // 1, 0 and -1 all have 0 for the second word.
+ __ mov(mantissa, Operand(0));
+ __ Ret();
+
+ __ bind(&not_special);
+ // Count leading zeros. Uses result2 for a scratch register on pre-ARM5.
+ // Gets the wrong answer for 0, but we already checked for that case above.
+ CountLeadingZeros(masm, source_, mantissa, zeros_);
+ // Compute exponent and or it into the exponent register.
+ // We use result2 as a scratch register here.
+ __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
+ __ orr(exponent,
+ exponent,
+ Operand(mantissa, LSL, HeapNumber::kExponentShift));
+ // Shift up the source chopping the top bit off.
+ __ add(zeros_, zeros_, Operand(1));
+ // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
+ __ mov(source_, Operand(source_, LSL, zeros_));
+ // Compute lower part of fraction (last 12 bits).
+ __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
+ // And the top (top 20 bits).
+ __ orr(exponent,
+ exponent,
+ Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
+ __ Ret();
+}
+
+
+// This stub can convert a signed int32 to a heap number (double). It does
+// not work for int32s that are in Smi range! No GC occurs during this stub
+// so you don't have to set up the frame.
+class WriteInt32ToHeapNumberStub : public CodeStub {
+ public:
+ WriteInt32ToHeapNumberStub(Register the_int,
+ Register the_heap_number,
+ Register scratch)
+ : the_int_(the_int),
+ the_heap_number_(the_heap_number),
+ scratch_(scratch) { }
+
+ private:
+ Register the_int_;
+ Register the_heap_number_;
+ Register scratch_;
+
+ // Minor key encoding in 16 bits.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 14> {};
+
+ Major MajorKey() { return WriteInt32ToHeapNumber; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return the_int_.code() +
+ (the_heap_number_.code() << 4) +
+ (scratch_.code() << 8);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
+
+#ifdef DEBUG
+ void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
+#endif
+};
+
+
+// See comment for class.
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) {
+ Label max_negative_int;
+ // the_int_ has the answer which is a signed int32 but not a Smi.
+ // We test for the special value that has a different exponent. This test
+ // has the neat side effect of setting the flags according to the sign.
+ ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ __ cmp(the_int_, Operand(0x80000000));
+ __ b(eq, &max_negative_int);
+ // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
+ // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
+ uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ mov(scratch_, Operand(non_smi_exponent));
+ // Set the sign bit in scratch_ if the value was negative.
+ __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
+ // Subtract from 0 if the value was negative.
+ __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
+  // We should be masking the implicit first digit of the mantissa away here,
+ // but it just ends up combining harmlessly with the last digit of the
+ // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
+ // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
+ ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
+ __ str(scratch_, FieldMemOperand(the_heap_number_,
+ HeapNumber::kExponentOffset));
+ __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
+ __ str(scratch_, FieldMemOperand(the_heap_number_,
+ HeapNumber::kMantissaOffset));
+ __ Ret();
+
+ __ bind(&max_negative_int);
+ // The max negative int32 is stored as a positive number in the mantissa of
+ // a double because it uses a sign bit instead of using two's complement.
+ // The actual mantissa bits stored are all 0 because the implicit most
+ // significant 1 bit is not stored.
+ non_smi_exponent += 1 << HeapNumber::kExponentShift;
+ __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
+ __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
+ __ mov(ip, Operand(0));
+ __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+ __ Ret();
+}
+
+
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc) {
+ Label not_identical;
+ __ cmp(r0, Operand(r1));
+ __ b(ne, &not_identical);
+
+ Register exp_mask_reg = r5;
+ __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ Label heap_number, return_equal;
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cc == lt || cc == gt) {
+ __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
+ __ b(ge, slow);
+ } else {
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(eq, &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cc != eq) {
+ __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(ge, slow);
+ }
+ }
+ __ bind(&return_equal);
+ if (cc == lt) {
+ __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
+ } else if (cc == gt) {
+ __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
+ } else {
+ __ mov(r0, Operand(0)); // Things are <=, >=, ==, === themselves.
+ }
+ __ mov(pc, Operand(lr)); // Return.
+
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cc != lt && cc != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ and_(r3, r2, Operand(exp_mask_reg));
+ __ cmp(r3, Operand(exp_mask_reg));
+ __ b(ne, &return_equal);
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ // Or with all low-bits of mantissa.
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ orr(r0, r3, Operand(r2), SetCC);
+ // For equal we already have the right value in r0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if not
+ // (it's a NaN). For <= and >= we need to load r0 with the failing value
+ // if it's a NaN.
+ if (cc != eq) {
+ // All-zero means Infinity means equal.
+ __ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
+ if (cc == le) {
+ __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
+ }
+ }
+ __ mov(pc, Operand(lr)); // Return.
+ }
+ // No fall through here.
+
+ __ bind(&not_identical);
+}
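The NaN test embedded above reduces to a couple of word operations. A minimal sketch, assuming kExponentMask == 0x7ff00000 and kNonMantissaBitsInTopWord == 12:

#include <cstdint>

// A double is NaN iff all 11 exponent bits are set and the 52-bit mantissa
// is non-zero; an all-ones exponent with a zero mantissa is an Infinity,
// which the code above treats as equal to itself.
bool IsNaNWords(uint32_t hi, uint32_t lo) {
  const uint32_t kExponentMask = 0x7ff00000u;
  if ((hi & kExponentMask) != kExponentMask) return false;
  return ((hi << 12) | lo) != 0;  // shift out sign and exponent bits
}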
+
+
+// See comment at call site.
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Label* rhs_not_nan,
+ Label* slow,
+ bool strict) {
+ Label lhs_is_smi;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &lhs_is_smi);
+
+ // Rhs is a Smi. Check whether the non-smi is a heap number.
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ if (strict) {
+ // If lhs was not a number and rhs was a Smi then strict equality cannot
+ // succeed. Return non-equal (r0 is already not zero).
+ __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
+ } else {
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // the runtime.
+ __ b(ne, slow);
+ }
+
+ // Rhs is a smi, lhs is a number.
+ __ push(lr);
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // r3 and r2 are rhs as double.
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ // We now have both loaded as doubles but we can skip the lhs nan check
+ // since it's a Smi.
+ __ pop(lr);
+ __ jmp(rhs_not_nan);
+
+ __ bind(&lhs_is_smi);
+ // Lhs is a Smi. Check whether the non-smi is a heap number.
+ __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+ if (strict) {
+ // If rhs was not a number and lhs was a Smi then strict equality cannot
+ // succeed. Return non-equal.
+ __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal.
+ __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
+ } else {
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // the runtime.
+ __ b(ne, slow);
+ }
+
+ // Lhs is a smi, rhs is a number.
+ // r0 is Smi and r1 is heap number.
+ __ push(lr);
+ __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ // Fall through to both_loaded_as_doubles.
+}
+
+
+void EmitNanCheck(MacroAssembler* masm, Label* rhs_not_nan, Condition cc) {
+ bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+ Register lhs_exponent = exp_first ? r0 : r1;
+ Register rhs_exponent = exp_first ? r2 : r3;
+ Register lhs_mantissa = exp_first ? r1 : r0;
+ Register rhs_mantissa = exp_first ? r3 : r2;
+ Label one_is_nan, neither_is_nan;
+
+ Register exp_mask_reg = r5;
+
+ __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+ __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
+ __ cmp(r4, Operand(exp_mask_reg));
+ __ b(ne, rhs_not_nan);
+ __ mov(r4,
+ Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
+ SetCC);
+ __ b(ne, &one_is_nan);
+ __ cmp(rhs_mantissa, Operand(0));
+ __ b(ne, &one_is_nan);
+
+ __ bind(rhs_not_nan);
+ __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+ __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
+ __ cmp(r4, Operand(exp_mask_reg));
+ __ b(ne, &neither_is_nan);
+ __ mov(r4,
+ Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
+ SetCC);
+ __ b(ne, &one_is_nan);
+ __ cmp(lhs_mantissa, Operand(0));
+ __ b(eq, &neither_is_nan);
+
+ __ bind(&one_is_nan);
+ // NaN comparisons always fail.
+ // Load whatever we need in r0 to make the comparison fail.
+ if (cc == lt || cc == le) {
+ __ mov(r0, Operand(GREATER));
+ } else {
+ __ mov(r0, Operand(LESS));
+ }
+ __ mov(pc, Operand(lr)); // Return.
+
+ __ bind(&neither_is_nan);
+}
+
+
+// See comment at call site.
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
+ bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+ Register lhs_exponent = exp_first ? r0 : r1;
+ Register rhs_exponent = exp_first ? r2 : r3;
+ Register lhs_mantissa = exp_first ? r1 : r0;
+ Register rhs_mantissa = exp_first ? r3 : r2;
+
+ // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
+ if (cc == eq) {
+ // Doubles are not equal unless they have the same bit pattern.
+ // Exception: 0 and -0.
+ __ cmp(lhs_mantissa, Operand(rhs_mantissa));
+ __ orr(r0, lhs_mantissa, Operand(rhs_mantissa), LeaveCC, ne);
+ // Return non-zero if the numbers are unequal.
+ __ mov(pc, Operand(lr), LeaveCC, ne);
+
+ __ sub(r0, lhs_exponent, Operand(rhs_exponent), SetCC);
+ // If exponents are equal then return 0.
+ __ mov(pc, Operand(lr), LeaveCC, eq);
+
+ // Exponents are unequal. The only way we can return that the numbers
+ // are equal is if one is -0 and the other is 0. We already dealt
+ // with the case where both are -0 or both are 0.
+ // We start by seeing if the mantissas (that are equal) or the bottom
+ // 31 bits of the rhs exponent are non-zero. If so we return not
+ // equal.
+ __ orr(r4, rhs_mantissa, Operand(rhs_exponent, LSL, kSmiTagSize), SetCC);
+ __ mov(r0, Operand(r4), LeaveCC, ne);
+ __ mov(pc, Operand(lr), LeaveCC, ne); // Return conditionally.
+ // Now they are equal if and only if the lhs exponent is zero in its
+ // low 31 bits.
+ __ mov(r0, Operand(lhs_exponent, LSL, kSmiTagSize));
+ __ mov(pc, Operand(lr));
+ } else {
+ // Call a native function to compare the two non-NaN doubles: a C
+ // routine that must not cause GC or other trouble.
+ __ mov(r5, Operand(ExternalReference::compare_doubles()));
+ __ Jump(r5); // Tail call.
+ }
+}
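The eq path above is bit-pattern equality with a carve-out for signed zeros. The same decision in C, taking each double as an (exponent word, mantissa word) pair; the mantissa low words are compared first, just as the cmp does:

#include <cstdint>

bool EqualNonNanDoubles(uint32_t lhs_hi, uint32_t lhs_lo,
                        uint32_t rhs_hi, uint32_t rhs_lo) {
  if (lhs_lo != rhs_lo) return false;  // low mantissa words differ
  if (lhs_hi == rhs_hi) return true;   // identical bit patterns
  // Exponent words differ, so only +0 vs -0 can still compare equal: both
  // values must be zero everywhere except the sign bit (shifted out here).
  return ((rhs_hi << 1) | rhs_lo) == 0 && ((lhs_hi << 1) | lhs_lo) == 0;
}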
+
+
+// See comment at call site.
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
+ // If either operand is a JSObject or an oddball value, then they are
+ // not equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ Label first_non_object;
+ // Get the type of the first operand into r2 and compare it with
+ // FIRST_JS_OBJECT_TYPE.
+ __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, &first_non_object);
+
+ // Return non-zero (r0 is not zero).
+ Label return_not_equal;
+ __ bind(&return_not_equal);
+ __ mov(pc, Operand(lr)); // Return.
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ cmp(r2, Operand(ODDBALL_TYPE));
+ __ b(eq, &return_not_equal);
+
+ __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE);
+ __ b(ge, &return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ cmp(r3, Operand(ODDBALL_TYPE));
+ __ b(eq, &return_not_equal);
+}
+
+
+// See comment at call site.
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
+ Label* both_loaded_as_doubles,
+ Label* not_heap_numbers,
+ Label* slow) {
+ __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
+ __ b(ne, not_heap_numbers);
+ __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
+ __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
+
+ // Both are heap numbers. Load them up then jump to the code we have
+ // for that.
+ __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ jmp(both_loaded_as_doubles);
+}
+
+
+// Fast negative check for symbol-to-symbol equality.
+static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
+ // r2 is object type of r0.
+ __ tst(r2, Operand(kIsNotStringMask));
+ __ b(ne, slow);
+ __ tst(r2, Operand(kIsSymbolMask));
+ __ b(eq, slow);
+ __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
+ __ b(ge, slow);
+ __ tst(r3, Operand(kIsSymbolMask));
+ __ b(eq, slow);
+
+ // Both are symbols. We already checked they weren't the same pointer
+ // so they are not equal.
+ __ mov(r0, Operand(1)); // Non-zero indicates not equal.
+ __ mov(pc, Operand(lr)); // Return.
+}
+
+
+// On entry r0 and r1 are the things to be compared. On exit r0 is 0,
+// positive or negative to indicate the result of the comparison.
+void CompareStub::Generate(MacroAssembler* masm) {
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles, rhs_not_nan;
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Handle the case where the objects are identical. Either returns the answer
+ // or goes to slow. Only falls through if the objects were not identical.
+ EmitIdenticalObjectComparison(masm, &slow, cc_);
+
+ // If either is a Smi (we know that not both are), then they can only
+ // be strictly equal if the other is a HeapNumber.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ and_(r2, r0, Operand(r1));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &not_smis);
+ // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
+ // 1) Return the answer.
+ // 2) Go to slow.
+ // 3) Fall through to both_loaded_as_doubles.
+ // 4) Jump to rhs_not_nan.
+ // In cases 3 and 4 we have found out we were dealing with a number-number
+ // comparison and the numbers have been loaded into r0, r1, r2, r3 as doubles.
+ EmitSmiNonsmiComparison(masm, &rhs_not_nan, &slow, strict_);
+
+ __ bind(&both_loaded_as_doubles);
+ // r0, r1, r2, r3 are the double representations of the left hand side
+ // and the right hand side.
+
+ // Checks for NaN in the doubles we have loaded. Can return the answer or
+ // fall through if neither is a NaN. Also binds rhs_not_nan.
+ EmitNanCheck(masm, &rhs_not_nan, cc_);
+
+ // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
+ // answer. Never falls through.
+ EmitTwoNonNanDoubleComparison(masm, cc_);
+
+ __ bind(&not_smis);
+ // At this point we know we are dealing with two different objects,
+ // and neither of them is a Smi. The objects are in r0 and r1.
+ if (strict_) {
+ // This returns non-equal for some object types, or falls through if it
+ // was not lucky.
+ EmitStrictTwoHeapObjectCompare(masm);
+ }
+
+ Label check_for_symbols;
+ // Check for heap-number-heap-number comparison. Can jump to slow case,
+ // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
+ // that case. If the inputs are not doubles then jumps to check_for_symbols.
+ // In this case r2 will contain the type of r0.
+ EmitCheckForTwoHeapNumbers(masm,
+ &both_loaded_as_doubles,
+ &check_for_symbols,
+ &slow);
+
+ __ bind(&check_for_symbols);
+ if (cc_ == eq) {
+ // Either jumps to slow or returns the answer. Assumes that r2 is the type
+ // of r0 on entry.
+ EmitCheckForSymbols(masm, &slow);
+ }
+
+ __ bind(&slow);
+ __ push(lr);
+ __ push(r1);
+ __ push(r0);
+ // Figure out which native to call and set up the arguments.
+ Builtins::JavaScript native;
+ int arg_count = 1; // Not counting receiver.
+ if (cc_ == eq) {
+ native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ native = Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if (cc_ == lt || cc_ == le) {
+ ncr = GREATER;
+ } else {
+ ASSERT(cc_ == gt || cc_ == ge); // remaining cases
+ ncr = LESS;
+ }
+ arg_count++;
+ __ mov(r0, Operand(Smi::FromInt(ncr)));
+ __ push(r0);
+ }
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ mov(r0, Operand(arg_count));
+ __ InvokeBuiltin(native, CALL_JS);
+ __ cmp(r0, Operand(0));
+ __ pop(pc);
+}
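The extra ncr argument pushed for the COMPARE builtin encodes how a NaN operand should be reported so that every comparison involving NaN comes out false. A short sketch of the convention, assuming the builtin returns ncr whenever either operand is NaN:

enum { LESS = -1, EQUAL = 0, GREATER = 1 };

// x < y and x <= y must be false for NaN, so the builtin answers GREATER;
// x > y and x >= y must be false, so it answers LESS. The caller then tests
// the sign of the result against cc_ and the condition fails either way.
int NanCompareResult(bool cc_is_lt_or_le) {
  return cc_is_lt_or_le ? GREATER : LESS;
}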
+
+
+// Allocates a heap number or jumps to the label if the young space is full and
+// a scavenge is needed.
static void AllocateHeapNumber(
MacroAssembler* masm,
Label* need_gc, // Jump here if young space is full.
@@ -4379,78 +4936,122 @@ static void AllocateHeapNumber(
// We fall into this code if the operands were Smis, but the result was
// not (e.g. overflow). We branch into this code (to the not_smi label) if
-// the operands were not both Smi.
+// the operands were not both Smi. The operands are in r0 and r1. In order
+// to call the C-implemented binary fp operation routines, we need to end up
+// with the double precision floating point operands in r0 and r1 (for the
+// value in r1) and r2 and r3 (for the value in r0).
static void HandleBinaryOpSlowCases(MacroAssembler* masm,
Label* not_smi,
const Builtins::JavaScript& builtin,
Token::Value operation,
- int swi_number,
OverwriteMode mode) {
- Label slow;
+ Label slow, slow_pop_2_first, do_the_call;
+ Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
+ // Smi-smi case (overflow).
+ // Since both are Smis there is no heap number to overwrite, so allocate.
+ // The new heap number is in r5. r6 and r7 are scratch.
+ AllocateHeapNumber(masm, &slow, r5, r6, r7);
+ // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ __ push(lr);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ __ jmp(&do_the_call); // Tail call. No return.
+
+ // We jump to here if something goes wrong (one param is not a number of any
+ // sort or new-space allocation fails).
__ bind(&slow);
__ push(r1);
__ push(r0);
__ mov(r0, Operand(1)); // Set number of arguments.
- __ InvokeBuiltin(builtin, JUMP_JS); // Tail call.
+ __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
+ // We branch here if at least one of r0 and r1 is not a Smi.
__ bind(not_smi);
+ if (mode == NO_OVERWRITE) {
+ // In the case where there is no chance of an overwritable float we may as
+ // well do the allocation immediately while r0 and r1 are untouched.
+ AllocateHeapNumber(masm, &slow, r5, r6, r7);
+ }
+
+ // Move r0 to a double in r2-r3.
__ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &slow); // We can't handle a Smi-double combination yet.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &slow); // We can't handle a Smi-double combination yet.
- // Get map of r0 into r2.
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- // Get type of r0 into r3.
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r3, Operand(HEAP_NUMBER_TYPE));
- __ b(ne, &slow);
- // Get type of r1 into r3.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- // Check they are both the same map (heap number map).
- __ cmp(r2, r3);
+ __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
- // Both are doubles.
+ if (mode == OVERWRITE_RIGHT) {
+ __ mov(r5, Operand(r0)); // Overwrite this heap number.
+ }
// Calling convention says that second double is in r2 and r3.
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
-
- if (mode == NO_OVERWRITE) {
- // Get address of new heap number into r5.
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+ __ jmp(&finished_loading_r0);
+ __ bind(&r0_is_smi);
+ if (mode == OVERWRITE_RIGHT) {
+ // We can't overwrite a Smi so get address of new heap number into r5.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
- __ push(lr);
- __ push(r5);
- } else if (mode == OVERWRITE_LEFT) {
- __ push(lr);
- __ push(r1);
- } else {
- ASSERT(mode == OVERWRITE_RIGHT);
- __ push(lr);
- __ push(r0);
+ }
+ // Write Smi from r0 to r3 and r2 in double format.
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub3(r3, r2, r7, r6);
+ __ push(lr);
+ __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ __ bind(&finished_loading_r0);
+
+ // Move r1 to a double in r0-r1.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
+ __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(ne, &slow);
+ if (mode == OVERWRITE_LEFT) {
+ __ mov(r5, Operand(r1)); // Overwrite this heap number.
}
// Calling convention says that first double is in r0 and r1.
__ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
- __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+ __ jmp(&finished_loading_r1);
+ __ bind(&r1_is_smi);
+ if (mode == OVERWRITE_LEFT) {
+ // We can't overwrite a Smi so get address of new heap number into r5.
+ AllocateHeapNumber(masm, &slow, r5, r6, r7);
+ }
+ // Write Smi from r1 to r1 and r0 in double format.
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub4(r1, r0, r7, r6);
+ __ push(lr);
+ __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ __ bind(&finished_loading_r1);
+
+ __ bind(&do_the_call);
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+ // r5: Address of heap number for result.
+ __ push(lr); // For later.
+ __ push(r5); // Address of heap number that is answer.
// Call C routine that may not cause GC or other trouble.
__ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
-#if !defined(__arm__)
- // Notify the simulator that we are calling an add routine in C.
- __ swi(swi_number);
-#else
- // Actually call the add routine written in C.
__ Call(r5);
-#endif
// Store answer in the overwritable heap number.
__ pop(r4);
-#if !defined(__ARM_EABI__) && defined(__arm__)
+#if !defined(USE_ARM_EABI)
// Double returned in fp coprocessor register 0 and 1, encoded as register
// cr8. Offsets must be divisible by 4 for coprocessor so we need to
// subtract the tag from r4.
__ sub(r5, r4, Operand(kHeapObjectTag));
__ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
#else
- // Double returned in fp coprocessor register 0 and 1.
+ // Double returned in core registers r0 and r1.
__ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
- __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize));
+ __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4));
#endif
__ mov(r0, Operand(r4));
// And we are done.
@@ -4458,6 +5059,216 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
}
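The register shuffling in HandleBinaryOpSlowCases matches the soft-float EABI, under which a C function taking two doubles receives the first in r0/r1 and the second in r2/r3. A sketch of the call being set up, with a hypothetical routine name standing in for the double_fp_operation target:

extern "C" double binary_fp_op(double x, double y);  // hypothetical name

// The left operand (originally in r1) travels in r0/r1 and the right operand
// (originally in r0) in r2/r3, exactly as the do_the_call block arranges.
double CallLikeTheStub(double left, double right) {
  return binary_fp_op(left, right);
}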
+// Tries to get a signed int32 out of a double precision floating point heap
+// number. Rounds towards 0. Fastest for doubles that are in the ranges
+// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds
+// almost to the range of signed int32 values that are not Smis. Jumps to the
+// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
+// (excluding the endpoints).
+static void GetInt32(MacroAssembler* masm,
+ Register source,
+ Register dest,
+ Register scratch,
+ Register scratch2,
+ Label* slow) {
+ Label right_exponent, done;
+ // Get exponent word.
+ __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
+ // Load dest with zero. We use this either for the final shift or
+ // for the answer.
+ __ mov(dest, Operand(0));
+ // Check whether the exponent matches a 32 bit signed int that is not a Smi.
+ // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
+ // the exponent that we are fastest at and also the highest exponent we can
+ // handle here.
+ const uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ cmp(scratch2, Operand(non_smi_exponent));
+ // If we have a match of the int32-but-not-Smi exponent then skip some logic.
+ __ b(eq, &right_exponent);
+ // If the exponent is higher than that then go to slow case. This catches
+ // numbers that don't fit in a signed int32, infinities and NaNs.
+ __ b(gt, slow);
+
+ // We know the exponent is smaller than 30 (biased). If it is less than
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+ // it rounds to zero.
+ const uint32_t zero_exponent =
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+ __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
+ // Dest already has a Smi zero.
+ __ b(lt, &done);
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
+ // We now have the exponent in dest. Subtract from 30 to get
+ // how much to shift down.
+ __ rsb(dest, dest, Operand(30));
+
+ __ bind(&right_exponent);
+ // Get the top bits of the mantissa.
+ __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
+ // Put back the implicit 1.
+ __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
+ // Shift up the mantissa bits to take up the space the exponent used to take.
+ // We just OR'ed in the implicit bit, which accounts for one of those bits,
+ // and we want to leave the sign bit 0, so we subtract 2 from the shift distance.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
+ // Put sign in zero flag.
+ __ tst(scratch, Operand(HeapNumber::kSignMask));
+ // Get the second half of the double. For some exponents we don't actually
+ // need this because the bits get shifted out again, but it's probably slower
+ // to test than just to do it.
+ __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 22 bits so that only the top 10 bits of the low word remain.
+ __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
+ // Move down according to the exponent.
+ __ mov(dest, Operand(scratch, LSR, dest));
+ // Fix sign if sign bit was set.
+ __ rsb(dest, dest, Operand(0), LeaveCC, ne);
+ __ bind(&done);
+}
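GetInt32's exponent arithmetic can be restated in portable C. A minimal sketch, assuming bias 1023, a 20-bit exponent shift and the shift_distance of 10 used above; NaNs, infinities and magnitudes of 2^31 or more take the slow path, as in the stub:

#include <cstdint>

bool TruncateDoubleWordsToInt32(uint32_t hi, uint32_t lo, int32_t* out) {
  int exponent = static_cast<int>((hi >> 20) & 0x7ff) - 1023;
  if (exponent > 30) return false;              // slow case
  if (exponent < 0) { *out = 0; return true; }  // |d| < 1 truncates to 0
  // Top 32 bits of the mantissa with the implicit leading 1 restored.
  uint32_t mantissa =
      (((hi & 0x000fffffu) | 0x00100000u) << 10) | (lo >> 22);
  uint32_t result = mantissa >> (30 - exponent);  // rounds towards 0
  *out = (hi & 0x80000000u) ? -static_cast<int32_t>(result)
                            : static_cast<int32_t>(result);
  return true;
}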
+
+
+// For bitwise ops where the inputs are not both Smis, we try here to determine
+// whether both inputs are either Smis or at least heap numbers that can be
+// represented by a 32 bit signed value. We truncate towards zero as required
+// by the ES spec. If this is the case we do the bitwise op and see if the
+// result is a Smi. If so, great, otherwise we try to find a heap number to
+// write the answer into (either by allocating or by overwriting).
+// On entry the operands are in r0 and r1. On exit the answer is in r0.
+void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
+ Label slow, result_not_a_smi;
+ Label r0_is_smi, r1_is_smi;
+ Label done_checking_r0, done_checking_r1;
+
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
+ __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(ne, &slow);
+ GetInt32(masm, r1, r3, r4, r5, &slow);
+ __ jmp(&done_checking_r1);
+ __ bind(&r1_is_smi);
+ __ mov(r3, Operand(r1, ASR, 1));
+ __ bind(&done_checking_r1);
+
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(ne, &slow);
+ GetInt32(masm, r0, r2, r4, r5, &slow);
+ __ jmp(&done_checking_r0);
+ __ bind(&r0_is_smi);
+ __ mov(r2, Operand(r0, ASR, 1));
+ __ bind(&done_checking_r0);
+
+ // r0 and r1: Original operands (Smi or heap numbers).
+ // r2 and r3: Signed int32 operands.
+ switch (op_) {
+ case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
+ case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
+ case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
+ case Token::SAR:
+ // Use only the 5 least significant bits of the shift count.
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, ASR, r2));
+ break;
+ case Token::SHR:
+ // Use only the 5 least significant bits of the shift count.
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, LSR, r2), SetCC);
+ // SHR is special because it is required to produce a positive answer.
+ // The code below for writing into heap numbers isn't capable of writing
+ // the register as an unsigned int so we go to slow case if we hit this
+ // case.
+ __ b(mi, &slow);
+ break;
+ case Token::SHL:
+ // Use only the 5 least significant bits of the shift count.
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, LSL, r2));
+ break;
+ default: UNREACHABLE();
+ }
+ // Check that the *signed* result fits in a Smi.
+ __ add(r3, r2, Operand(0x40000000), SetCC);
+ __ b(mi, &result_not_a_smi);
+ __ mov(r0, Operand(r2, LSL, kSmiTagSize));
+ __ Ret();
+
+ Label have_to_allocate, got_a_heap_number;
+ __ bind(&result_not_a_smi);
+ switch (mode_) {
+ case OVERWRITE_RIGHT: {
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &have_to_allocate);
+ __ mov(r5, Operand(r0));
+ break;
+ }
+ case OVERWRITE_LEFT: {
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &have_to_allocate);
+ __ mov(r5, Operand(r1));
+ break;
+ }
+ case NO_OVERWRITE: {
+ // Get a new heap number in r5. r6 and r7 are scratch.
+ AllocateHeapNumber(masm, &slow, r5, r6, r7);
+ }
+ default: break;
+ }
+ __ bind(&got_a_heap_number);
+ // r2: Answer as signed int32.
+ // r5: Heap number to write answer into.
+
+ // Nothing can go wrong now, so move the heap number to r0, which is the
+ // result.
+ __ mov(r0, Operand(r5));
+
+ // Tail call that writes the int32 in r2 to the heap number in r0, using
+ // r3 as scratch. r0 is preserved and returned.
+ WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+ if (mode_ != NO_OVERWRITE) {
+ __ bind(&have_to_allocate);
+ // Get a new heap number in r5. r6 and r7 are scratch.
+ AllocateHeapNumber(masm, &slow, r5, r6, r7);
+ __ jmp(&got_a_heap_number);
+ }
+
+ // If all else failed then we go to the runtime system.
+ __ bind(&slow);
+ __ push(r1); // restore stack
+ __ push(r0);
+ __ mov(r0, Operand(1)); // 1 argument (not counting receiver).
+ switch (op_) {
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
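The add-0x40000000 test above is the standard Smi range check. In C terms, assuming the 32-bit Smi encoding with kSmiTag == 0 and kSmiTagSize == 1:

#include <cstdint>

// A value fits in a Smi iff it fits in 31 signed bits. Adding 2^30 maps
// [-2^30, 2^30 - 1] onto the non-negative int32s, so the sign of the sum
// (the mi condition above) signals out-of-range.
bool FitsInSmi(int32_t v) {
  return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
}

int32_t SmiTag(int32_t v) {
  return static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
}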
+
+
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// r1 : x
// r0 : y
@@ -4483,7 +5294,6 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
&not_smi,
Builtins::ADD,
Token::ADD,
- assembler::arm::simulator_fp_add,
mode_);
break;
}
@@ -4503,7 +5313,6 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
&not_smi,
Builtins::SUB,
Token::SUB,
- assembler::arm::simulator_fp_sub,
mode_);
break;
}
@@ -4532,14 +5341,16 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
&not_smi,
Builtins::MUL,
Token::MUL,
- assembler::arm::simulator_fp_mul,
- mode_);
+ mode_);
break;
}
case Token::BIT_OR:
case Token::BIT_AND:
- case Token::BIT_XOR: {
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::SHL: {
Label slow;
ASSERT(kSmiTag == 0); // adjust code below
__ tst(r2, Operand(kSmiTagMask));
@@ -4548,84 +5359,47 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break;
case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break;
case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
- default: UNREACHABLE();
- }
- __ Ret();
- __ bind(&slow);
- __ push(r1); // restore stack
- __ push(r0);
- __ mov(r0, Operand(1)); // 1 argument (not counting receiver).
- switch (op_) {
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
- break;
- default:
- UNREACHABLE();
- }
- break;
- }
-
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- Label slow;
- ASSERT(kSmiTag == 0); // adjust code below
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &slow);
- // remove tags from operands (but keep sign)
- __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
- __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
- // use only the 5 least significant bits of the shift count
- __ and_(r2, r2, Operand(0x1f));
- // perform operation
- switch (op_) {
case Token::SAR:
- __ mov(r3, Operand(r3, ASR, r2));
- // no checks of result necessary
+ // Remove tags from right operand.
+ __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
+ // Use only the 5 least significant bits of the shift count.
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r0, Operand(r1, ASR, r2));
+ // Smi tag result.
+ __ and_(r0, r0, Operand(~kSmiTagMask));
break;
-
case Token::SHR:
+ // Remove tags from operands. We can't do this on a 31 bit number
+ // because then the 0s get shifted into bit 30 instead of bit 31.
+ __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
+ __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
+ // Use only the 5 least significant bits of the shift count.
+ __ and_(r2, r2, Operand(0x1f));
__ mov(r3, Operand(r3, LSR, r2));
- // check that the *unsigned* result fits in a smi
- // neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging
- // - 0x40000000: this number would convert to negative when
- // smi tagging these two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi
- __ and_(r2, r3, Operand(0xc0000000), SetCC);
+ // Unsigned shift is not allowed to produce a negative number, so
+ // check the sign bit and the sign bit after Smi tagging.
+ __ tst(r3, Operand(0xc0000000));
__ b(ne, &slow);
+ // Smi tag result.
+ __ mov(r0, Operand(r3, LSL, kSmiTagSize));
break;
-
case Token::SHL:
+ // Remove tags from operands.
+ __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
+ __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
+ // Use only the 5 least significant bits of the shift count.
+ __ and_(r2, r2, Operand(0x1f));
__ mov(r3, Operand(r3, LSL, r2));
- // check that the *signed* result fits in a smi
+ // Check that the signed result fits in a Smi.
__ add(r2, r3, Operand(0x40000000), SetCC);
__ b(mi, &slow);
+ __ mov(r0, Operand(r3, LSL, kSmiTagSize));
break;
-
default: UNREACHABLE();
}
- // tag result and store it in r0
- ASSERT(kSmiTag == 0); // adjust code below
- __ mov(r0, Operand(r3, LSL, kSmiTagSize));
__ Ret();
- // slow case
__ bind(&slow);
- __ push(r1); // restore stack
- __ push(r0);
- __ mov(r0, Operand(1)); // 1 argument (not counting receiver).
- switch (op_) {
- case Token::SAR: __ InvokeBuiltin(Builtins::SAR, JUMP_JS); break;
- case Token::SHR: __ InvokeBuiltin(Builtins::SHR, JUMP_JS); break;
- case Token::SHL: __ InvokeBuiltin(Builtins::SHL, JUMP_JS); break;
- default: UNREACHABLE();
- }
+ HandleNonSmiBitwiseOp(masm);
break;
}
@@ -4656,11 +5430,11 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void UnarySubStub::Generate(MacroAssembler* masm) {
Label undo;
Label slow;
- Label done;
+ Label not_smi;
// Branch to the non-Smi code below if the value is not a smi.
__ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &slow);
+ __ b(ne, &not_smi);
// Enter runtime system if the value of the expression is zero
// to make sure that we switch between 0 and -0.
@@ -4672,33 +5446,56 @@ void UnarySubStub::Generate(MacroAssembler* masm) {
__ rsb(r1, r0, Operand(0), SetCC);
__ b(vs, &slow);
- // If result is a smi we are done.
- __ tst(r1, Operand(kSmiTagMask));
- __ mov(r0, Operand(r1), LeaveCC, eq); // conditionally set r0 to result
- __ b(eq, &done);
+ __ mov(r0, Operand(r1)); // Set r0 to result.
+ __ StubReturn(1);
// Enter runtime system.
__ bind(&slow);
__ push(r0);
- __ mov(r0, Operand(0)); // set number of arguments
+ __ mov(r0, Operand(0)); // Set number of arguments.
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
- __ bind(&done);
+ __ bind(&not_smi);
+ __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ b(ne, &slow);
+ // r0 is a heap number. Get a new heap number in r1.
+ if (overwrite_) {
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ } else {
+ AllocateHeapNumber(masm, &slow, r1, r2, r3);
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ str(r2, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
+ __ mov(r0, Operand(r1));
+ }
__ StubReturn(1);
}
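The overwrite path above negates a heap number in place by flipping a single bit of its exponent word. The same trick in portable C++, assuming only the IEEE sign-bit layout:

#include <cstdint>
#include <cstring>

// Negation is a pure sign-bit flip, so it is exact for -0, NaNs and
// infinities alike; no arithmetic on the value is performed.
double NegateBySignBit(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  bits ^= 0x8000000000000000ull;  // HeapNumber::kSignMask in the top word
  std::memcpy(&d, &bits, sizeof bits);
  return d;
}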
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // r0 holds exception
- ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize); // adjust this code
+ // r0 holds the exception.
+
+ // Adjust this code if not the case.
+ ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop the sp to the top of the handler.
__ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
__ ldr(sp, MemOperand(r3));
- __ pop(r2); // pop next in chain
+
+ // Restore the next handler and frame pointer, discard handler state.
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(r2);
__ str(r2, MemOperand(r3));
- // restore parameter- and frame-pointer and pop state.
- __ ldm(ia_w, sp, r3.bit() | pp.bit() | fp.bit());
- // Before returning we restore the context from the frame pointer if not NULL.
- // The frame pointer is NULL in the exception handler of a JS entry frame.
+ ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+ __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
+
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of a
+ // JS entry frame.
__ cmp(fp, Operand(0));
// Set cp to NULL if fp is NULL.
__ mov(cp, Operand(0), LeaveCC, eq);
@@ -4709,39 +5506,41 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ mov(lr, Operand(pc));
}
#endif
+ ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ pop(pc);
}
void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
- // Fetch top stack handler.
+ // Adjust this code if not the case.
+ ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop sp to the top stack handler.
__ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
- __ ldr(r3, MemOperand(r3));
+ __ ldr(sp, MemOperand(r3));
// Unwind the handlers until the ENTRY handler is found.
Label loop, done;
__ bind(&loop);
// Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kAddressDisplacement +
- StackHandlerConstants::kStateOffset;
- __ ldr(r2, MemOperand(r3, kStateOffset));
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ __ ldr(r2, MemOperand(sp, kStateOffset));
__ cmp(r2, Operand(StackHandler::ENTRY));
__ b(eq, &done);
// Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kAddressDisplacement +
- StackHandlerConstants::kNextOffset;
- __ ldr(r3, MemOperand(r3, kNextOffset));
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ __ ldr(sp, MemOperand(sp, kNextOffset));
__ jmp(&loop);
__ bind(&done);
// Set the top handler address to next handler past the current ENTRY handler.
- __ ldr(r0, MemOperand(r3, kNextOffset));
- __ mov(r2, Operand(ExternalReference(Top::k_handler_address)));
- __ str(r0, MemOperand(r2));
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(r0);
+ __ str(r0, MemOperand(r3));
// Set external caught exception to false.
- __ mov(r0, Operand(false));
ExternalReference external_caught(Top::k_external_caught_exception_address);
+ __ mov(r0, Operand(false));
__ mov(r2, Operand(external_caught));
__ str(r0, MemOperand(r2));
@@ -4751,21 +5550,17 @@ void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
__ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
__ str(r0, MemOperand(r2));
- // Restore the stack to the address of the ENTRY handler
- __ mov(sp, Operand(r3));
-
- // Stack layout at this point. See also PushTryHandler
- // r3, sp -> next handler
- // state (ENTRY)
- // pp
- // fp
- // lr
-
- // Discard ENTRY state (r2 is not used), and restore parameter-
- // and frame-pointer and pop state.
- __ ldm(ia_w, sp, r2.bit() | r3.bit() | pp.bit() | fp.bit());
- // Before returning we restore the context from the frame pointer if not NULL.
- // The frame pointer is NULL in the exception handler of a JS entry frame.
+ // Stack layout at this point. See also StackHandlerConstants.
+ // sp -> state (ENTRY)
+ // fp
+ // lr
+
+ // Discard handler state (r2 is not used) and restore frame pointer.
+ ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+ __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of a
+ // JS entry frame.
__ cmp(fp, Operand(0));
// Set cp to NULL if fp is NULL.
__ mov(cp, Operand(0), LeaveCC, eq);
@@ -4776,6 +5571,7 @@ void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
__ mov(lr, Operand(pc));
}
#endif
+ ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ pop(pc);
}
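The unwind loop above walks the handler chain with sp itself riding along the next links. Restated in C++, assuming the four-word handler layout the new asserts pin down (next at offset 0, then state, fp, pc):

struct StackHandler {
  StackHandler* next;   // kNextOffset  == 0 * kPointerSize
  int           state;  // kStateOffset == 1 * kPointerSize
  void*         fp;     // kFPOffset    == 2 * kPointerSize
  void*         pc;     // kPCOffset    == 3 * kPointerSize
};

// Follow the next links until the ENTRY handler is reached, as the loop does
// by reloading sp from MemOperand(sp, kNextOffset).
StackHandler* UnwindToEntry(StackHandler* top, int entry_state) {
  while (top->state != entry_state) top = top->next;
  return top;
}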
@@ -4793,7 +5589,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
if (do_gc) {
// Passing r0.
- __ Call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
+ ExternalReference gc_reference = ExternalReference::perform_gc_function();
+ __ Call(gc_reference.address(), RelocInfo::RUNTIME_ENTRY);
}
ExternalReference scope_depth =
@@ -4817,14 +5614,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// support moving the C entry code stub. This should be fixed, but currently
// this is OK because the CEntryStub gets generated so early in the V8 boot
// sequence that it is not moving ever.
- __ add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4
- __ push(lr);
-#if !defined(__arm__)
- // Notify the simulator of the transition to C code.
- __ swi(assembler::arm::call_rt_r5);
-#else /* !defined(__arm__) */
- __ Jump(r5);
-#endif /* !defined(__arm__) */
+ masm->add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4
+ masm->push(lr);
+ masm->Jump(r5);
if (always_allocate) {
// It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
@@ -4847,7 +5639,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- // pp: caller's parameter pointer pp (restored as C callee-saved)
__ LeaveExitFrame(frame_type);
// check if we should retry or throw exception
@@ -4887,9 +5678,8 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// r0: number of arguments including receiver
// r1: pointer to builtin function
// fp: frame pointer (restored after C call)
- // sp: stack pointer (restored as callee's pp after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
- // pp: caller's parameter pointer pp (C callee-saved)
// NOTE: Invocations of builtins may return failure objects
// instead of a proper result. The builtin entry handles
@@ -4960,7 +5750,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Called from C, so do not pop argc and args on exit (preserve sp)
// No need to save register-passed args
- // Save callee-saved registers (incl. cp, pp, and fp), sp, and lr
+ // Save callee-saved registers (incl. cp and fp), sp, and lr
__ stm(db_w, sp, kCalleeSaved | lr.bit());
// Get address of argv, see stm above.
@@ -5004,10 +5794,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ bind(&invoke);
// Must preserve r0-r4, r5-r7 are available.
__ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
- // If an exception not caught by another handler occurs, this handler returns
- // control to the code after the bl(&invoke) above, which restores all
- // kCalleeSaved registers (including cp, pp and fp) to their saved values
- // before returning a failure to C.
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the bl(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
// Clear any pending exceptions.
__ mov(ip, Operand(ExternalReference::the_hole_value_location()));
@@ -5070,6 +5860,66 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
+// This stub performs an instanceof, calling the builtin function if
+// necessary. Uses r1 for the object, r0 for the function that it may
+// be an instance of (these are fetched from the stack).
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Get the object - slow case for smis (we may need to throw an exception
+ // depending on the rhs).
+ Label slow, loop, is_instance, is_not_instance;
+ __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
+ __ BranchOnSmi(r0, &slow);
+
+ // Check that the left hand is a JS object and put map in r3.
+ __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, &slow);
+ __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
+ __ b(gt, &slow);
+
+ // Get the prototype of the function (r4 is result, r2 is scratch).
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+ __ TryGetFunctionPrototype(r1, r4, r2, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ BranchOnSmi(r4, &slow);
+ __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, &slow);
+ __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
+ __ b(gt, &slow);
+
+ // Register mapping: r3 is object map and r4 is function prototype.
+ // Get prototype of object into r2.
+ __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
+
+ // Loop through the prototype chain looking for the function prototype.
+ __ bind(&loop);
+ __ cmp(r2, Operand(r4));
+ __ b(eq, &is_instance);
+ __ cmp(r2, Operand(Factory::null_value()));
+ __ b(eq, &is_not_instance);
+ __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
+ __ jmp(&loop);
+
+ __ bind(&is_instance);
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ __ pop();
+ __ pop();
+ __ mov(pc, Operand(lr)); // Return.
+
+ __ bind(&is_not_instance);
+ __ mov(r0, Operand(Smi::FromInt(1)));
+ __ pop();
+ __ pop();
+ __ mov(pc, Operand(lr)); // Return.
+
+ // Slow-case. Tail call builtin.
+ __ bind(&slow);
+ __ mov(r0, Operand(1)); // Arg count without receiver.
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
+}
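The loop above is the usual prototype-chain walk. A pseudo-C++ sketch with hypothetical accessors standing in for the map/prototype field loads (the stub itself returns Smi 0 for an instance and Smi 1 otherwise):

struct Object {             // hypothetical stand-in for a tagged object
  Object* map_prototype();  // the two ldr instructions in the loop
};
Object* null_value();       // sentinel compared against Factory::null_value()

bool IsInstance(Object* obj, Object* function_prototype) {
  for (Object* p = obj->map_prototype(); p != null_value();
       p = p->map_prototype()) {
    if (p == function_prototype) return true;
  }
  return false;
}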
+
+
void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label adaptor;
@@ -5098,8 +5948,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Check that the key is a smi.
Label slow;
- __ tst(r1, Operand(kSmiTagMask));
- __ b(ne, &slow);
+ __ BranchOnNotSmi(r1, &slow);
// Check if the calling frame is an arguments adaptor frame.
Label adaptor;
@@ -5171,12 +6020,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Check that the function is really a JavaScript function.
// r1: pushed function (to be verified)
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &slow);
+ __ BranchOnSmi(r1, &slow);
// Get the map of the function object.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &slow);
// Fast-case: Invoke the function now.
@@ -5194,6 +6040,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
+int CompareStub::MinorKey() {
+ // Encode the two parameters in a unique 16 bit value.
+ ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
+ return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/codegen-arm.h b/V8Binding/v8/src/arm/codegen-arm.h
index a8cb777..7760e47 100644
--- a/V8Binding/v8/src/arm/codegen-arm.h
+++ b/V8Binding/v8/src/arm/codegen-arm.h
@@ -292,7 +292,10 @@ class CodeGenerator: public AstVisitor {
void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
void GenericBinaryOperation(Token::Value op, OverwriteMode overwrite_mode);
- void Comparison(Condition cc, bool strict = false);
+ void Comparison(Condition cc,
+ Expression* left,
+ Expression* right,
+ bool strict = false);
void SmiOperation(Token::Value op,
Handle<Object> value,
@@ -333,11 +336,15 @@ class CodeGenerator: public AstVisitor {
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
+ // Support for construct call checks.
+ void GenerateIsConstructCall(ZoneList<Expression*>* args);
+
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArgumentsAccess(ZoneList<Expression*>* args);
- // Support for accessing the value field of an object (used by Date).
+ // Support for accessing the class and value fields of an object.
+ void GenerateClassOf(ZoneList<Expression*>* args);
void GenerateValueOf(ZoneList<Expression*>* args);
void GenerateSetValueOf(ZoneList<Expression*>* args);
@@ -349,58 +356,14 @@ class CodeGenerator: public AstVisitor {
void GenerateLog(ZoneList<Expression*>* args);
- // Methods and constants for fast case switch statement support.
- //
- // Only allow fast-case switch if the range of labels is at most
- // this factor times the number of case labels.
- // Value is derived from comparing the size of code generated by the normal
- // switch code for Smi-labels to the size of a single pointer. If code
- // quality increases this number should be decreased to match.
- static const int kFastSwitchMaxOverheadFactor = 10;
-
- // Minimal number of switch cases required before we allow jump-table
- // optimization.
- static const int kFastSwitchMinCaseCount = 5;
-
- // The limit of the range of a fast-case switch, as a factor of the number
- // of cases of the switch. Each platform should return a value that
- // is optimal compared to the default code generated for a switch statement
- // on that platform.
- int FastCaseSwitchMaxOverheadFactor();
-
- // The minimal number of cases in a switch before the fast-case switch
- // optimization is enabled. Each platform should return a value that
- // is optimal compared to the default code generated for a switch statement
- // on that platform.
- int FastCaseSwitchMinCaseCount();
-
- // Allocate a jump table and create code to jump through it.
- // Should call GenerateFastCaseSwitchCases to generate the code for
- // all the cases at the appropriate point.
- void GenerateFastCaseSwitchJumpTable(SwitchStatement* node,
- int min_index,
- int range,
- Label* default_label,
- Vector<Label*> case_targets,
- Vector<Label> case_labels);
-
- // Generate the code for cases for the fast case switch.
- // Called by GenerateFastCaseSwitchJumpTable.
- void GenerateFastCaseSwitchCases(SwitchStatement* node,
- Vector<Label> case_labels,
- VirtualFrame* start_frame);
-
- // Fast support for constant-Smi switches.
- void GenerateFastCaseSwitchStatement(SwitchStatement* node,
- int min_index,
- int range,
- int default_index);
-
- // Fast support for constant-Smi switches. Tests whether switch statement
- // permits optimization and calls GenerateFastCaseSwitch if it does.
- // Returns true if the fast-case switch was generated, and false if not.
- bool TryGenerateFastCaseSwitchStatement(SwitchStatement* node);
+ // Fast support for Math.random().
+ void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+ // Fast support for Math.sin and Math.cos.
+ enum MathOp { SIN, COS };
+ void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
+ inline void GenerateMathSin(ZoneList<Expression*>* args);
+ inline void GenerateMathCos(ZoneList<Expression*>* args);
// Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation
diff --git a/V8Binding/v8/src/arm/constants-arm.h b/V8Binding/v8/src/arm/constants-arm.h
index 99eab23..f0311df 100644
--- a/V8Binding/v8/src/arm/constants-arm.h
+++ b/V8Binding/v8/src/arm/constants-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,27 @@
#ifndef V8_ARM_CONSTANTS_ARM_H_
#define V8_ARM_CONSTANTS_ARM_H_
+// The simulator emulates the EABI so we define the USE_ARM_EABI macro if we
+// are not running on real ARM hardware. One reason for this is that the
+// old ABI uses fp registers in the calling convention and the simulator does
+// not simulate fp registers or coprocessor instructions.
+#if defined(__ARM_EABI__) || !defined(__arm__)
+# define USE_ARM_EABI 1
+#endif
+
+// This means that interwork-compatible jump instructions are generated. We
+// want to generate them on the simulator too so it makes snapshots that can
+// be used on real hardware.
+#if defined(__THUMB_INTERWORK__) || !defined(__arm__)
+# define USE_THUMB_INTERWORK 1
+#endif
+
+// Simulator should support ARM5 instructions.
+#if !defined(__arm__)
+# define __ARM_ARCH_5__ 1
+# define __ARM_ARCH_5T__ 1
+#endif
+
namespace assembler {
namespace arm {
@@ -89,6 +110,24 @@ enum Opcode {
};
+// Some special instructions encoded as a TEQ with S=0 (bit 20).
+enum Opcode9Bits {
+ BX = 1,
+ BXJ = 2,
+ BLX = 3,
+ BKPT = 7
+};
+
+
+// Some special instructions encoded as a CMN with S=0 (bit 20).
+enum Opcode11Bits {
+ CLZ = 1
+};
+
+
// Shifter types for Data-processing operands as defined in section A5.1.2.
enum Shift {
no_shift = -1,
@@ -104,15 +143,9 @@ enum Shift {
// simulator.
enum SoftwareInterruptCodes {
// transition to C code
- call_rt_r5 = 0x10,
- call_rt_r2 = 0x11,
+ call_rt_redirected = 0x10,
// break point
- break_point = 0x20,
- // FP operations. These simulate calling into C for a moment to do fp ops.
- // They should trash all caller-save registers.
- simulator_fp_add = 0x21,
- simulator_fp_sub = 0x22,
- simulator_fp_mul = 0x23
+ break_point = 0x20
};
diff --git a/V8Binding/v8/src/arm/cpu-arm.cc b/V8Binding/v8/src/arm/cpu-arm.cc
index 71da1ec..cafefce 100644
--- a/V8Binding/v8/src/arm/cpu-arm.cc
+++ b/V8Binding/v8/src/arm/cpu-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -46,6 +46,8 @@ void CPU::FlushICache(void* start, size_t size) {
#if !defined (__arm__)
// Not generating ARM instructions for C-code. This means that we are
// building an ARM emulator based target. No I$ flushes are necessary.
+ // None of this code ends up in the snapshot so there are no issues
+ // around whether or not to generate the code when building snapshots.
#else
// Ideally, we would call
// syscall(__ARM_NR_cacheflush, start,
diff --git a/V8Binding/v8/src/arm/disasm-arm.cc b/V8Binding/v8/src/arm/disasm-arm.cc
index f56a599..588732b 100644
--- a/V8Binding/v8/src/arm/disasm-arm.cc
+++ b/V8Binding/v8/src/arm/disasm-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2007-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -253,24 +253,12 @@ void Decoder::PrintPU(Instr* instr) {
// the FormatOption method.
void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes swi) {
switch (swi) {
- case call_rt_r5:
- Print("call_rt_r5");
- return;
- case call_rt_r2:
- Print("call_rt_r2");
+ case call_rt_redirected:
+ Print("call_rt_redirected");
return;
case break_point:
Print("break_point");
return;
- case simulator_fp_add:
- Print("simulator_fp_add");
- return;
- case simulator_fp_mul:
- Print("simulator_fp_mul");
- return;
- case simulator_fp_sub:
- Print("simulator_fp_sub");
- return;
default:
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d",
@@ -450,7 +438,7 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
return 6;
}
case 'u': { // 'u: signed or unsigned multiplies
- if (instr->Bit(22) == 0) {
+ if (instr->Bit(22) == 1) {
Print("u");
} else {
Print("s");
@@ -511,7 +499,7 @@ void Decoder::DecodeType01(Instr* instr) {
Format(instr, "mla'cond's 'rd, 'rm, 'rs, 'rn");
}
} else {
- Format(instr, "'um'al'cond's 'rn, 'rd, 'rs, 'rm");
+ Format(instr, "'um'al'cond's 'rn, 'rd, 'rm, 'rs");
}
} else {
Unknown(instr); // not used by V8
@@ -605,7 +593,17 @@ void Decoder::DecodeType01(Instr* instr) {
if (instr->HasS()) {
Format(instr, "teq'cond 'rn, 'shift_op");
} else {
- Unknown(instr); // not used by V8
+ switch (instr->Bits(7, 4)) {
+ case BX:
+ Format(instr, "bx'cond 'rm");
+ break;
+ case BLX:
+ Format(instr, "blx'cond 'rm");
+ break;
+ default:
+ Unknown(instr); // not used by V8
+ break;
+ }
}
break;
}
@@ -621,7 +619,14 @@ void Decoder::DecodeType01(Instr* instr) {
if (instr->HasS()) {
Format(instr, "cmn'cond 'rn, 'shift_op");
} else {
- Unknown(instr); // not used by V8
+ switch (instr->Bits(7, 4)) {
+ case CLZ:
+ Format(instr, "clz'cond 'rd, 'rm");
+ break;
+ default:
+ Unknown(instr); // not used by V8
+ break;
+ }
}
break;
}
diff --git a/V8Binding/v8/src/arm/frames-arm.h b/V8Binding/v8/src/arm/frames-arm.h
index a67b18a..0874c09 100644
--- a/V8Binding/v8/src/arm/frames-arm.h
+++ b/V8Binding/v8/src/arm/frames-arm.h
@@ -68,7 +68,7 @@ static const RegList kCalleeSaved =
1 << 8 | // r8 v5 (cp in JavaScript code)
kR9Available
<< 9 | // r9 v6
- 1 << 10 | // r10 v7 (pp in JavaScript code)
+ 1 << 10 | // r10 v7
1 << 11; // r11 v8 (fp in JavaScript code)
static const int kNumCalleeSaved = 7 + kR9Available;
@@ -79,15 +79,11 @@ static const int kNumCalleeSaved = 7 + kR9Available;
class StackHandlerConstants : public AllStatic {
public:
- // TODO(1233780): Get rid of the code slot in stack handlers.
- static const int kCodeOffset = 0 * kPointerSize;
- static const int kNextOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kPPOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
- static const int kPCOffset = 5 * kPointerSize;
-
- static const int kAddressDisplacement = -1 * kPointerSize;
+ static const int kNextOffset = 0 * kPointerSize;
+ static const int kStateOffset = 1 * kPointerSize;
+ static const int kFPOffset = 2 * kPointerSize;
+ static const int kPCOffset = 3 * kPointerSize;
+
static const int kSize = kPCOffset + kPointerSize;
};
@@ -108,14 +104,14 @@ class ExitFrameConstants : public AllStatic {
static const int kSavedRegistersOffset = 0 * kPointerSize;
- // Let the parameters pointer for exit frames point just below the
- // frame structure on the stack.
- static const int kPPDisplacement = 3 * kPointerSize;
-
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = +0 * kPointerSize;
- static const int kCallerPPOffset = +1 * kPointerSize;
+ // The calling JS function is between FP and PC.
static const int kCallerPCOffset = +2 * kPointerSize;
+
+ // FP-relative displacement of the caller's SP. It points just
+ // below the saved PC.
+ static const int kCallerSPDisplacement = +3 * kPointerSize;
};
@@ -137,7 +133,7 @@ class JavaScriptFrameConstants : public AllStatic {
static const int kSavedRegistersOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
- // PP-relative.
+ // Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
};
@@ -161,220 +157,6 @@ inline Object* JavaScriptFrame::function_slot_object() const {
}
-// ----------------------------------------------------
-
-
-
-
- // lower | Stack |
- // addresses | ^ |
- // | | |
- // | |
- // | JS frame |
- // | |
- // | |
- // ----------- +=============+ <--- sp (stack pointer)
- // | function |
- // +-------------+
- // +-------------+
- // | |
- // | expressions |
- // | |
- // +-------------+
- // | |
- // a | locals |
- // c | |
- // t +- - - - - - -+ <---
- // i -4 | local0 | ^
- // v +-------------+ |
- // a -3 | code | |
- // t +-------------+ | kLocal0Offset
- // i -2 | context | |
- // o +-------------+ |
- // n -1 | args_length | v
- // +-------------+ <--- fp (frame pointer)
- // 0 | caller_pp |
- // f +-------------+
- // r 1 | caller_fp |
- // a +-------------+
- // m 2 | sp_on_exit | (pp if return, caller_sp if no return)
- // e +-------------+
- // 3 | caller_pc |
- // +-------------+ <--- caller_sp (incl. parameters)
- // | |
- // | parameters |
- // | |
- // +- - - - - - -+ <---
- // -2 | parameter0 | ^
- // +-------------+ | kParam0Offset
- // -1 | receiver | v
- // ----------- +=============+ <--- pp (parameter pointer, r10)
- // 0 | function |
- // +-------------+
- // | |
- // |caller-saved | (must be valid JS values, traversed during GC)
- // | regs |
- // | |
- // +-------------+
- // | |
- // | caller |
- // higher | expressions |
- // addresses | |
- // | |
- // | JS frame |
-
-
-
- // Handler frames (part of expressions of JS frames):
-
- // lower | Stack |
- // addresses | ^ |
- // | | |
- // | |
- // h | expressions |
- // a | |
- // n +-------------+
- // d -1 | code |
- // l +-------------+ <--- handler sp
- // e 0 | next_sp | link to next handler (next handler's sp)
- // r +-------------+
- // 1 | state |
- // f +-------------+
- // r 2 | pp |
- // a +-------------+
- // m 3 | fp |
- // e +-------------+
- // 4 | pc |
- // +-------------+
- // | |
- // higher | expressions |
- // addresses | |
-
-
-
- // JS entry frames: When calling from C to JS, we construct two extra
- // frames: An entry frame (C) and a trampoline frame (JS). The
- // following pictures shows the two frames:
-
- // lower | Stack |
- // addresses | ^ |
- // | | |
- // | |
- // | JS frame |
- // | |
- // | |
- // ----------- +=============+ <--- sp (stack pointer)
- // | |
- // | parameters |
- // t | |
- // r +- - - - - - -+
- // a | parameter0 |
- // m +-------------+
- // p | receiver |
- // o +-------------+
- // l | function |
- // i +-------------+
- // n -3 | code |
- // e +-------------+
- // -2 | NULL | context is always NULL
- // +-------------+
- // f -1 | 0 | args_length is always zero
- // r +-------------+ <--- fp (frame pointer)
- // a 0 | NULL | caller pp is always NULL for entries
- // m +-------------+
- // e 1 | caller_fp |
- // +-------------+
- // 2 | sp_on_exit | (caller_sp)
- // +-------------+
- // 3 | caller_pc |
- // ----------- +=============+ <--- caller_sp == pp
- // . ^
- // . | try-handler, fake, not GC'ed
- // . v
- // +-------------+ <---
- // -2 | next top pp |
- // +-------------+
- // -1 | next top fp |
- // +-------------+ <--- fp
- // | r4 | r4-r9 holding non-JS values must be preserved
- // +-------------+
- // J | r5 | before being initialized not to confuse GC
- // S +-------------+
- // | r6 |
- // +-------------+
- // e | r7 |
- // n +-------------+
- // t | r8 |
- // r +-------------+
- // y [ | r9 | ] only if r9 available
- // +-------------+
- // | r10 |
- // f +-------------+
- // r | r11 |
- // a +-------------+
- // m | caller_sp |
- // e +-------------+
- // | caller_pc |
- // +-------------+ <--- caller_sp
- // | argv | passed on stack from C code
- // +-------------+
- // | |
- // higher | |
- // addresses | C frame |
-
-
- // The first 4 args are passed from C in r0-r3 and are not spilled on entry:
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // [sp+0]: argv
-
-
- // C entry frames: When calling from JS to C, we construct one extra
- // frame:
-
- // lower | Stack |
- // addresses | ^ |
- // | | |
- // | |
- // | C frame |
- // | |
- // | |
- // ----------- +=============+ <--- sp (stack pointer)
- // | |
- // | parameters | (first 4 args are passed in r0-r3)
- // | |
- // +-------------+ <--- fp (frame pointer)
- // f 4/5 | caller_fp |
- // r +-------------+
- // a 5/6 | sp_on_exit | (pp)
- // m +-------------+
- // e 6/7 | caller_pc |
- // +-------------+ <--- caller_sp (incl. parameters)
- // 7/8 | |
- // | parameters |
- // | |
- // +- - - - - - -+ <---
- // -2 | parameter0 | ^
- // +-------------+ | kParam0Offset
- // -1 | receiver | v
- // ----------- +=============+ <--- pp (parameter pointer, r10)
- // 0 | function |
- // +-------------+
- // | |
- // |caller-saved |
- // | regs |
- // | |
- // +-------------+
- // | |
- // | caller |
- // | expressions |
- // | |
- // higher | |
- // addresses | JS frame |
-
-
} } // namespace v8::internal
#endif // V8_ARM_FRAMES_ARM_H_
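
After this change a stack handler is exactly four words — next, state, fp, pc — with the code slot and pp entry gone and next moved to offset 0 so the chain can be relinked with a single load/store. A struct sketch of the layout, only to make the offset constants concrete (word-sized fields assumed; this is not the real declaration):

    #include <cstddef>
    #include <cstdint>

    struct StackHandler {
      StackHandler* next;  // kNextOffset  == 0 * kPointerSize
      uintptr_t state;     // kStateOffset == 1 * kPointerSize
      void* fp;            // kFPOffset    == 2 * kPointerSize
      void* pc;            // kPCOffset    == 3 * kPointerSize
    };

    static_assert(offsetof(StackHandler, next) == 0, "next must stay first");
    static_assert(sizeof(StackHandler) == 4 * sizeof(void*),
                  "four words: kSize == kPCOffset + kPointerSize");
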
diff --git a/V8Binding/v8/src/arm/ic-arm.cc b/V8Binding/v8/src/arm/ic-arm.cc
index 9b45c46..5519771 100644
--- a/V8Binding/v8/src/arm/ic-arm.cc
+++ b/V8Binding/v8/src/arm/ic-arm.cc
@@ -67,11 +67,15 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Load the map into t0.
__ ldr(t0, FieldMemOperand(t1, JSObject::kMapOffset));
// Test the has_named_interceptor bit in the map.
- __ ldr(t0, FieldMemOperand(t1, Map::kInstanceAttributesOffset));
- __ tst(t0, Operand(1 << (Map::kHasNamedInterceptor + (3 * 8))));
+ __ ldr(r3, FieldMemOperand(t0, Map::kInstanceAttributesOffset));
+ __ tst(r3, Operand(1 << (Map::kHasNamedInterceptor + (3 * 8))));
// Jump to miss if the interceptor bit is set.
__ b(ne, miss);
+ // Bail out if we have a JS global object.
+ __ ldrb(r3, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ cmp(r3, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ b(eq, miss);
// Check that the properties array is a dictionary.
__ ldr(t0, FieldMemOperand(t1, JSObject::kPropertiesOffset));
@@ -223,9 +227,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Check for number.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &number);
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
- __ cmp(r3, Operand(HEAP_NUMBER_TYPE));
+ __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
__ b(ne, &non_number);
__ bind(&number);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
@@ -272,9 +274,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,
__ b(eq, miss);
// Check that the value is a JSFunction.
- __ ldr(r0, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- __ cmp(r0, Operand(JS_FUNCTION_TYPE));
+ __ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
__ b(ne, miss);
// Check that the function has been loaded.
@@ -312,10 +312,8 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &miss);
- // Check that the receiver is a valid JS object.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r3, Map::kInstanceTypeOffset));
- __ cmp(r0, Operand(FIRST_JS_OBJECT_TYPE));
+ // Check that the receiver is a valid JS object. Put the map in r3.
+ __ CompareObjectType(r1, r3, r0, FIRST_JS_OBJECT_TYPE);
__ b(lt, &miss);
// If this assert fails, we have to check upper bound too.
@@ -392,9 +390,7 @@ void CallIC::Generate(MacroAssembler* masm,
__ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
__ tst(r2, Operand(kSmiTagMask));
__ b(eq, &invoke);
- __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
- __ cmp(r3, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
__ b(eq, &global);
__ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
__ b(ne, &invoke);
@@ -447,10 +443,8 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &miss);
- // Check that the receiver is a valid JS object.
- __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r3, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ // Check that the receiver is a valid JS object. Put the map in r3.
+ __ CompareObjectType(r0, r3, r1, FIRST_JS_OBJECT_TYPE);
__ b(lt, &miss);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
@@ -513,6 +507,12 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
return false;
}
+void KeyedStoreIC::ClearInlinedVersion(Address address) {}
+void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+ return false;
+}
+
Object* KeyedLoadIC_Miss(Arguments args);
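
The repeated load-map / load-instance-type / compare triple that these hunks replace is exactly what the new CompareObjectType macro assembles. In C++ terms the three instructions compute roughly the following, sketched with invented stand-in types for the heap layout:

    #include <cstdint>

    // Invented stand-ins for the two heap fields CompareObjectType touches.
    struct Map { uint8_t instance_type; };
    struct HeapObject { Map* map; };

    // Models the ldr/ldrb/cmp triple: the sign of the result corresponds to
    // the lt/eq/gt condition flags after "cmp type_reg, #type".
    int CompareObjectType(const HeapObject* obj, uint8_t type) {
      const Map* map = obj->map;            // ldr  map, [obj, #kMapOffset]
      uint8_t actual = map->instance_type;  // ldrb type_reg, [map, #kInstanceTypeOffset]
      return static_cast<int>(actual) - static_cast<int>(type);  // cmp
    }
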
diff --git a/V8Binding/v8/src/arm/jump-target-arm.cc b/V8Binding/v8/src/arm/jump-target-arm.cc
index 65e7eaf..a925c51 100644
--- a/V8Binding/v8/src/arm/jump-target-arm.cc
+++ b/V8Binding/v8/src/arm/jump-target-arm.cc
@@ -149,7 +149,7 @@ void JumpTarget::Call() {
}
-void JumpTarget::DoBind(int mergable_elements) {
+void JumpTarget::DoBind() {
ASSERT(!is_bound());
// Live non-frame registers are not allowed at the start of a basic
@@ -207,7 +207,7 @@ void JumpTarget::DoBind(int mergable_elements) {
// Compute the frame to use for entry to the block.
if (entry_frame_ == NULL) {
- ComputeEntryFrame(mergable_elements);
+ ComputeEntryFrame();
}
// Some moves required to merge to an expected frame require purely
diff --git a/V8Binding/v8/src/arm/macro-assembler-arm.cc b/V8Binding/v8/src/arm/macro-assembler-arm.cc
index 4e24063..3d6b8cb 100644
--- a/V8Binding/v8/src/arm/macro-assembler-arm.cc
+++ b/V8Binding/v8/src/arm/macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -35,11 +35,6 @@
namespace v8 {
namespace internal {
-// Give alias names to registers
-Register cp = { 8 }; // JavaScript context pointer
-Register pp = { 10 }; // parameter pointer
-
-
MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size),
unresolved_(0),
@@ -51,14 +46,14 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
// We always generate arm code, never thumb code, even if V8 is compiled to
// thumb, so we require inter-working support
-#if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
+#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
#error "flag -mthumb-interwork missing"
#endif
// We do not support thumb inter-working with an arm architecture not supporting
// the blx instruction (below v5t)
-#if defined(__THUMB_INTERWORK__)
+#if defined(USE_THUMB_INTERWORK)
#if !defined(__ARM_ARCH_5T__) && \
!defined(__ARM_ARCH_5TE__) && \
!defined(__ARM_ARCH_7A__) && \
@@ -70,12 +65,12 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
// Using blx may yield better code, so use it when required or when available
-#if defined(__THUMB_INTERWORK__) || defined(__ARM_ARCH_5__)
+#if defined(USE_THUMB_INTERWORK) || defined(__ARM_ARCH_5__)
#define USE_BLX 1
#endif
// Using bx does not yield better code, so use it only when required
-#if defined(__THUMB_INTERWORK__)
+#if defined(USE_THUMB_INTERWORK)
#define USE_BX 1
#endif
@@ -128,26 +123,10 @@ void MacroAssembler::Call(Register target, Condition cond) {
void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
-#if !defined(__arm__)
- if (rmode == RelocInfo::RUNTIME_ENTRY) {
- mov(r2, Operand(target, rmode), LeaveCC, cond);
- // Set lr for return at current pc + 8.
- mov(lr, Operand(pc), LeaveCC, cond);
- // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
- // Notify the simulator of the transition to C code.
- swi(assembler::arm::call_rt_r2);
- } else {
- // set lr for return at current pc + 8
- mov(lr, Operand(pc), LeaveCC, cond);
- // emit a ldr<cond> pc, [pc + offset of target in constant pool]
- mov(pc, Operand(target, rmode), LeaveCC, cond);
- }
-#else
// Set lr for return at current pc + 8.
mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool].
mov(pc, Operand(target, rmode), LeaveCC, cond);
-#endif // !defined(__arm__)
// If USE_BLX is defined, we could emit a 'mov ip, target', followed by a
// 'blx ip'; however, the code would not be shorter than the above sequence
// and the target address of the call would be referenced by the first
@@ -301,8 +280,8 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
sub(r6, r6, Operand(kPointerSize));
- // Compute parameter pointer before making changes and save it as ip
- // register so that it is restored as sp register on exit, thereby
+ // Compute callee's stack pointer before making changes and save it as
+ // ip register so that it is restored as sp register on exit, thereby
// popping the args.
// ip = sp + kPointerSize * #args;
@@ -573,41 +552,48 @@ void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
}
#endif
+
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
- ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize); // adjust this code
+ // Adjust this code if not the case.
+ ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// The pc (return address) is passed in register lr.
if (try_location == IN_JAVASCRIPT) {
- stm(db_w, sp, pp.bit() | fp.bit() | lr.bit());
if (type == TRY_CATCH_HANDLER) {
mov(r3, Operand(StackHandler::TRY_CATCH));
} else {
mov(r3, Operand(StackHandler::TRY_FINALLY));
}
- push(r3); // state
+ ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
+ && StackHandlerConstants::kFPOffset == 2 * kPointerSize
+ && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
+ // Save the current handler as the next handler.
mov(r3, Operand(ExternalReference(Top::k_handler_address)));
ldr(r1, MemOperand(r3));
- push(r1); // next sp
- str(sp, MemOperand(r3)); // chain handler
- mov(r0, Operand(Smi::FromInt(StackHandler::kCodeNotPresent))); // new TOS
- push(r0);
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ push(r1);
+ // Link this handler as the new current one.
+ str(sp, MemOperand(r3));
} else {
// Must preserve r0-r4, r5-r7 are available.
ASSERT(try_location == IN_JS_ENTRY);
- // The parameter pointer is meaningless here and fp does not point to a JS
- // frame. So we save NULL for both pp and fp. We expect the code throwing an
- // exception to check fp before dereferencing it to restore the context.
- mov(pp, Operand(0)); // set pp to NULL
- mov(ip, Operand(0)); // to save a NULL fp
- stm(db_w, sp, pp.bit() | ip.bit() | lr.bit());
+ // The frame pointer does not point to a JS frame so we save NULL
+ // for fp. We expect the code throwing an exception to check fp
+ // before dereferencing it to restore the context.
+ mov(ip, Operand(0)); // To save a NULL frame pointer.
mov(r6, Operand(StackHandler::ENTRY));
- push(r6); // state
+ ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
+ && StackHandlerConstants::kFPOffset == 2 * kPointerSize
+ && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
+ // Save the current handler as the next handler.
mov(r7, Operand(ExternalReference(Top::k_handler_address)));
ldr(r6, MemOperand(r7));
- push(r6); // next sp
- str(sp, MemOperand(r7)); // chain handler
- mov(r5, Operand(Smi::FromInt(StackHandler::kCodeNotPresent))); // new TOS
- push(r5); // flush TOS
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ push(r6);
+ // Link this handler as the new current one.
+ str(sp, MemOperand(r7));
}
}
@@ -759,6 +745,62 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
+void MacroAssembler::CompareObjectType(Register function,
+ Register map,
+ Register type_reg,
+ InstanceType type) {
+ ldr(map, FieldMemOperand(function, HeapObject::kMapOffset));
+ ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ cmp(type_reg, Operand(type));
+}
+
+
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss) {
+ // Check that the receiver isn't a smi.
+ BranchOnSmi(function, miss);
+
+ // Check that the function really is a function. Load map into result reg.
+ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
+ b(ne, miss);
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
+ b(ne, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ ldr(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // If the prototype or initial map is the hole, don't return it and
+ // simply miss the cache instead. This will allow us to allocate a
+ // prototype object on-demand in the runtime system.
+ cmp(result, Operand(Factory::the_hole_value()));
+ b(eq, miss);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ CompareObjectType(result, scratch, scratch, MAP_TYPE);
+ b(ne, &done);
+
+ // Get the prototype from the initial map.
+ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ jmp(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ bind(&done);
+}
+
+
void MacroAssembler::CallStub(CodeStub* stub) {
ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
Call(stub->GetCode(), RelocInfo::CODE_TARGET);
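
With the code slot and pp gone, PushTryHandler is a plain linked-list push onto the chain rooted at Top::k_handler_address: one stm stores state, fp and pc, the old chain head is pushed as the next field, and sp becomes the new head. The same operation as ordinary C++, with a file-static pointer standing in for the Top slot (a sketch, not the real data structure):

    // Minimal model of the handler chain maintained by PushTryHandler.
    struct StackHandler {
      StackHandler* next;  // pushed last, so it sits at offset 0
      int state;           // TRY_CATCH / TRY_FINALLY / ENTRY
      void* fp;            // NULL for JS-entry handlers
      void* pc;
    };

    static StackHandler* handler_top = nullptr;  // ~ Top::k_handler_address

    void PushTryHandler(StackHandler* slot, int state, void* fp, void* pc) {
      slot->state = state;       // stm db_w sp, {state, fp, lr}
      slot->fp = fp;
      slot->pc = pc;
      slot->next = handler_top;  // push the old chain head as "next"
      handler_top = slot;        // str sp, [Top::k_handler_address]
    }
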
diff --git a/V8Binding/v8/src/arm/macro-assembler-arm.h b/V8Binding/v8/src/arm/macro-assembler-arm.h
index 27eeab2..ab74805 100644
--- a/V8Binding/v8/src/arm/macro-assembler-arm.h
+++ b/V8Binding/v8/src/arm/macro-assembler-arm.h
@@ -35,8 +35,7 @@ namespace internal {
// Give alias names to registers
-extern Register cp; // JavaScript context pointer
-extern Register pp; // parameter pointer
+const Register cp = { 8 }; // JavaScript context pointer
// Helper types to make boolean flag easier to read at call-site.
@@ -187,6 +186,38 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Support functions.
+  // Try to get the function prototype of a function and put the value in
+ // the result register. Checks that the function really is a
+ // function and jumps to the miss label if the fast checks fail. The
+ // function register will be untouched; the other registers may be
+ // clobbered.
+ void TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss);
+
+ // Compare object type for heap object. heap_object contains a non-Smi
+ // whose object type should be compared with the given type. This both
+ // sets the flags and leaves the object type in the type_reg register.
+  // It leaves the map in the map register (unless the type_reg and map
+  // registers are the same register). It leaves the heap object in the
+  // heap_object register unless that register is the same as one of the
+  // other registers.
+ void CompareObjectType(Register heap_object,
+ Register map,
+ Register type_reg,
+ InstanceType type);
+
+ inline void BranchOnSmi(Register value, Label* smi_label) {
+ tst(value, Operand(kSmiTagMask));
+ b(eq, smi_label);
+ }
+
+ inline void BranchOnNotSmi(Register value, Label* not_smi_label) {
+ tst(value, Operand(kSmiTagMask));
+ b(ne, not_smi_label);
+ }
+
// Generates code for reporting that an illegal operation has
// occurred.
void IllegalOperation(int num_arguments);
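
The BranchOnSmi/BranchOnNotSmi helpers encode the standard smi test: on 32-bit V8 a small integer is stored shifted left one bit with a zero tag bit, so tst value, #kSmiTagMask sets the Z flag exactly for smis. A sketch of the encoding being tested:

    #include <cstdint>

    const uint32_t kSmiTagSize = 1;
    const uint32_t kSmiTag = 0;
    const uint32_t kSmiTagMask = (1u << kSmiTagSize) - 1;  // 0x1

    // "tst value, #kSmiTagMask" sets Z (eq) exactly when this is true.
    bool IsSmi(uint32_t value) { return (value & kSmiTagMask) == kSmiTag; }

    // Tag/untag, to show why the low bit is free to carry the tag.
    uint32_t SmiFromInt(int32_t i) {
      return static_cast<uint32_t>(i) << kSmiTagSize;
    }
    int32_t IntFromSmi(uint32_t smi) {
      return static_cast<int32_t>(smi) >> kSmiTagSize;
    }
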
diff --git a/V8Binding/v8/src/arm/simulator-arm.cc b/V8Binding/v8/src/arm/simulator-arm.cc
index b8b6663..53dbec9 100644
--- a/V8Binding/v8/src/arm/simulator-arm.cc
+++ b/V8Binding/v8/src/arm/simulator-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,6 +30,7 @@
#include "v8.h"
#include "disasm.h"
+#include "assembler.h"
#include "arm/constants-arm.h"
#include "arm/simulator-arm.h"
@@ -380,7 +381,23 @@ void Debugger::Debug() {
}
+// Create one simulator per thread and keep it in thread local storage.
+static v8::internal::Thread::LocalStorageKey simulator_key;
+
+
+bool Simulator::initialized_ = false;
+
+
+void Simulator::Initialize() {
+ if (initialized_) return;
+ simulator_key = v8::internal::Thread::CreateThreadLocalKey();
+ initialized_ = true;
+ ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
+}
+
+
Simulator::Simulator() {
+ ASSERT(initialized_);
// Setup simulator support first. Some of this information is needed to
// setup the architecture state.
size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
@@ -412,9 +429,63 @@ Simulator::Simulator() {
}
-// Create one simulator per thread and keep it in thread local storage.
-static v8::internal::Thread::LocalStorageKey simulator_key =
- v8::internal::Thread::CreateThreadLocalKey();
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the swi instruction so the simulator knows what to call.
+class Redirection {
+ public:
+ Redirection(void* external_function, bool fp_return)
+ : external_function_(external_function),
+ swi_instruction_((AL << 28) | (0xf << 24) | call_rt_redirected),
+ fp_return_(fp_return),
+ next_(list_) {
+ list_ = this;
+ }
+
+ void* address_of_swi_instruction() {
+ return reinterpret_cast<void*>(&swi_instruction_);
+ }
+
+ void* external_function() { return external_function_; }
+ bool fp_return() { return fp_return_; }
+
+ static Redirection* Get(void* external_function, bool fp_return) {
+ Redirection* current;
+ for (current = list_; current != NULL; current = current->next_) {
+ if (current->external_function_ == external_function) return current;
+ }
+ return new Redirection(external_function, fp_return);
+ }
+
+ static Redirection* FromSwiInstruction(Instr* swi_instruction) {
+ char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
+ char* addr_of_redirection =
+ addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
+ return reinterpret_cast<Redirection*>(addr_of_redirection);
+ }
+
+ private:
+ void* external_function_;
+ uint32_t swi_instruction_;
+ bool fp_return_;
+ Redirection* next_;
+ static Redirection* list_;
+};
+
+
+Redirection* Redirection::list_ = NULL;
+
+
+void* Simulator::RedirectExternalReference(void* external_function,
+ bool fp_return) {
+ Redirection* redirection = Redirection::Get(external_function, fp_return);
+ return redirection->address_of_swi_instruction();
+}
+
// Get the active Simulator for the current thread.
Simulator* Simulator::current() {
@@ -921,7 +992,14 @@ void Simulator::HandleRList(Instr* instr, bool load) {
// 64-bit value. With the code below we assume that all runtime calls return
// 64 bits of result. If they don't, the r1 result register contains a bogus
// value, which is fine because it is caller-saved.
-typedef int64_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1);
+typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
+ int32_t arg1,
+ int32_t arg2,
+ int32_t arg3);
+typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
+ int32_t arg1,
+ int32_t arg2,
+ int32_t arg3);
// Software interrupt instructions are used by the simulator to call into the
@@ -929,30 +1007,54 @@ typedef int64_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1);
void Simulator::SoftwareInterrupt(Instr* instr) {
int swi = instr->SwiField();
switch (swi) {
- case call_rt_r5: {
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(get_register(r5));
- intptr_t arg0 = get_register(r0);
- intptr_t arg1 = get_register(r1);
- int64_t result = target(arg0, arg1);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
- set_register(r0, lo_res);
- set_register(r1, hi_res);
- set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
- break;
- }
- case call_rt_r2: {
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(get_register(r2));
- intptr_t arg0 = get_register(r0);
- intptr_t arg1 = get_register(r1);
- int64_t result = target(arg0, arg1);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
- set_register(r0, lo_res);
- set_register(r1, hi_res);
- set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
+ case call_rt_redirected: {
+ Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ int32_t arg0 = get_register(r0);
+ int32_t arg1 = get_register(r1);
+ int32_t arg2 = get_register(r2);
+ int32_t arg3 = get_register(r3);
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ int32_t saved_lr = get_register(lr);
+ if (redirection->fp_return()) {
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->external_function());
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ double x, y;
+ GetFpArgs(&x, &y);
+ PrintF("Call to host function at %p with args %f, %f\n",
+ FUNCTION_ADDR(target), x, y);
+ }
+ double result = target(arg0, arg1, arg2, arg3);
+ SetFpResult(result);
+ } else {
+ intptr_t external =
+ reinterpret_cast<int32_t>(redirection->external_function());
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(
+ "Call to host function at %p with args %08x, %08x, %08x, %08x\n",
+ FUNCTION_ADDR(target),
+ arg0,
+ arg1,
+ arg2,
+ arg3);
+ }
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ int32_t lo_res = static_cast<int32_t>(result);
+ int32_t hi_res = static_cast<int32_t>(result >> 32);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08x\n", lo_res);
+ }
+        set_register(r0, lo_res);
+        set_register(r1, hi_res);
+ }
+ set_register(lr, saved_lr);
+ set_pc(get_register(lr));
break;
}
case break_point: {
@@ -960,30 +1062,6 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
dbg.Debug();
break;
}
- {
- double x, y, z;
- case simulator_fp_add:
- GetFpArgs(&x, &y);
- z = x + y;
- SetFpResult(z);
- TrashCallerSaveRegisters();
- set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
- break;
- case simulator_fp_sub:
- GetFpArgs(&x, &y);
- z = x - y;
- SetFpResult(z);
- TrashCallerSaveRegisters();
- set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
- break;
- case simulator_fp_mul:
- GetFpArgs(&x, &y);
- z = x * y;
- SetFpResult(z);
- TrashCallerSaveRegisters();
- set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
- break;
- }
default: {
UNREACHABLE();
break;
@@ -1282,7 +1360,21 @@ void Simulator::DecodeType01(Instr* instr) {
SetNZFlags(alu_out);
SetCFlag(shifter_carry_out);
} else {
- UNIMPLEMENTED();
+ ASSERT(type == 0);
+ int rm = instr->RmField();
+ switch (instr->Bits(7, 4)) {
+ case BX:
+ set_pc(get_register(rm));
+ break;
+ case BLX: {
+ uint32_t old_pc = get_pc();
+ set_pc(get_register(rm));
+ set_register(lr, old_pc + Instr::kInstrSize);
+ break;
+ }
+ default:
+ UNIMPLEMENTED();
+ }
}
break;
}
@@ -1306,7 +1398,27 @@ void Simulator::DecodeType01(Instr* instr) {
Format(instr, "cmn'cond 'rn, 'shift_rm");
Format(instr, "cmn'cond 'rn, 'imm");
} else {
- UNIMPLEMENTED();
+ ASSERT(type == 0);
+ int rm = instr->RmField();
+ int rd = instr->RdField();
+ switch (instr->Bits(7, 4)) {
+ case CLZ: {
+ uint32_t bits = get_register(rm);
+ int leading_zeros = 0;
+ if (bits == 0) {
+ leading_zeros = 32;
+ } else {
+ while ((bits & 0x80000000u) == 0) {
+ bits <<= 1;
+ leading_zeros++;
+ }
+ }
+ set_register(rd, leading_zeros);
+ break;
+ }
+ default:
+ UNIMPLEMENTED();
+ }
}
break;
}
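
The Redirection scheme gives generated code the address of a swi instruction that lives inside a heap-allocated record; when the simulator traps the swi it recovers the record — and thus the real host function pointer — by subtracting the field offset from the instruction's address. A compact model of that back-pointer arithmetic, with invented names:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical miniature of the simulator's Redirection record.
    struct Redirection {
      void* external_function;   // real host-side target
      uint32_t swi_instruction;  // what the generated code actually "calls"
    };

    Redirection* FromSwiInstruction(uint32_t* swi) {
      // Walk back from the embedded instruction to the enclosing record.
      char* addr = reinterpret_cast<char*>(swi);
      return reinterpret_cast<Redirection*>(
          addr - offsetof(Redirection, swi_instruction));
    }

    int main() {
      static int host_target = 42;  // stands in for the real host function
      Redirection r = { &host_target, 0xEF000000u };  // 0xEF... ~ ARM swi, cond=AL
      uint32_t* callable = &r.swi_instruction;        // handed to generated code
      std::printf("%d\n", FromSwiInstruction(callable) == &r);  // prints 1
      return 0;
    }
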
diff --git a/V8Binding/v8/src/arm/simulator-arm.h b/V8Binding/v8/src/arm/simulator-arm.h
index d4a395a..15b92a5 100644
--- a/V8Binding/v8/src/arm/simulator-arm.h
+++ b/V8Binding/v8/src/arm/simulator-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -106,6 +106,9 @@ class Simulator {
// Executes ARM instructions until the PC reaches end_sim_pc.
void Execute();
+ // Call on program start.
+ static void Initialize();
+
// V8 generally calls into generated code with 5 parameters. This is a
// convenience function, which sets up the simulator state and grabs the
// result on return.
@@ -175,6 +178,10 @@ class Simulator {
// Executes one instruction.
void InstructionDecode(Instr* instr);
+ // Runtime call support.
+ static void* RedirectExternalReference(void* external_function,
+ bool fp_return);
+
// For use in calls that take two double values, constructed from r0, r1, r2
// and r3.
void GetFpArgs(double* x, double* y);
@@ -192,6 +199,7 @@ class Simulator {
char* stack_;
bool pc_modified_;
int icount_;
+ static bool initialized_;
// registered breakpoints
Instr* break_pc_;
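
Initialize() replaces a file-scope static whose construction order relative to other static initializers was unspecified; the TLS key is now created once, explicitly, before the first Simulator is constructed. A loose sketch of the per-thread-singleton pattern, using C++ thread_local in place of the V8 Thread::LocalStorageKey API (the real code stores the pointer via the Thread class, so names here are illustrative):

    class Simulator {
     public:
      static void Initialize() {
        initialized_ = true;  // also registers the redirector in the real code
      }
      static Simulator* current() {
        thread_local Simulator* sim = nullptr;
        if (sim == nullptr) sim = new Simulator();  // first use on this thread
        return sim;
      }
     private:
      Simulator() { /* ASSERT(initialized_) in the real code */ }
      static bool initialized_;
    };

    bool Simulator::initialized_ = false;
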
diff --git a/V8Binding/v8/src/arm/stub-cache-arm.cc b/V8Binding/v8/src/arm/stub-cache-arm.cc
index c09f9e3..71f2225 100644
--- a/V8Binding/v8/src/arm/stub-cache-arm.cc
+++ b/V8Binding/v8/src/arm/stub-cache-arm.cc
@@ -283,9 +283,7 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
__ b(eq, miss_label);
// Check that the object is a JS array.
- __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(JS_ARRAY_TYPE));
+ __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
__ b(ne, miss_label);
// Load length directly from the JS array.
@@ -498,9 +496,7 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
Object* CallStubCompiler::CompileCallField(Object* object,
JSObject* holder,
int index,
- String* name,
- Code::Flags flags) {
- ASSERT_EQ(FIELD, Code::ExtractTypeFromFlags(flags));
+ String* name) {
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
@@ -523,9 +519,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &miss);
// Get the map.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &miss);
// Patch the receiver on the stack with the global proxy if
@@ -544,16 +538,14 @@ Object* CallStubCompiler::CompileCallField(Object* object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCodeWithFlags(flags, name);
+ return GetCode(FIELD, name);
}
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
- CheckType check,
- Code::Flags flags) {
- ASSERT_EQ(CONSTANT_FUNCTION, Code::ExtractTypeFromFlags(flags));
+ CheckType check) {
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
@@ -588,9 +580,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case STRING_CHECK:
// Check that the object is a two-byte string or a symbol.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
+ __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
__ b(hs, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
@@ -605,9 +595,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the object is a smi or a heap number.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &fast);
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(HEAP_NUMBER_TYPE));
+ __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
__ b(ne, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
@@ -656,6 +644,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
+ ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
__ InvokeCode(code, expected, arguments(),
@@ -671,7 +660,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
- return GetCodeWithFlags(flags, function_name);
+ return GetCode(CONSTANT_FUNCTION, function_name);
}
@@ -695,6 +684,61 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
}
+Object* CallStubCompiler::CompileCallGlobal(JSGlobalObject* object,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- lr: return address
+ // -----------------------------------
+ Label miss;
+
+ __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3);
+
+ // Get the number of arguments.
+ const int argc = arguments().immediate();
+
+ // Check that the map of the global has not changed.
+ __ ldr(r2, MemOperand(sp, argc * kPointerSize));
+ __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ cmp(r3, Operand(Handle<Map>(object->map())));
+ __ b(ne, &miss);
+
+ // Get the value from the cell.
+ __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
+
+ // Check that the cell contains the same function.
+ __ cmp(r1, Operand(Handle<JSFunction>(function)));
+ __ b(ne, &miss);
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ __ ldr(r3, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+ __ str(r3, MemOperand(sp, argc * kPointerSize));
+
+ // Setup the context (function already in r1).
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+ // Jump to the cached code (tail call).
+ ASSERT(function->is_compiled());
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ __ InvokeCode(code, expected, arguments(),
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::call_global_inline, 1, r1, r3);
+ __ IncrementCounter(&Counters::call_global_inline_miss, 1, r1, r3);
+ Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
+}
+
+
Object* StoreStubCompiler::CompileStoreField(JSObject* object,
int index,
Map* transition,
@@ -835,6 +879,45 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
}
+Object* StoreStubCompiler::CompileStoreGlobal(JSGlobalObject* object,
+ JSGlobalPropertyCell* cell,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ IncrementCounter(&Counters::named_store_global_inline, 1, r1, r3);
+
+ // Check that the map of the global has not changed.
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ cmp(r3, Operand(Handle<Map>(object->map())));
+ __ b(ne, &miss);
+
+ // Store the value in the cell.
+ __ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ mov(r1, Operand(JSGlobalPropertyCell::kValueOffset));
+ __ RecordWrite(r2, r1, r3);
+
+ __ Ret();
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::named_store_global_inline, 1, r1, r3);
+ __ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r1, r3);
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
+}
+
+
Object* LoadStubCompiler::CompileLoadField(JSObject* object,
JSObject* holder,
int index,
@@ -929,6 +1012,47 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
}
+Object* LoadStubCompiler::CompileLoadGlobal(JSGlobalObject* object,
+ JSGlobalPropertyCell* cell,
+ String* name,
+ bool is_dont_delete) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
+
+ // Check that the map of the global has not changed.
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ cmp(r3, Operand(Handle<Map>(object->map())));
+ __ b(ne, &miss);
+
+ // Get the value from the cell.
+ __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ ldr(r0, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
+
+ // Check for deleted property if property can actually be deleted.
+ if (!is_dont_delete) {
+ __ cmp(r0, Operand(Factory::the_hole_value()));
+ __ b(eq, &miss);
+ }
+
+ __ Ret();
+
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
+ __ IncrementCounter(&Counters::named_load_global_inline_miss, 1, r1, r3);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
+}
+
+
// TODO(1224671): IC stubs for keyed loads have not been implemented
// for ARM.
Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
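
All three new *Global stubs share one shape: check that the global object's map is unchanged, then read or write the property through its JSGlobalPropertyCell rather than through the object itself, missing to the generic IC on any mismatch. A plain C++ model of that cell indirection, with invented minimal types (the hole sentinel marks a deleted property):

    // Invented stand-ins for the map/cell shapes used by the global stubs.
    struct Map {};
    struct PropertyCell { void* value; };  // ~ JSGlobalPropertyCell
    struct GlobalObject { const Map* map; };

    static int the_hole_storage;
    static void* const kTheHole = &the_hole_storage;  // deleted-property sentinel

    // LoadGlobal fast path: map check, then read through the cell.
    void* LoadGlobal(const GlobalObject* global, const Map* expected_map,
                     const PropertyCell* cell, bool is_dont_delete) {
      if (global->map != expected_map) return nullptr;           // -> miss
      void* value = cell->value;
      if (!is_dont_delete && value == kTheHole) return nullptr;  // -> miss
      return value;
    }

    // StoreGlobal fast path: same map check, then write through the cell.
    bool StoreGlobal(GlobalObject* global, const Map* expected_map,
                     PropertyCell* cell, void* value) {
      if (global->map != expected_map) return false;  // -> miss
      cell->value = value;  // plus a write barrier (RecordWrite) in the real code
      return true;
    }
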
diff --git a/V8Binding/v8/src/arm/virtual-frame-arm.cc b/V8Binding/v8/src/arm/virtual-frame-arm.cc
index 9527383..3d0ada7 100644
--- a/V8Binding/v8/src/arm/virtual-frame-arm.cc
+++ b/V8Binding/v8/src/arm/virtual-frame-arm.cc
@@ -156,9 +156,7 @@ void VirtualFrame::Enter() {
__ b(ne, &map_check);
__ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
__ bind(&map_check);
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(eq, &done);
__ stop("VirtualFrame::Enter - r1 is not a function (map check).");
__ bind(&done);
@@ -230,8 +228,8 @@ void VirtualFrame::StoreToFrameSlotAt(int index) {
void VirtualFrame::PushTryHandler(HandlerType type) {
- // Grow the expression stack by handler size less one (the return address
- // is already pushed by a call instruction).
+ // Grow the expression stack by handler size less one (the return
+ // address in lr is already counted by a call instruction).
Adjust(kHandlerSize - 1);
__ PushTryHandler(IN_JAVASCRIPT, type);
}
diff --git a/V8Binding/v8/src/arm/virtual-frame-arm.h b/V8Binding/v8/src/arm/virtual-frame-arm.h
index ebebd53..2f36f10 100644
--- a/V8Binding/v8/src/arm/virtual-frame-arm.h
+++ b/V8Binding/v8/src/arm/virtual-frame-arm.h
@@ -359,14 +359,14 @@ class VirtualFrame : public ZoneObject {
void EmitPush(Register reg);
// Push an element on the virtual frame.
- void Push(Register reg, StaticType static_type = StaticType());
+ void Push(Register reg);
void Push(Handle<Object> value);
void Push(Smi* value) { Push(Handle<Object>(value)); }
// Pushing a result invalidates it (its contents become owned by the frame).
void Push(Result* result) {
if (result->is_register()) {
- Push(result->reg(), result->static_type());
+ Push(result->reg());
} else {
ASSERT(result->is_constant());
Push(result->handle());
diff --git a/V8Binding/v8/src/array.js b/V8Binding/v8/src/array.js
index ed84b5f..eb69f97 100644
--- a/V8Binding/v8/src/array.js
+++ b/V8Binding/v8/src/array.js
@@ -769,6 +769,63 @@ function ArraySort(comparefn) {
}
}
+ function SafeRemoveArrayHoles(obj) {
+ // Copy defined elements from the end to fill in all holes and undefineds
+    // at the beginning of the array. Write undefineds and holes at the end
+    // after the loop is finished.
+ var first_undefined = 0;
+ var last_defined = length - 1;
+ var num_holes = 0;
+ while (first_undefined < last_defined) {
+ // Find first undefined element.
+ while (first_undefined < last_defined &&
+ !IS_UNDEFINED(obj[first_undefined])) {
+ first_undefined++;
+ }
+ // Maintain the invariant num_holes = the number of holes in the original
+ // array with indices <= first_undefined or > last_defined.
+ if (!obj.hasOwnProperty(first_undefined)) {
+ num_holes++;
+ }
+
+ // Find last defined element.
+ while (first_undefined < last_defined &&
+ IS_UNDEFINED(obj[last_defined])) {
+ if (!obj.hasOwnProperty(last_defined)) {
+ num_holes++;
+ }
+ last_defined--;
+ }
+ if (first_undefined < last_defined) {
+ // Fill in hole or undefined.
+ obj[first_undefined] = obj[last_defined];
+ obj[last_defined] = void 0;
+ }
+ }
+ // If there were any undefineds in the entire array, first_undefined
+    // points to one past the last defined element. Ensure this holds even
+    // when there were no undefineds, so that first_undefined == number
+    // of defined elements.
+ if (!IS_UNDEFINED(obj[first_undefined])) first_undefined++;
+ // Fill in the undefineds and the holes. There may be a hole where
+ // an undefined should be and vice versa.
+ var i;
+ for (i = first_undefined; i < length - num_holes; i++) {
+ obj[i] = void 0;
+ }
+ for (i = length - num_holes; i < length; i++) {
+      // For compatibility with WebKit, do not expose elements in the prototype.
+ if (i in obj.__proto__) {
+ obj[i] = void 0;
+ } else {
+ delete obj[i];
+ }
+ }
+
+ // Return the number of defined elements.
+ return first_undefined;
+ }
+
var length = ToUint32(this.length);
if (length < 2) return this;
@@ -787,6 +844,12 @@ function ArraySort(comparefn) {
}
var num_non_undefined = %RemoveArrayHoles(this, length);
+ if (num_non_undefined == -1) {
+ // There were indexed accessors in the array. Move array holes and
+    // undefineds to the end using a JavaScript function that is safe
+ // in the presence of accessors.
+ num_non_undefined = SafeRemoveArrayHoles(this);
+ }
QuickSort(this, 0, num_non_undefined);
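
SafeRemoveArrayHoles compacts with two cursors that converge from both ends: the forward cursor finds the next undefined slot, the backward cursor finds the last defined element, and that element is moved forward, with holes counted along the way so they can be re-created past the end. The cursor movement, reduced to a plain C++ array where -1 plays the role of undefined/hole (a sketch of the traversal only, not of the hole/undefined bookkeeping):

    #include <cstdio>

    // Two-pointer compaction as in SafeRemoveArrayHoles; -1 models
    // undefined/hole. Returns the number of defined elements.
    int Compact(int* a, int n) {
      int first_undefined = 0;
      int last_defined = n - 1;
      while (first_undefined < last_defined) {
        while (first_undefined < last_defined && a[first_undefined] != -1)
          first_undefined++;                     // find first undefined
        while (first_undefined < last_defined && a[last_defined] == -1)
          last_defined--;                        // find last defined
        if (first_undefined < last_defined) {
          a[first_undefined] = a[last_defined];  // fill the gap from the end
          a[last_defined] = -1;
        }
      }
      if (first_undefined < n && a[first_undefined] != -1) first_undefined++;
      return first_undefined;
    }

    int main() {
      int a[] = {5, -1, 7, -1, 9};
      std::printf("%d defined\n", Compact(a, 5));  // 3 defined: 5, 9, 7
      return 0;
    }
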
diff --git a/V8Binding/v8/src/assembler.cc b/V8Binding/v8/src/assembler.cc
index 5dba75d..9497be8 100644
--- a/V8Binding/v8/src/assembler.cc
+++ b/V8Binding/v8/src/assembler.cc
@@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -363,7 +363,7 @@ void RelocIterator::next() {
if (SetMode(DebugInfoModeFromTag(top_tag))) return;
} else {
// Otherwise, just skip over the data.
- Advance(kIntSize);
+ Advance(kIntptrSize);
}
} else {
AdvanceReadPC();
@@ -508,7 +508,7 @@ void RelocInfo::Verify() {
// Implementation of ExternalReference
ExternalReference::ExternalReference(Builtins::CFunctionId id)
- : address_(Builtins::c_function_address(id)) {}
+ : address_(Redirect(Builtins::c_function_address(id))) {}
ExternalReference::ExternalReference(Builtins::Name name)
@@ -516,15 +516,15 @@ ExternalReference::ExternalReference(Builtins::Name name)
ExternalReference::ExternalReference(Runtime::FunctionId id)
- : address_(Runtime::FunctionForId(id)->entry) {}
+ : address_(Redirect(Runtime::FunctionForId(id)->entry)) {}
ExternalReference::ExternalReference(Runtime::Function* f)
- : address_(f->entry) {}
+ : address_(Redirect(f->entry)) {}
ExternalReference::ExternalReference(const IC_Utility& ic_utility)
- : address_(ic_utility.address()) {}
+ : address_(Redirect(ic_utility.address())) {}
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference::ExternalReference(const Debug_Address& debug_address)
@@ -543,10 +543,21 @@ ExternalReference::ExternalReference(const SCTableReference& table_ref)
: address_(table_ref.address()) {}
+ExternalReference ExternalReference::perform_gc_function() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(Runtime::PerformGC)));
+}
+
+
ExternalReference ExternalReference::builtin_passed_function() {
return ExternalReference(&Builtins::builtin_passed_function);
}
+
+ExternalReference ExternalReference::random_positive_smi_function() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(V8::RandomPositiveSmi)));
+}
+
+
ExternalReference ExternalReference::the_hole_value_location() {
return ExternalReference(Factory::the_hole_value().location());
}
@@ -597,6 +608,12 @@ static double mul_two_doubles(double x, double y) {
}
+static int native_compare_doubles(double x, double y) {
+ if (x == y) return 0;
+ return x < y ? 1 : -1;
+}
+
+
ExternalReference ExternalReference::double_fp_operation(
Token::Value operation) {
typedef double BinaryFPOperation(double x, double y);
@@ -614,13 +631,23 @@ ExternalReference ExternalReference::double_fp_operation(
default:
UNREACHABLE();
}
- return ExternalReference(FUNCTION_ADDR(function));
+  // Passing true as the 2nd parameter indicates that they return an fp value.
+ return ExternalReference(Redirect(FUNCTION_ADDR(function), true));
}
+ExternalReference ExternalReference::compare_doubles() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(native_compare_doubles),
+ false));
+}
+
+
+ExternalReferenceRedirector* ExternalReference::redirector_ = NULL;
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference ExternalReference::debug_break() {
- return ExternalReference(FUNCTION_ADDR(Debug::Break));
+ return ExternalReference(Redirect(FUNCTION_ADDR(Debug::Break)));
}
diff --git a/V8Binding/v8/src/assembler.h b/V8Binding/v8/src/assembler.h
index 66f952a..879ee54 100644
--- a/V8Binding/v8/src/assembler.h
+++ b/V8Binding/v8/src/assembler.h
@@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_
@@ -183,7 +183,7 @@ class RelocInfo BASE_EMBEDDED {
intptr_t data() const { return data_; }
// Apply a relocation by delta bytes
- INLINE(void apply(int delta));
+ INLINE(void apply(intptr_t delta));
// Read/modify the code target in the branch/call instruction
// this relocation applies to;
@@ -265,8 +265,12 @@ class RelocInfoWriter BASE_EMBEDDED {
last_pc_ = pc;
}
- // Max size (bytes) of a written RelocInfo.
- static const int kMaxSize = 12;
+ // Max size (bytes) of a written RelocInfo. Longest encoding is
+ // ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, ExtraTag, data_delta.
+ // On ia32 and arm this is 1 + 4 + 1 + 1 + 1 + 4 = 12.
+  // On x64 this is 1 + 4 + 1 + 1 + 1 + 8 = 16.
+ // Here we use the maximum of the two.
+ static const int kMaxSize = 16;
private:
inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);
@@ -352,10 +356,15 @@ class SCTableReference;
class Debug_Address;
#endif
-// An ExternalReference represents a C++ address called from the generated
-// code. All references to C++ functions and must be encapsulated in an
-// ExternalReference instance. This is done in order to track the origin of
-// all external references in the code.
+
+typedef void* ExternalReferenceRedirector(void* original, bool fp_return);
+
+
+// An ExternalReference represents a C++ address used in the generated
+// code. All references to C++ functions and variables must be encapsulated in
+// an ExternalReference instance. This is done in order to track the origin of
+// all external references in the code so that they can be bound to the correct
+// addresses when deserializing a heap.
class ExternalReference BASE_EMBEDDED {
public:
explicit ExternalReference(Builtins::CFunctionId id);
@@ -382,7 +391,9 @@ class ExternalReference BASE_EMBEDDED {
// pattern. This means that they have to be added to the
// ExternalReferenceTable in serialize.cc manually.
+ static ExternalReference perform_gc_function();
static ExternalReference builtin_passed_function();
+ static ExternalReference random_positive_smi_function();
// Static variable Factory::the_hole_value.location()
static ExternalReference the_hole_value_location();
@@ -402,8 +413,9 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference new_space_allocation_limit_address();
static ExternalReference double_fp_operation(Token::Value operation);
+ static ExternalReference compare_doubles();
- Address address() const {return address_;}
+ Address address() const {return reinterpret_cast<Address>(address_);}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Function Debug::Break()
@@ -413,11 +425,30 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference debug_step_in_fp_address();
#endif
+ // This lets you register a function that rewrites all external references.
+ // Used by the ARM simulator to catch calls to external references.
+ static void set_redirector(ExternalReferenceRedirector* redirector) {
+ ASSERT(redirector_ == NULL); // We can't stack them.
+ redirector_ = redirector;
+ }
+
private:
explicit ExternalReference(void* address)
- : address_(reinterpret_cast<Address>(address)) {}
+ : address_(address) {}
+
+ static ExternalReferenceRedirector* redirector_;
+
+ static void* Redirect(void* address, bool fp_return = false) {
+ if (redirector_ == NULL) return address;
+ return (*redirector_)(address, fp_return);
+ }
+
+ static void* Redirect(Address address_arg, bool fp_return = false) {
+ void* address = reinterpret_cast<void*>(address_arg);
+ return redirector_ == NULL ? address : (*redirector_)(address, fp_return);
+ }
- Address address_;
+ void* address_;
};
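
Every ExternalReference constructor now routes its address through Redirect(), which is the identity until a redirector is registered (the ARM simulator installs one in Simulator::Initialize). The hook, reduced to a standalone sketch with invented names:

    #include <cstdio>

    typedef void* Redirector(void* original, bool fp_return);

    static Redirector* g_redirector = nullptr;  // ~ ExternalReference::redirector_

    void set_redirector(Redirector* r) { g_redirector = r; }

    void* Redirect(void* address, bool fp_return = false) {
      // Identity unless a simulator has hooked external references.
      return g_redirector == nullptr ? address
                                     : g_redirector(address, fp_return);
    }

    // Example redirector that just logs and passes the address through.
    void* LoggingRedirector(void* original, bool fp_return) {
      std::printf("redirect %p (fp_return=%d)\n", original, fp_return);
      return original;
    }

    int main() {
      set_redirector(&LoggingRedirector);
      int x = 0;
      Redirect(&x);  // now goes through the hook
      return 0;
    }
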
diff --git a/V8Binding/v8/src/ast.cc b/V8Binding/v8/src/ast.cc
index eef8da7..d8a3232 100644
--- a/V8Binding/v8/src/ast.cc
+++ b/V8Binding/v8/src/ast.cc
@@ -68,7 +68,7 @@ VariableProxy::VariableProxy(Handle<String> name,
// names must be canonicalized for fast equality checks
ASSERT(name->IsSymbol());
// at least one access, otherwise no need for a VariableProxy
- var_uses_.RecordAccess(1);
+ var_uses_.RecordRead(1);
}
diff --git a/V8Binding/v8/src/ast.h b/V8Binding/v8/src/ast.h
index 80a4aa5..15d762f 100644
--- a/V8Binding/v8/src/ast.h
+++ b/V8Binding/v8/src/ast.h
@@ -802,13 +802,20 @@ class VariableProxy: public Expression {
Variable* AsVariable() {
return this == NULL || var_ == NULL ? NULL : var_->AsVariable();
}
+
virtual bool IsValidLeftHandSide() {
return var_ == NULL ? true : var_->IsValidLeftHandSide();
}
+
bool IsVariable(Handle<String> n) {
return !is_this() && name().is_identical_to(n);
}
+ bool IsArguments() {
+ Variable* variable = AsVariable();
+ return (variable == NULL) ? false : variable->is_arguments();
+ }
+
// If this assertion fails it means that some code has tried to
// treat the special "this" variable as an ordinary variable with
// the name "this".
@@ -890,12 +897,13 @@ class Slot: public Expression {
virtual void Accept(AstVisitor* v);
// Type testing & conversion
- virtual Slot* AsSlot() { return this; }
+ virtual Slot* AsSlot() { return this; }
// Accessors
- Variable* var() const { return var_; }
- Type type() const { return type_; }
- int index() const { return index_; }
+ Variable* var() const { return var_; }
+ Type type() const { return type_; }
+ int index() const { return index_; }
+ bool is_arguments() const { return var_->is_arguments(); }
private:
Variable* var_;
diff --git a/V8Binding/v8/src/bootstrapper.cc b/V8Binding/v8/src/bootstrapper.cc
index 89c92b0..ffd432a 100644
--- a/V8Binding/v8/src/bootstrapper.cc
+++ b/V8Binding/v8/src/bootstrapper.cc
@@ -580,8 +580,7 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
js_global_function->initial_map()->set_is_hidden_prototype();
SetExpectedNofProperties(js_global_function, 100);
- object = Handle<JSGlobalObject>::cast(
- Factory::NewJSObject(js_global_function, TENURED));
+ object = Factory::NewJSGlobalObject(js_global_function);
}
// Set the global context for the global object.
@@ -1113,8 +1112,8 @@ bool Genesis::InstallNatives() {
}
#ifdef V8_HOST_ARCH_64_BIT
- // TODO(X64): Reenable remaining initialization when code generation works.
- return true;
+ // TODO(X64): Remove this when inline caches work.
+ FLAG_use_ic = false;
#endif // V8_HOST_ARCH_64_BIT
@@ -1214,6 +1213,17 @@ bool Genesis::InstallSpecialObjects() {
Handle<JSObject>(js_global->builtins()), DONT_ENUM);
}
+ if (FLAG_capture_stack_traces) {
+ Handle<Object> Error = GetProperty(js_global, "Error");
+ if (Error->IsJSObject()) {
+ Handle<String> name = Factory::LookupAsciiSymbol("captureStackTraces");
+ SetProperty(Handle<JSObject>::cast(Error),
+ name,
+ Factory::true_value(),
+ NONE);
+ }
+ }
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Expose the debug global object in global if a name for it is specified.
if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
@@ -1445,6 +1455,9 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
// Set the property.
Handle<String> key = Handle<String>(String::cast(raw_key));
Handle<Object> value = Handle<Object>(properties->ValueAt(i));
+ if (value->IsJSGlobalPropertyCell()) {
+ value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value());
+ }
PropertyDetails details = properties->DetailsAt(i);
SetProperty(to, key, value, details.attributes());
}
diff --git a/V8Binding/v8/src/builtins.cc b/V8Binding/v8/src/builtins.cc
index 1c43f7a..0648e54 100644
--- a/V8Binding/v8/src/builtins.cc
+++ b/V8Binding/v8/src/builtins.cc
@@ -720,7 +720,8 @@ void Builtins::Setup(bool create_heap_objects) {
// bootstrapper.
Bootstrapper::AddFixup(Code::cast(code), &masm);
// Log the event and add the code to the builtins array.
- LOG(CodeCreateEvent("Builtin", Code::cast(code), functions[i].s_name));
+ LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
+ Code::cast(code), functions[i].s_name));
builtins_[i] = code;
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {
diff --git a/V8Binding/v8/src/builtins.h b/V8Binding/v8/src/builtins.h
index 6e0f832..0f4a610 100644
--- a/V8Binding/v8/src/builtins.h
+++ b/V8Binding/v8/src/builtins.h
@@ -51,6 +51,7 @@ namespace internal {
#define BUILTIN_LIST_A(V) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructCall, BUILTIN, UNINITIALIZED) \
+ V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED) \
\
@@ -210,6 +211,7 @@ class Builtins : public AllStatic {
static void Generate_Adaptor(MacroAssembler* masm, CFunctionId id);
static void Generate_JSConstructCall(MacroAssembler* masm);
+ static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
diff --git a/V8Binding/v8/src/code-stubs.cc b/V8Binding/v8/src/code-stubs.cc
index b14ede1..ee60332 100644
--- a/V8Binding/v8/src/code-stubs.cc
+++ b/V8Binding/v8/src/code-stubs.cc
@@ -66,7 +66,7 @@ Handle<Code> CodeStub::GetCode() {
// Add unresolved entries in the code to the fixup list.
Bootstrapper::AddFixup(*code, &masm);
- LOG(CodeCreateEvent("Stub", *code, GetName()));
+ LOG(CodeCreateEvent(Logger::STUB_TAG, *code, GetName()));
Counters::total_stubs_code_size.Increment(code->instruction_size());
#ifdef ENABLE_DISASSEMBLER
@@ -133,6 +133,10 @@ const char* CodeStub::MajorName(CodeStub::Major major_key) {
return "InvokeBuiltin";
case JSExit:
return "JSExit";
+ case ConvertToDouble:
+ return "ConvertToDouble";
+ case WriteInt32ToHeapNumber:
+ return "WriteInt32ToHeapNumber";
default:
UNREACHABLE();
return NULL;
diff --git a/V8Binding/v8/src/code-stubs.h b/V8Binding/v8/src/code-stubs.h
index 183a64a..76ec787 100644
--- a/V8Binding/v8/src/code-stubs.h
+++ b/V8Binding/v8/src/code-stubs.h
@@ -41,6 +41,8 @@ class CodeStub BASE_EMBEDDED {
SmiOp,
Compare,
RecordWrite, // Last stub that allows stub calls inside.
+ ConvertToDouble,
+ WriteInt32ToHeapNumber,
StackCheck,
UnarySub,
RevertToNumber,
diff --git a/V8Binding/v8/src/codegen.cc b/V8Binding/v8/src/codegen.cc
index f46269f..b7297d7 100644
--- a/V8Binding/v8/src/codegen.cc
+++ b/V8Binding/v8/src/codegen.cc
@@ -225,7 +225,7 @@ Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* flit,
bool CodeGenerator::ShouldGenerateLog(Expression* type) {
ASSERT(type != NULL);
- if (!Logger::IsEnabled()) return false;
+ if (!Logger::is_logging()) return false;
Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
if (FLAG_log_regexp) {
static Vector<const char> kRegexp = CStrVector("regexp");
@@ -302,12 +302,12 @@ Handle<JSFunction> CodeGenerator::BuildBoilerplate(FunctionLiteral* node) {
}
// Function compilation complete.
- LOG(CodeCreateEvent("Function", *code, *node->name()));
+ LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *node->name()));
#ifdef ENABLE_OPROFILE_AGENT
OProfileAgent::CreateNativeCodeRegion(*node->name(),
- code->address(),
- code->ExecutableSize());
+ code->instruction_start(),
+ code->instruction_size());
#endif
}
@@ -416,13 +416,18 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
{&CodeGenerator::GenerateIsSmi, "_IsSmi"},
{&CodeGenerator::GenerateIsNonNegativeSmi, "_IsNonNegativeSmi"},
{&CodeGenerator::GenerateIsArray, "_IsArray"},
+ {&CodeGenerator::GenerateIsConstructCall, "_IsConstructCall"},
{&CodeGenerator::GenerateArgumentsLength, "_ArgumentsLength"},
{&CodeGenerator::GenerateArgumentsAccess, "_Arguments"},
+ {&CodeGenerator::GenerateClassOf, "_ClassOf"},
{&CodeGenerator::GenerateValueOf, "_ValueOf"},
{&CodeGenerator::GenerateSetValueOf, "_SetValueOf"},
{&CodeGenerator::GenerateFastCharCodeAt, "_FastCharCodeAt"},
{&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"},
- {&CodeGenerator::GenerateLog, "_Log"}
+ {&CodeGenerator::GenerateLog, "_Log"},
+ {&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
+ {&CodeGenerator::GenerateMathSin, "_Math_sin"},
+ {&CodeGenerator::GenerateMathCos, "_Math_cos"}
};
@@ -469,129 +474,6 @@ bool CodeGenerator::PatchInlineRuntimeEntry(Handle<String> name,
}
-void CodeGenerator::GenerateFastCaseSwitchStatement(SwitchStatement* node,
- int min_index,
- int range,
- int default_index) {
- ZoneList<CaseClause*>* cases = node->cases();
- int length = cases->length();
-
- // Label pointer per number in range.
- SmartPointer<Label*> case_targets(NewArray<Label*>(range));
-
- // Label per switch case.
- SmartPointer<Label> case_labels(NewArray<Label>(length));
-
- Label* fail_label =
- default_index >= 0 ? &(case_labels[default_index]) : NULL;
-
- // Populate array of label pointers for each number in the range.
- // Initally put the failure label everywhere.
- for (int i = 0; i < range; i++) {
- case_targets[i] = fail_label;
- }
-
- // Overwrite with label of a case for the number value of that case.
- // (In reverse order, so that if the same label occurs twice, the
- // first one wins).
- for (int i = length - 1; i >= 0 ; i--) {
- CaseClause* clause = cases->at(i);
- if (!clause->is_default()) {
- Object* label_value = *(clause->label()->AsLiteral()->handle());
- int case_value = Smi::cast(label_value)->value();
- case_targets[case_value - min_index] = &(case_labels[i]);
- }
- }
-
- GenerateFastCaseSwitchJumpTable(node,
- min_index,
- range,
- fail_label,
- Vector<Label*>(*case_targets, range),
- Vector<Label>(*case_labels, length));
-}
-
-
-void CodeGenerator::GenerateFastCaseSwitchCases(
- SwitchStatement* node,
- Vector<Label> case_labels,
- VirtualFrame* start_frame) {
- ZoneList<CaseClause*>* cases = node->cases();
- int length = cases->length();
-
- for (int i = 0; i < length; i++) {
- Comment cmnt(masm(), "[ Case clause");
-
- // We may not have a virtual frame if control flow did not fall
- // off the end of the previous case. In that case, use the start
- // frame. Otherwise, we have to merge the existing one to the
- // start frame as part of the previous case.
- if (!has_valid_frame()) {
- RegisterFile empty;
- SetFrame(new VirtualFrame(start_frame), &empty);
- } else {
- frame_->MergeTo(start_frame);
- }
- masm()->bind(&case_labels[i]);
- VisitStatements(cases->at(i)->statements());
- }
-}
-
-
-bool CodeGenerator::TryGenerateFastCaseSwitchStatement(SwitchStatement* node) {
- // TODO(238): Due to issue 238, fast case switches can crash on ARM
- // and possibly IA32. They are disabled for now.
- // See http://code.google.com/p/v8/issues/detail?id=238
- return false;
-
- ZoneList<CaseClause*>* cases = node->cases();
- int length = cases->length();
-
- if (length < FastCaseSwitchMinCaseCount()) {
- return false;
- }
-
- // Test whether fast-case should be used.
- int default_index = -1;
- int min_index = Smi::kMaxValue;
- int max_index = Smi::kMinValue;
- for (int i = 0; i < length; i++) {
- CaseClause* clause = cases->at(i);
- if (clause->is_default()) {
- if (default_index >= 0) {
- // There is more than one default label. Defer to the normal case
- // for error.
- return false;
- }
- default_index = i;
- } else {
- Expression* label = clause->label();
- Literal* literal = label->AsLiteral();
- if (literal == NULL) {
- return false; // fail fast case
- }
- Object* value = *(literal->handle());
- if (!value->IsSmi()) {
- return false;
- }
- int int_value = Smi::cast(value)->value();
- min_index = Min(int_value, min_index);
- max_index = Max(int_value, max_index);
- }
- }
-
- // All labels are known to be Smis.
- int range = max_index - min_index + 1; // |min..max| inclusive
- if (range / FastCaseSwitchMaxOverheadFactor() > length) {
- return false; // range of labels is too sparse
- }
-
- // Optimization accepted, generate code.
- GenerateFastCaseSwitchStatement(node, min_index, range, default_index);
- return true;
-}
-
-
void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
if (FLAG_debug_info) {
int pos = fun->start_position();
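
The code deleted above implemented the classic dense jump-table lowering for switch statements: find the range of Smi case labels, reject the optimization when the range is too sparse, fill a table with the fail label, then overwrite entries in reverse order so the first of two identical labels wins. The standalone sketch below models that selection and table-building logic; kMinCaseCount and kMaxOverheadFactor are assumed values, since the platform-specific FastCaseSwitchMinCaseCount() and FastCaseSwitchMaxOverheadFactor() are not shown in this diff.

#include <algorithm>
#include <cstdio>
#include <vector>

constexpr int kMinCaseCount = 5;       // assumed minimum case count
constexpr int kMaxOverheadFactor = 10; // assumed sparseness limit

// Returns a jump table mapping (value - min) to a case index, or an empty
// vector when the normal compare-and-branch sequence should be used.
// Entries of -1 stand in for the default/fail label.
std::vector<int> BuildJumpTable(const std::vector<int>& labels) {
  if (static_cast<int>(labels.size()) < kMinCaseCount) return {};
  int min = *std::min_element(labels.begin(), labels.end());
  int max = *std::max_element(labels.begin(), labels.end());
  int range = max - min + 1;  // |min..max| inclusive
  if (range / kMaxOverheadFactor > static_cast<int>(labels.size())) {
    return {};  // range of labels is too sparse
  }
  std::vector<int> table(range, -1);  // initially the fail label everywhere
  // Walk cases in reverse so that the first of two identical labels wins.
  for (int i = static_cast<int>(labels.size()) - 1; i >= 0; i--) {
    table[labels[i] - min] = i;
  }
  return table;
}

int main() {
  std::vector<int> labels = {3, 4, 5, 7, 8, 9};
  for (int entry : BuildJumpTable(labels)) std::printf("%d ", entry);
  std::printf("\n");  // prints: 0 1 2 -1 3 4 5
  return 0;
}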
diff --git a/V8Binding/v8/src/codegen.h b/V8Binding/v8/src/codegen.h
index e1758e1..243d87c 100644
--- a/V8Binding/v8/src/codegen.h
+++ b/V8Binding/v8/src/codegen.h
@@ -61,12 +61,6 @@
// FindInlineRuntimeLUT
// CheckForInlineRuntimeCall
// PatchInlineRuntimeEntry
-// GenerateFastCaseSwitchStatement
-// GenerateFastCaseSwitchCases
-// TryGenerateFastCaseSwitchStatement
-// GenerateFastCaseSwitchJumpTable
-// FastCaseSwitchMinCaseCount
-// FastCaseSwitchMaxOverheadFactor
// CodeForFunctionPosition
// CodeForReturnPosition
// CodeForStatementPosition
@@ -83,6 +77,8 @@ enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
#include "x64/codegen-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm.h"
+#else
+#error Unsupported target architecture.
#endif
#include "register-allocator.h"
@@ -228,19 +224,63 @@ class StackCheckStub : public CodeStub {
};
+class InstanceofStub: public CodeStub {
+ public:
+ InstanceofStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return Instanceof; }
+ int MinorKey() { return 0; }
+};
+
+
class UnarySubStub : public CodeStub {
public:
- UnarySubStub() { }
+ explicit UnarySubStub(bool overwrite)
+ : overwrite_(overwrite) { }
private:
+ bool overwrite_;
Major MajorKey() { return UnarySub; }
- int MinorKey() { return 0; }
+ int MinorKey() { return overwrite_ ? 1 : 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "UnarySubStub"; }
};
+class CompareStub: public CodeStub {
+ public:
+ CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Condition cc_;
+ bool strict_;
+
+ Major MajorKey() { return Compare; }
+
+ int MinorKey();
+
+ // Branch to the label if the given object isn't a symbol.
+ void BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch);
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("CompareStub (cc %d), (strict %s)\n",
+ static_cast<int>(cc_),
+ strict_ ? "true" : "false");
+ }
+#endif
+};
+
+
class CEntryStub : public CodeStub {
public:
CEntryStub() { }
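
The stubs above distinguish their variants through the minor key: UnarySubStub now encodes overwrite_ as 0 or 1, and CompareStub declares an out-of-line MinorKey() for its (cc_, strict_) pair. The sketch below shows one plausible packing; the exact bit layout is an assumption, since CompareStub::MinorKey()'s body is not part of this diff.

#include <cassert>

enum Condition { kEqual = 0, kLess = 1, kGreater = 2 };

// Pack the stub parameters into an integer key so each distinct
// configuration gets its own cached code object: strictness in bit 0,
// the condition in the higher bits (assumed layout).
int MinorKey(Condition cc, bool strict) {
  return (static_cast<int>(cc) << 1) | (strict ? 1 : 0);
}

int main() {
  // Distinct (condition, strictness) pairs map to distinct keys, so a
  // strict and a non-strict comparison never share one compiled stub.
  assert(MinorKey(kEqual, true) != MinorKey(kEqual, false));
  assert(MinorKey(kLess, false) != MinorKey(kGreater, false));
  return 0;
}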
diff --git a/V8Binding/v8/src/compilation-cache.cc b/V8Binding/v8/src/compilation-cache.cc
index 421b676..fd706af 100644
--- a/V8Binding/v8/src/compilation-cache.cc
+++ b/V8Binding/v8/src/compilation-cache.cc
@@ -32,28 +32,123 @@
namespace v8 {
namespace internal {
-enum {
- // The number of script generations tell how many GCs a script can
- // survive in the compilation cache, before it will be flushed if it
- // hasn't been used.
- NUMBER_OF_SCRIPT_GENERATIONS = 5,
-
- // The compilation cache consists of tables - one for each entry
- // kind plus extras for the script generations.
- NUMBER_OF_TABLE_ENTRIES =
- CompilationCache::LAST_ENTRY + NUMBER_OF_SCRIPT_GENERATIONS
+
+// The number of sub-caches covering the different types of cached data.
+static const int kSubCacheCount = 4;
+
+// The number of generations for each sub cache.
+static const int kScriptGenerations = 5;
+static const int kEvalGlobalGenerations = 2;
+static const int kEvalContextualGenerations = 2;
+static const int kRegExpGenerations = 2;
+
+// Initial size of each compilation cache table allocated.
+static const int kInitialCacheSize = 64;
+
+// The compilation cache consists of several generational sub-caches which use
+// this class as a base class. A sub-cache contains a compilation cache table
+// for each generation of the sub-cache. The same source code string has
+// different compiled code for scripts and evals, so internally we use separate
+// sub-caches to avoid getting the wrong kind of result when looking up.
+class CompilationSubCache {
+ public:
+ explicit CompilationSubCache(int generations): generations_(generations) {
+ tables_ = NewArray<Object*>(generations);
+ }
+
+ // Get the compilation cache tables for a specific generation.
+ Handle<CompilationCacheTable> GetTable(int generation);
+
+ // Age the sub-cache by evicting the oldest generation and creating a new
+ // young generation.
+ void Age();
+
+ // GC support.
+ void Iterate(ObjectVisitor* v);
+
+ // Clear this sub-cache evicting all its content.
+ void Clear();
+
+ // Number of generations in this sub-cache.
+ inline int generations() { return generations_; }
+
+ private:
+ int generations_; // Number of generations.
+ Object** tables_; // Compilation cache tables - one for each generation.
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
};
+// Sub-cache for scripts.
+class CompilationCacheScript : public CompilationSubCache {
+ public:
+ explicit CompilationCacheScript(int generations)
+ : CompilationSubCache(generations) { }
+
+ Handle<JSFunction> Lookup(Handle<String> source,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset);
+ void Put(Handle<String> source, Handle<JSFunction> boilerplate);
+
+ private:
+ bool HasOrigin(Handle<JSFunction> boilerplate,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
+};
+
+
+// Sub-cache for eval scripts.
+class CompilationCacheEval: public CompilationSubCache {
+ public:
+ explicit CompilationCacheEval(int generations)
+ : CompilationSubCache(generations) { }
+
+ Handle<JSFunction> Lookup(Handle<String> source, Handle<Context> context);
+
+ void Put(Handle<String> source,
+ Handle<Context> context,
+ Handle<JSFunction> boilerplate);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
+};
+
+
+// Sub-cache for regular expressions.
+class CompilationCacheRegExp: public CompilationSubCache {
+ public:
+ explicit CompilationCacheRegExp(int generations)
+ : CompilationSubCache(generations) { }
+
+ Handle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
+
+ void Put(Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<FixedArray> data);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
+};
+
+
+// Statically allocate all the sub-caches.
+static CompilationCacheScript script(kScriptGenerations);
+static CompilationCacheEval eval_global(kEvalGlobalGenerations);
+static CompilationCacheEval eval_contextual(kEvalContextualGenerations);
+static CompilationCacheRegExp reg_exp(kRegExpGenerations);
+static CompilationSubCache* subcaches[kSubCacheCount] =
+ {&script, &eval_global, &eval_contextual, &reg_exp};
+
+
// Current enable state of the compilation cache.
static bool enabled = true;
static inline bool IsEnabled() {
return FLAG_compilation_cache && enabled;
}
-// Keep separate tables for the different entry kinds.
-static Object* tables[NUMBER_OF_TABLE_ENTRIES] = { 0, };
-
static Handle<CompilationCacheTable> AllocateTable(int size) {
CALL_HEAP_FUNCTION(CompilationCacheTable::Allocate(size),
@@ -61,54 +156,40 @@ static Handle<CompilationCacheTable> AllocateTable(int size) {
}
-static Handle<CompilationCacheTable> GetTable(int index) {
- ASSERT(index >= 0 && index < NUMBER_OF_TABLE_ENTRIES);
+Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
+ ASSERT(generation < generations_);
Handle<CompilationCacheTable> result;
- if (tables[index]->IsUndefined()) {
- static const int kInitialCacheSize = 64;
+ if (tables_[generation]->IsUndefined()) {
result = AllocateTable(kInitialCacheSize);
- tables[index] = *result;
+ tables_[generation] = *result;
} else {
- CompilationCacheTable* table = CompilationCacheTable::cast(tables[index]);
+ CompilationCacheTable* table =
+ CompilationCacheTable::cast(tables_[generation]);
result = Handle<CompilationCacheTable>(table);
}
return result;
}
-static Handle<JSFunction> Lookup(Handle<String> source,
- Handle<Context> context,
- CompilationCache::Entry entry) {
- // Make sure not to leak the table into the surrounding handle
- // scope. Otherwise, we risk keeping old tables around even after
- // having cleared the cache.
- Object* result;
- { HandleScope scope;
- Handle<CompilationCacheTable> table = GetTable(entry);
- result = table->LookupEval(*source, *context);
- }
- if (result->IsJSFunction()) {
- return Handle<JSFunction>(JSFunction::cast(result));
- } else {
- return Handle<JSFunction>::null();
+void CompilationSubCache::Age() {
+ // Age the generations, implicitly killing off the oldest.
+ for (int i = generations_ - 1; i > 0; i--) {
+ tables_[i] = tables_[i - 1];
}
+
+ // Set the first generation as unborn.
+ tables_[0] = Heap::undefined_value();
}
-static Handle<FixedArray> Lookup(Handle<String> source,
- JSRegExp::Flags flags) {
- // Make sure not to leak the table into the surrounding handle
- // scope. Otherwise, we risk keeping old tables around even after
- // having cleared the cache.
- Object* result;
- { HandleScope scope;
- Handle<CompilationCacheTable> table = GetTable(CompilationCache::REGEXP);
- result = table->LookupRegExp(*source, flags);
- }
- if (result->IsFixedArray()) {
- return Handle<FixedArray>(FixedArray::cast(result));
- } else {
- return Handle<FixedArray>::null();
+void CompilationSubCache::Iterate(ObjectVisitor* v) {
+ v->VisitPointers(&tables_[0], &tables_[generations_]);
+}
+
+
+void CompilationSubCache::Clear() {
+ for (int i = 0; i < generations_; i++) {
+ tables_[i] = Heap::undefined_value();
}
}
@@ -116,10 +197,10 @@ static Handle<FixedArray> Lookup(Handle<String> source,
// We only re-use a cached function for some script source code if the
// script originates from the same place. This is to avoid issues
// when reporting errors, etc.
-static bool HasOrigin(Handle<JSFunction> boilerplate,
- Handle<Object> name,
- int line_offset,
- int column_offset) {
+bool CompilationCacheScript::HasOrigin(Handle<JSFunction> boilerplate,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset) {
Handle<Script> script =
Handle<Script>(Script::cast(boilerplate->shared()->script()));
// If the script name isn't set, the boilerplate script should have
@@ -141,24 +222,17 @@ static bool HasOrigin(Handle<JSFunction> boilerplate,
// be cached in the same script generation. Currently the first use
// will be cached, but subsequent code from different source / line
// won't.
-Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
+Handle<JSFunction> CompilationCacheScript::Lookup(Handle<String> source,
Handle<Object> name,
int line_offset,
int column_offset) {
- if (!IsEnabled()) {
- return Handle<JSFunction>::null();
- }
-
- // Use an int for the generation index, so value range propagation
- // in gcc 4.3+ won't assume it can only go up to LAST_ENTRY when in
- // fact it can go up to SCRIPT + NUMBER_OF_SCRIPT_GENERATIONS.
- int generation = SCRIPT;
Object* result = NULL;
+ int generation;
// Probe the script generation tables. Make sure not to leak handles
// into the caller's handle scope.
{ HandleScope scope;
- while (generation < SCRIPT + NUMBER_OF_SCRIPT_GENERATIONS) {
+ for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
Handle<Object> probe(table->Lookup(*source));
if (probe->IsJSFunction()) {
@@ -170,20 +244,18 @@ Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
break;
}
}
- // Go to the next generation.
- generation++;
}
}
static void* script_histogram = StatsTable::CreateHistogram(
"V8.ScriptCache",
0,
- NUMBER_OF_SCRIPT_GENERATIONS,
- NUMBER_OF_SCRIPT_GENERATIONS + 1);
+ kScriptGenerations,
+ kScriptGenerations + 1);
if (script_histogram != NULL) {
// The level kScriptGenerations is equivalent to a cache miss.
- StatsTable::AddHistogramSample(script_histogram, generation - SCRIPT);
+ StatsTable::AddHistogramSample(script_histogram, generation);
}
// Once outside the manacles of the handle scope, we need to recheck
@@ -194,7 +266,7 @@ Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
ASSERT(HasOrigin(boilerplate, name, line_offset, column_offset));
// If the script was found in a later generation, we promote it to
// the first generation to let it survive longer in the cache.
- if (generation != SCRIPT) PutScript(source, boilerplate);
+ if (generation != 0) Put(source, boilerplate);
Counters::compilation_cache_hits.Increment();
return boilerplate;
} else {
@@ -204,19 +276,118 @@ Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
}
+void CompilationCacheScript::Put(Handle<String> source,
+ Handle<JSFunction> boilerplate) {
+ HandleScope scope;
+ ASSERT(boilerplate->IsBoilerplate());
+ Handle<CompilationCacheTable> table = GetTable(0);
+ CALL_HEAP_FUNCTION_VOID(table->Put(*source, *boilerplate));
+}
+
+
+Handle<JSFunction> CompilationCacheEval::Lookup(Handle<String> source,
+ Handle<Context> context) {
+ // Make sure not to leak the table into the surrounding handle
+ // scope. Otherwise, we risk keeping old tables around even after
+ // having cleared the cache.
+ Object* result = NULL;
+ int generation;
+ { HandleScope scope;
+ for (generation = 0; generation < generations(); generation++) {
+ Handle<CompilationCacheTable> table = GetTable(generation);
+ result = table->LookupEval(*source, *context);
+ if (result->IsJSFunction()) {
+ break;
+ }
+ }
+ }
+ if (result->IsJSFunction()) {
+ Handle<JSFunction> boilerplate(JSFunction::cast(result));
+ if (generation != 0) {
+ Put(source, context, boilerplate);
+ }
+ Counters::compilation_cache_hits.Increment();
+ return boilerplate;
+ } else {
+ Counters::compilation_cache_misses.Increment();
+ return Handle<JSFunction>::null();
+ }
+}
+
+
+void CompilationCacheEval::Put(Handle<String> source,
+ Handle<Context> context,
+ Handle<JSFunction> boilerplate) {
+ HandleScope scope;
+ ASSERT(boilerplate->IsBoilerplate());
+ Handle<CompilationCacheTable> table = GetTable(0);
+ CALL_HEAP_FUNCTION_VOID(table->PutEval(*source, *context, *boilerplate));
+}
+
+
+Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
+ JSRegExp::Flags flags) {
+ // Make sure not to leak the table into the surrounding handle
+ // scope. Otherwise, we risk keeping old tables around even after
+ // having cleared the cache.
+ Object* result = NULL;
+ int generation;
+ { HandleScope scope;
+ for (generation = 0; generation < generations(); generation++) {
+ Handle<CompilationCacheTable> table = GetTable(generation);
+ result = table->LookupRegExp(*source, flags);
+ if (result->IsFixedArray()) {
+ break;
+ }
+ }
+ }
+ if (result->IsFixedArray()) {
+ Handle<FixedArray> data(FixedArray::cast(result));
+ if (generation != 0) {
+ Put(source, flags, data);
+ }
+ Counters::compilation_cache_hits.Increment();
+ return data;
+ } else {
+ Counters::compilation_cache_misses.Increment();
+ return Handle<FixedArray>::null();
+ }
+}
+
+
+void CompilationCacheRegExp::Put(Handle<String> source,
+ JSRegExp::Flags flags,
+ Handle<FixedArray> data) {
+ HandleScope scope;
+ Handle<CompilationCacheTable> table = GetTable(0);
+ CALL_HEAP_FUNCTION_VOID(table->PutRegExp(*source, flags, *data));
+}
+
+
+Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
+ Handle<Object> name,
+ int line_offset,
+ int column_offset) {
+ if (!IsEnabled()) {
+ return Handle<JSFunction>::null();
+ }
+
+ return script.Lookup(source, name, line_offset, column_offset);
+}
+
+
Handle<JSFunction> CompilationCache::LookupEval(Handle<String> source,
Handle<Context> context,
- Entry entry) {
+ bool is_global) {
if (!IsEnabled()) {
return Handle<JSFunction>::null();
}
- ASSERT(entry == EVAL_GLOBAL || entry == EVAL_CONTEXTUAL);
- Handle<JSFunction> result = Lookup(source, context, entry);
- if (result.is_null()) {
- Counters::compilation_cache_misses.Increment();
+ Handle<JSFunction> result;
+ if (is_global) {
+ result = eval_global.Lookup(source, context);
} else {
- Counters::compilation_cache_hits.Increment();
+ result = eval_contextual.Lookup(source, context);
}
return result;
}
@@ -228,13 +399,7 @@ Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
return Handle<FixedArray>::null();
}
- Handle<FixedArray> result = Lookup(source, flags);
- if (result.is_null()) {
- Counters::compilation_cache_misses.Increment();
- } else {
- Counters::compilation_cache_hits.Increment();
- }
- return result;
+ return reg_exp.Lookup(source, flags);
}
@@ -244,16 +409,14 @@ void CompilationCache::PutScript(Handle<String> source,
return;
}
- HandleScope scope;
ASSERT(boilerplate->IsBoilerplate());
- Handle<CompilationCacheTable> table = GetTable(SCRIPT);
- CALL_HEAP_FUNCTION_VOID(table->Put(*source, *boilerplate));
+ script.Put(source, boilerplate);
}
void CompilationCache::PutEval(Handle<String> source,
Handle<Context> context,
- Entry entry,
+ bool is_global,
Handle<JSFunction> boilerplate) {
if (!IsEnabled()) {
return;
@@ -261,8 +424,11 @@ void CompilationCache::PutEval(Handle<String> source,
HandleScope scope;
ASSERT(boilerplate->IsBoilerplate());
- Handle<CompilationCacheTable> table = GetTable(entry);
- CALL_HEAP_FUNCTION_VOID(table->PutEval(*source, *context, *boilerplate));
+ if (is_global) {
+ eval_global.Put(source, context, boilerplate);
+ } else {
+ eval_contextual.Put(source, context, boilerplate);
+ }
}
@@ -274,31 +440,27 @@ void CompilationCache::PutRegExp(Handle<String> source,
return;
}
- HandleScope scope;
- Handle<CompilationCacheTable> table = GetTable(REGEXP);
- CALL_HEAP_FUNCTION_VOID(table->PutRegExp(*source, flags, *data));
+ reg_exp.Put(source, flags, data);
}
void CompilationCache::Clear() {
- for (int i = 0; i < NUMBER_OF_TABLE_ENTRIES; i++) {
- tables[i] = Heap::undefined_value();
+ for (int i = 0; i < kSubCacheCount; i++) {
+ subcaches[i]->Clear();
}
}
void CompilationCache::Iterate(ObjectVisitor* v) {
- v->VisitPointers(&tables[0], &tables[NUMBER_OF_TABLE_ENTRIES]);
+ for (int i = 0; i < kSubCacheCount; i++) {
+ subcaches[i]->Iterate(v);
+ }
}
void CompilationCache::MarkCompactPrologue() {
- ASSERT(LAST_ENTRY == SCRIPT);
- for (int i = NUMBER_OF_TABLE_ENTRIES - 1; i > SCRIPT; i--) {
- tables[i] = tables[i - 1];
- }
- for (int j = 0; j <= LAST_ENTRY; j++) {
- tables[j] = Heap::undefined_value();
+ for (int i = 0; i < kSubCacheCount; i++) {
+ subcaches[i]->Age();
}
}
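
The rewrite above replaces one flat table array with per-kind generational sub-caches: lookups probe from the youngest generation to the oldest, a hit in an older generation is re-inserted into generation 0 so frequently used entries survive longer, and the mark-compact prologue ages each sub-cache by shifting its generations down one slot. The following self-contained model captures that lifecycle, with std::string standing in for source strings and compiled boilerplates.

#include <cassert>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

class SubCache {
 public:
  explicit SubCache(int generations) : tables_(generations) {}

  // Probe young-to-old; a hit in an old generation is promoted to
  // generation 0, mirroring the "Put on generation != 0" pattern above.
  std::optional<std::string> Lookup(const std::string& source) {
    for (size_t gen = 0; gen < tables_.size(); gen++) {
      auto it = tables_[gen].find(source);
      if (it != tables_[gen].end()) {
        if (gen != 0) Put(source, it->second);  // promote to survive longer
        return it->second;
      }
    }
    return std::nullopt;
  }

  void Put(const std::string& source, const std::string& code) {
    tables_[0][source] = code;
  }

  void Age() {  // called from the mark-compact prologue
    for (size_t i = tables_.size() - 1; i > 0; i--) {
      tables_[i] = std::move(tables_[i - 1]);
    }
    tables_[0].clear();  // the new young generation starts out empty
  }

 private:
  std::vector<std::unordered_map<std::string, std::string>> tables_;
};

int main() {
  SubCache scripts(5);  // kScriptGenerations
  scripts.Put("src", "code");
  scripts.Age();
  assert(scripts.Lookup("src") == "code");  // hit in gen 1, promoted to gen 0
  for (int i = 0; i < 5; i++) scripts.Age();
  assert(!scripts.Lookup("src"));  // evicted after enough unused generations
  return 0;
}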
diff --git a/V8Binding/v8/src/compilation-cache.h b/V8Binding/v8/src/compilation-cache.h
index 4545def..3487c08 100644
--- a/V8Binding/v8/src/compilation-cache.h
+++ b/V8Binding/v8/src/compilation-cache.h
@@ -34,20 +34,9 @@ namespace internal {
// The compilation cache keeps function boilerplates for compiled
// scripts and evals. The boilerplates are looked up using the source
-// string as the key.
+// string as the key. For regular expressions the compilation data is cached.
class CompilationCache {
public:
- // The same source code string has different compiled code for
- // scripts and evals. Internally, we use separate caches to avoid
- // getting the wrong kind of entry when looking up.
- enum Entry {
- EVAL_GLOBAL,
- EVAL_CONTEXTUAL,
- REGEXP,
- SCRIPT,
- LAST_ENTRY = SCRIPT
- };
-
// Finds the script function boilerplate for a source
// string. Returns an empty handle if the cache doesn't contain a
// script for the given source string with the right origin.
@@ -61,7 +50,7 @@ class CompilationCache {
// contain a script for the given source string.
static Handle<JSFunction> LookupEval(Handle<String> source,
Handle<Context> context,
- Entry entry);
+ bool is_global);
// Returns the regexp data associated with the given regexp if it
// is in cache, otherwise an empty handle.
@@ -77,7 +66,7 @@ class CompilationCache {
// with the boilerplate. This may overwrite an existing mapping.
static void PutEval(Handle<String> source,
Handle<Context> context,
- Entry entry,
+ bool is_global,
Handle<JSFunction> boilerplate);
// Associate the (source, flags) pair to the given regexp data.
diff --git a/V8Binding/v8/src/compiler.cc b/V8Binding/v8/src/compiler.cc
index ea7c134..aecdfb9 100644
--- a/V8Binding/v8/src/compiler.cc
+++ b/V8Binding/v8/src/compiler.cc
@@ -175,17 +175,21 @@ static Handle<JSFunction> MakeFunction(bool is_global,
#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
// Log the code generation for the script. Check explicit whether logging is
// to avoid allocating when not required.
- if (Logger::IsEnabled() || OProfileAgent::is_enabled()) {
+ if (Logger::is_logging() || OProfileAgent::is_enabled()) {
if (script->name()->IsString()) {
SmartPointer<char> data =
String::cast(script->name())->ToCString(DISALLOW_NULLS);
- LOG(CodeCreateEvent(is_eval ? "Eval" : "Script", *code, *data));
- OProfileAgent::CreateNativeCodeRegion(*data, code->address(),
- code->ExecutableSize());
+ LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
+ *code, *data));
+ OProfileAgent::CreateNativeCodeRegion(*data,
+ code->instruction_start(),
+ code->instruction_size());
} else {
- LOG(CodeCreateEvent(is_eval ? "Eval" : "Script", *code, ""));
+ LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
+ *code, ""));
OProfileAgent::CreateNativeCodeRegion(is_eval ? "Eval" : "Script",
- code->address(), code->ExecutableSize());
+ code->instruction_start(),
+ code->instruction_size());
}
}
#endif
@@ -291,14 +295,11 @@ Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
// The VM is in the COMPILER state until exiting this function.
VMState state(COMPILER);
- CompilationCache::Entry entry = is_global
- ? CompilationCache::EVAL_GLOBAL
- : CompilationCache::EVAL_CONTEXTUAL;
// Do a lookup in the compilation cache; if the entry is not there,
// invoke the compiler and add the result to the cache.
Handle<JSFunction> result =
- CompilationCache::LookupEval(source, context, entry);
+ CompilationCache::LookupEval(source, context, is_global);
if (result.is_null()) {
// Create a script object describing the script to be compiled.
Handle<Script> script = Factory::NewScript(source);
@@ -310,7 +311,7 @@ Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
NULL,
NULL);
if (!result.is_null()) {
- CompilationCache::PutEval(source, context, entry, result);
+ CompilationCache::PutEval(source, context, is_global, result);
}
}
@@ -372,24 +373,23 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
// Log the code generation. If source information is available, include the
// script name and line number. Check explicitly whether logging is enabled,
// as finding the line number is not free.
- if (Logger::IsEnabled() || OProfileAgent::is_enabled()) {
+ if (Logger::is_logging() || OProfileAgent::is_enabled()) {
Handle<String> func_name(name->length() > 0 ?
*name : shared->inferred_name());
if (script->name()->IsString()) {
- int line_num = GetScriptLineNumber(script, start_position);
- if (line_num > 0) {
- line_num += script->line_offset()->value() + 1;
- }
- LOG(CodeCreateEvent("LazyCompile", *code, *func_name,
+ int line_num = GetScriptLineNumber(script, start_position) + 1;
+ LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name,
String::cast(script->name()), line_num));
OProfileAgent::CreateNativeCodeRegion(*func_name,
String::cast(script->name()),
- line_num, code->address(),
- code->ExecutableSize());
+ line_num,
+ code->instruction_start(),
+ code->instruction_size());
} else {
- LOG(CodeCreateEvent("LazyCompile", *code, *func_name));
- OProfileAgent::CreateNativeCodeRegion(*func_name, code->address(),
- code->ExecutableSize());
+ LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name));
+ OProfileAgent::CreateNativeCodeRegion(*func_name,
+ code->instruction_start(),
+ code->instruction_size());
}
}
#endif
diff --git a/V8Binding/v8/src/contexts.cc b/V8Binding/v8/src/contexts.cc
index 873c23c..ead73ee 100644
--- a/V8Binding/v8/src/contexts.cc
+++ b/V8Binding/v8/src/contexts.cc
@@ -149,7 +149,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
// check parameter locals in context
int param_index = ScopeInfo<>::ParameterIndex(*code, *name);
if (param_index >= 0) {
- // slot found
+ // slot found.
int index =
ScopeInfo<>::ContextSlotIndex(*code,
Heap::arguments_shadow_symbol(),
diff --git a/V8Binding/v8/src/conversions.cc b/V8Binding/v8/src/conversions.cc
index 7f63d9b..2a3db7b 100644
--- a/V8Binding/v8/src/conversions.cc
+++ b/V8Binding/v8/src/conversions.cc
@@ -327,7 +327,7 @@ static double InternalStringToDouble(S* str,
index++;
if (!SubStringEquals(str, index, "Infinity"))
return JUNK_STRING_VALUE;
- result = is_negative ? -INFINITY : INFINITY;
+ result = is_negative ? -V8_INFINITY : V8_INFINITY;
index += 8;
}
}
diff --git a/V8Binding/v8/src/d8.cc b/V8Binding/v8/src/d8.cc
index ee845ee..e02c80a 100644
--- a/V8Binding/v8/src/d8.cc
+++ b/V8Binding/v8/src/d8.cc
@@ -460,6 +460,16 @@ void Shell::Initialize() {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Set the security token of the debug context to allow access.
i::Debug::debug_context()->set_security_token(i::Heap::undefined_value());
+
+ // Start the debugger agent if requested.
+ if (i::FLAG_debugger_agent) {
+ v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port);
+ }
+
+ // Start the in-process debugger if requested.
+ if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
+ v8::Debug::SetDebugEventListener(HandleDebugEvent);
+ }
#endif
}
@@ -721,16 +731,6 @@ int Shell::Main(int argc, char* argv[]) {
RunRemoteDebugger(i::FLAG_debugger_port);
return 0;
}
-
- // Start the debugger agent if requested.
- if (i::FLAG_debugger_agent) {
- v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port);
- }
-
- // Start the in-process debugger if requested.
- if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
- v8::Debug::SetDebugEventListener(HandleDebugEvent);
- }
#endif
}
if (run_shell)
diff --git a/V8Binding/v8/src/d8.js b/V8Binding/v8/src/d8.js
index a8db9e1..2d52170 100644
--- a/V8Binding/v8/src/d8.js
+++ b/V8Binding/v8/src/d8.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// How crappy is it that I have to implement completely basic stuff
-// like this myself? Answer: very.
String.prototype.startsWith = function (str) {
if (str.length > this.length)
return false;
@@ -100,6 +98,13 @@ Debug.ScriptCompilationType = { Host: 0,
JSON: 2 };
+// The different types of scopes, matching the constants in runtime.cc.
+Debug.ScopeType = { Global: 0,
+ Local: 1,
+ With: 2,
+ Closure: 3 };
+
+
// Current debug state.
const kNoFrame = -1;
Debug.State = {
@@ -297,6 +302,14 @@ function DebugRequest(cmd_line) {
this.request_ = this.frameCommandToJSONRequest_(args);
break;
+ case 'scopes':
+ this.request_ = this.scopesCommandToJSONRequest_(args);
+ break;
+
+ case 'scope':
+ this.request_ = this.scopeCommandToJSONRequest_(args);
+ break;
+
case 'print':
case 'p':
this.request_ = this.printCommandToJSONRequest_(args);
@@ -396,13 +409,17 @@ DebugRequest.prototype.createRequest = function(command) {
// Create a JSON request for the evaluation command.
DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) {
+ // Global variable used to store whether a handle was requested.
+ lookup_handle = null;
// Check if the expression is a handle id in the form #<handle>#.
var handle_match = expression.match(/^#([0-9]*)#$/);
if (handle_match) {
+ // Remember the handle requested in a global variable.
+ lookup_handle = parseInt(handle_match[1]);
// Build a lookup request.
var request = this.createRequest('lookup');
request.arguments = {};
- request.arguments.handle = parseInt(handle_match[1]);
+ request.arguments.handles = [ lookup_handle ];
return request.toJSONProtocol();
} else {
// Build an evaluate request.
@@ -561,6 +578,27 @@ DebugRequest.prototype.frameCommandToJSONRequest_ = function(args) {
};
+// Create a JSON request for the scopes command.
+DebugRequest.prototype.scopesCommandToJSONRequest_ = function(args) {
+ // Build a scopes request from the text command.
+ var request = this.createRequest('scopes');
+ return request.toJSONProtocol();
+};
+
+
+// Create a JSON request for the scope command.
+DebugRequest.prototype.scopeCommandToJSONRequest_ = function(args) {
+ // Build a scope request from the text command.
+ var request = this.createRequest('scope');
+ args = args.split(/\s*[ ]+\s*/g);
+ if (args.length > 0 && args[0].length > 0) {
+ request.arguments = {};
+ request.arguments.number = args[0];
+ }
+ return request.toJSONProtocol();
+};
+
+
// Create a JSON request for the print command.
DebugRequest.prototype.printCommandToJSONRequest_ = function(args) {
// Build an evaluate request from the text command.
@@ -785,8 +823,11 @@ DebugRequest.prototype.helpCommand_ = function(args) {
print('clear <breakpoint #>');
print('backtrace [n] | [-n] | [from to]');
print('frame <frame #>');
+ print('scopes');
+ print('scope <scope #>');
print('step [in | next | out| min [step count]]');
print('print <expression>');
+ print('dir <expression>');
print('source [from line [num lines]]');
print('scripts');
print('continue');
@@ -796,7 +837,11 @@ DebugRequest.prototype.helpCommand_ = function(args) {
function formatHandleReference_(value) {
- return '#' + value.handle() + '#';
+ if (value.handle() >= 0) {
+ return '#' + value.handle() + '#';
+ } else {
+ return '#Transient#';
+ }
}
@@ -820,10 +865,14 @@ function formatObject_(value, include_properties) {
result += value.propertyName(i);
result += ': ';
var property_value = value.propertyValue(i);
- if (property_value && property_value.type()) {
- result += property_value.type();
- } else {
+ if (property_value instanceof ProtocolReference) {
result += '<no type>';
+ } else {
+ if (property_value && property_value.type()) {
+ result += property_value.type();
+ } else {
+ result += '<no type>';
+ }
}
result += ' ';
result += formatHandleReference_(property_value);
@@ -834,6 +883,33 @@ function formatObject_(value, include_properties) {
}
+function formatScope_(scope) {
+ var result = '';
+ var index = scope.index;
+ result += '#' + (index <= 9 ? '0' : '') + index;
+ result += ' ';
+ switch (scope.type) {
+ case Debug.ScopeType.Global:
+ result += 'Global, ';
+ result += '#' + scope.object.ref + '#';
+ break;
+ case Debug.ScopeType.Local:
+ result += 'Local';
+ break;
+ case Debug.ScopeType.With:
+ result += 'With, ';
+ result += '#' + scope.object.ref + '#';
+ break;
+ case Debug.ScopeType.Closure:
+ result += 'Closure';
+ break;
+ default:
+ result += 'UNKNOWN';
+ }
+ return result;
+}
+
+
// Convert a JSON response to text for display in a text based debugger.
function DebugResponseDetails(response) {
details = {text:'', running:false}
@@ -883,12 +959,41 @@ function DebugResponseDetails(response) {
Debug.State.currentFrame = body.index;
break;
+ case 'scopes':
+ if (body.totalScopes == 0) {
+ result = '(no scopes)';
+ } else {
+ result = 'Scopes #' + body.fromScope + ' to #' +
+ (body.toScope - 1) + ' of ' + body.totalScopes + '\n';
+ for (var i = 0; i < body.scopes.length; i++) {
+ if (i != 0) {
+ result += '\n';
+ }
+ result += formatScope_(body.scopes[i]);
+ }
+ }
+ details.text = result;
+ break;
+
+ case 'scope':
+ result += formatScope_(body);
+ result += '\n';
+ var scope_object_value = response.lookup(body.object.ref);
+ result += formatObject_(scope_object_value, true);
+ details.text = result;
+ break;
+
case 'evaluate':
case 'lookup':
if (last_cmd == 'p' || last_cmd == 'print') {
result = body.text;
} else {
- var value = response.bodyValue();
+ var value;
+ if (lookup_handle != null) {
+ value = response.bodyValue(lookup_handle);
+ } else {
+ value = response.bodyValue();
+ }
if (value.isObject()) {
result += formatObject_(value, true);
} else {
@@ -1105,7 +1210,7 @@ ProtocolPackage.prototype.body = function() {
ProtocolPackage.prototype.bodyValue = function(index) {
- if (index) {
+ if (index != null) {
return new ProtocolValue(this.packet_.body[index], this);
} else {
return new ProtocolValue(this.packet_.body, this);
diff --git a/V8Binding/v8/src/date-delay.js b/V8Binding/v8/src/date-delay.js
index f06e8b7..5a109c6 100644
--- a/V8Binding/v8/src/date-delay.js
+++ b/V8Binding/v8/src/date-delay.js
@@ -28,7 +28,6 @@
// This file relies on the fact that the following declarations have been made
// in v8natives.js:
-// const $isNaN = GlobalIsNaN;
// const $isFinite = GlobalIsFinite;
// -------------------------------------------------------------------
@@ -41,6 +40,11 @@
// changes to these properties.
const $Date = global.Date;
+// Helper function to throw error.
+function ThrowDateTypeError() {
+ throw new $TypeError('this is not a Date object.');
+}
+
// ECMA 262 - 15.9.1.2
function Day(time) {
return FLOOR(time/msPerDay);
@@ -115,7 +119,7 @@ function EquivalentYear(year) {
// - leap year.
// - week day of first day.
var time = TimeFromYear(year);
- var recent_year = (InLeapYear(time) == 0 ? 1967 : 1956) +
+ var recent_year = (InLeapYear(time) == 0 ? 1967 : 1956) +
(WeekDay(time) * 12) % 28;
// Find the year in the range 2008..2037 that is equivalent mod 28.
// Add 3*28 to give a positive argument to the modulus operator.
@@ -129,23 +133,84 @@ function EquivalentTime(t) {
// (measured in whole seconds based on the 1970 epoch).
// We solve this by mapping the time to a year with same leap-year-ness
// and same starting day for the year. The ECMAscript specification says
- // we must do this, but for compatability with other browsers, we use
+ // we must do this, but for compatibility with other browsers, we use
// the actual year if it is in the range 1970..2037
if (t >= 0 && t <= 2.1e12) return t;
var day = MakeDay(EquivalentYear(YearFromTime(t)), MonthFromTime(t), DateFromTime(t));
return TimeClip(MakeDate(day, TimeWithinDay(t)));
}
-var daylight_cache_time = $NaN;
-var daylight_cache_offset;
+// Because computing the DST offset is a pretty expensive operation,
+// we keep a cache of the last computed offset along with a time interval
+// within which we know the cache is valid.
+var DST_offset_cache = {
+ // Cached DST offset.
+ offset: 0,
+ // Time interval where the cached offset is valid.
+ start: 0, end: -1,
+ // Size of next interval expansion.
+ increment: 0
+};
+
+
+// NOTE: The implementation relies on the fact that no time zones have
+// more than one daylight savings offset change per month.
function DaylightSavingsOffset(t) {
- if (t == daylight_cache_time) {
- return daylight_cache_offset;
+ // Load the cache object from the builtins object.
+ var cache = DST_offset_cache;
+
+ // Cache the start and the end in local variables for fast access.
+ var start = cache.start;
+ var end = cache.end;
+
+ if (start <= t) {
+ // If the time fits in the cached interval, return the cached offset.
+ if (t <= end) return cache.offset;
+
+ // Compute a possible new interval end.
+ var new_end = end + cache.increment;
+
+ if (t <= new_end) {
+ var end_offset = %DateDaylightSavingsOffset(EquivalentTime(new_end));
+ if (cache.offset == end_offset) {
+ // If the offset at the end of the new interval still matches
+ // the offset in the cache, we grow the cached time interval
+ // and return the offset.
+ cache.end = new_end;
+ cache.increment = msPerMonth;
+ return end_offset;
+ } else {
+ var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
+ if (offset == end_offset) {
+ // The offset at the given time is equal to the offset at the
+ // new end of the interval, which means we have just skipped
+ // the point in time where the DST offset change occurred. Update
+ // the interval to reflect this and reset the increment.
+ cache.start = t;
+ cache.end = new_end;
+ cache.increment = msPerMonth;
+ } else {
+ // The interval contains a DST offset change and the given time is
+ // before it. Adjust the increment to avoid a linear search for
+ // the offset change point and change the end of the interval.
+ cache.increment /= 3;
+ cache.end = t;
+ }
+ // Update the offset in the cache and return it.
+ cache.offset = offset;
+ return offset;
+ }
+ }
}
+
+ // Compute the DST offset for the time and shrink the cache interval
+ // to only contain the time. This allows fast repeated DST offset
+ // computations for the same time.
var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
- daylight_cache_time = t;
- daylight_cache_offset = offset;
+ cache.offset = offset;
+ cache.start = cache.end = t;
+ cache.increment = msPerMonth;
return offset;
}
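
The rewritten DaylightSavingsOffset above upgrades a single-timestamp cache to an interval cache: the DST offset is piecewise constant in time, so a cached value holds on a whole interval, the interval is grown by a month-sized increment while a probe at the extended end agrees with the cached offset, and the growth step is narrowed (increment /= 3) once an offset change lands inside the probed extension. Below is a compact C++ model of the same strategy; ExpensiveOffset and its single change point are fabricated for illustration.

#include <cassert>

// One month in milliseconds, matching the msPerMonth increment above.
const long long kMonth = 30LL * 24 * 3600 * 1000;

long long expensive_calls = 0;

// Stands in for %DateDaylightSavingsOffset: piecewise constant, with a
// single fabricated change point at 100 months.
long long ExpensiveOffset(long long t) {
  expensive_calls++;
  return t < 100 * kMonth ? 0 : 60 * 60 * 1000;
}

struct IntervalCache {
  long long offset = 0, start = 0, end = -1, increment = 0;

  long long Get(long long t) {
    if (start <= t) {
      if (t <= end) return offset;             // pure cache hit
      long long new_end = end + increment;
      if (t <= new_end) {
        long long end_offset = ExpensiveOffset(new_end);
        if (offset == end_offset) {            // safe to grow the interval
          end = new_end;
          increment = kMonth;
          return end_offset;
        }
        long long t_offset = ExpensiveOffset(t);
        if (t_offset == end_offset) {          // the change point was skipped
          start = t; end = new_end; increment = kMonth;
        } else {                               // the change point lies ahead
          increment /= 3; end = t;
        }
        offset = t_offset;
        return t_offset;
      }
    }
    offset = ExpensiveOffset(t);               // miss: restart the interval
    start = end = t;
    increment = kMonth;
    return offset;
  }
};

int main() {
  IntervalCache cache;
  for (long long m = 0; m < 12; m++) assert(cache.Get(m * kMonth) == 0);
  long long calls = expensive_calls;
  assert(cache.Get(5 * kMonth) == 0);  // inside the grown interval...
  assert(expensive_calls == calls);    // ...so no expensive recomputation
  return 0;
}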
@@ -154,7 +219,7 @@ var timezone_cache_time = $NaN;
var timezone_cache_timezone;
function LocalTimezone(t) {
- if(t == timezone_cache_time) {
+ if (t == timezone_cache_time) {
return timezone_cache_timezone;
}
var timezone = %DateLocalTimezone(EquivalentTime(t));
@@ -171,7 +236,7 @@ function WeekDay(time) {
var local_time_offset = %DateLocalTimeOffset();
function LocalTime(time) {
- if ($isNaN(time)) return time;
+ if (NUMBER_IS_NAN(time)) return time;
return time + local_time_offset + DaylightSavingsOffset(time);
}
@@ -181,7 +246,7 @@ function LocalTimeNoCheck(time) {
function UTC(time) {
- if ($isNaN(time)) return time;
+ if (NUMBER_IS_NAN(time)) return time;
var tmp = time - local_time_offset;
return tmp - DaylightSavingsOffset(tmp);
}
@@ -363,7 +428,7 @@ function TimeClip(time) {
%SetCode($Date, function(year, month, date, hours, minutes, seconds, ms) {
- if (%IsConstructCall()) {
+ if (%_IsConstructCall()) {
// ECMA 262 - 15.9.3
var argc = %_ArgumentsLength();
if (argc == 0) {
@@ -393,7 +458,7 @@ function TimeClip(time) {
minutes = argc > 4 ? ToNumber(minutes) : 0;
seconds = argc > 5 ? ToNumber(seconds) : 0;
ms = argc > 6 ? ToNumber(ms) : 0;
- year = (!$isNaN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
+ year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
var day = MakeDay(year, month, date);
var time = MakeTime(hours, minutes, seconds, ms);
@@ -407,105 +472,105 @@ function TimeClip(time) {
// Helper functions.
function GetTimeFrom(aDate) {
- if (IS_DATE(aDate)) return %_ValueOf(aDate);
- throw new $TypeError('this is not a Date object.');
+ return DATE_VALUE(aDate);
}
function GetMillisecondsFrom(aDate) {
- var t = GetTimeFrom(aDate);
- if ($isNaN(t)) return t;
+ var t = DATE_VALUE(aDate);
+ if (NUMBER_IS_NAN(t)) return t;
return msFromTime(LocalTimeNoCheck(t));
}
function GetUTCMillisecondsFrom(aDate) {
- var t = GetTimeFrom(aDate);
- if ($isNaN(t)) return t;
+ var t = DATE_VALUE(aDate);
+ if (NUMBER_IS_NAN(t)) return t;
return msFromTime(t);
}
function GetSecondsFrom(aDate) {
- var t = GetTimeFrom(aDate);
- if ($isNaN(t)) return t;
+ var t = DATE_VALUE(aDate);
+ if (NUMBER_IS_NAN(t)) return t;
return SecFromTime(LocalTimeNoCheck(t));
}
function GetUTCSecondsFrom(aDate) {
- var t = GetTimeFrom(aDate);
- if ($isNaN(t)) return t;
+ var t = DATE_VALUE(aDate);
+ if (NUMBER_IS_NAN(t)) return t;
return SecFromTime(t);
}
function GetMinutesFrom(aDate) {
- var t = GetTimeFrom(aDate);
- if ($isNaN(t)) return t;
+ var t = DATE_VALUE(aDate);
+ if (NUMBER_IS_NAN(t)) return t;
return MinFromTime(LocalTimeNoCheck(t));
}
function GetUTCMinutesFrom(aDate) {
- var t = GetTimeFrom(aDate);
- if ($isNaN(t)) return t;
+ var t = DATE_VALUE(aDate);
+ if (NUMBER_IS_NAN(t)) return t;
return MinFromTime(t);
}
function GetHoursFrom(aDate) {
- var t = GetTimeFrom(aDate);
- if ($isNaN(t)) return t;
+ var t = DATE_VALUE(aDate);
+ if (NUMBER_IS_NAN(t)) return t;
return HourFromTime(LocalTimeNoCheck(t));
}
function GetUTCHoursFrom(aDate) {
- var t = GetTimeFrom(aDate);
- if ($isNaN(t)) return t;
+ var t = DATE_VALUE(aDate);
+ if (NUMBER_IS_NAN(t)) return t;
return HourFromTime(t);
}
function GetFullYearFrom(aDate) {
- var t = GetTimeFrom(aDate);
- if ($isNaN(t)) return t;
- return YearFromTime(LocalTimeNoCheck(t));
+ var t = DATE_VALUE(aDate);
+ if (NUMBER_IS_NAN(t)) return t;
+ // Ignore the DST offset for year computations.
+ return YearFromTime(t + local_time_offset);
}
function GetUTCFullYearFrom(aDate) {
- var t = GetTimeFrom(aDate);
- if ($isNaN(t)) return t;
+ var t = DATE_VALUE(aDate);
+ if (NUMBER_IS_NAN(t)) return t;
return YearFromTime(t);
}
function GetMonthFrom(aDate) {
- var t = GetTimeFrom(aDate);
- if ($isNaN(t)) return t;
+ var t = DATE_VALUE(aDate);
+ if (NUMBER_IS_NAN(t)) return t;
return MonthFromTime(LocalTimeNoCheck(t));
}
function GetUTCMonthFrom(aDate) {
- var t = GetTimeFrom(aDate);
- if ($isNaN(t)) return t;
+ var t = DATE_VALUE(aDate);
+ if (NUMBER_IS_NAN(t)) return t;
return MonthFromTime(t);
}
function GetDateFrom(aDate) {
- var t = GetTimeFrom(aDate);
- if ($isNaN(t)) return t;
+ var t = DATE_VALUE(aDate);
+ if (NUMBER_IS_NAN(t)) return t;
return DateFromTime(LocalTimeNoCheck(t));
}
function GetUTCDateFrom(aDate) {
- var t = GetTimeFrom(aDate);
- if ($isNaN(t)) return t;
+ var t = DATE_VALUE(aDate);
+ if (NUMBER_IS_NAN(t)) return t;
return DateFromTime(t);
}
@@ -597,7 +662,7 @@ function DateUTC(year, month, date, hours, minutes, seconds, ms) {
minutes = argc > 4 ? ToNumber(minutes) : 0;
seconds = argc > 5 ? ToNumber(seconds) : 0;
ms = argc > 6 ? ToNumber(ms) : 0;
- year = (!$isNaN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
+ year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
var day = MakeDay(year, month, date);
var time = MakeTime(hours, minutes, seconds, ms);
@@ -614,24 +679,24 @@ function DateNow() {
// ECMA 262 - 15.9.5.2
function DateToString() {
- var t = GetTimeFrom(this);
- if ($isNaN(t)) return kInvalidDate;
+ var t = DATE_VALUE(this);
+ if (NUMBER_IS_NAN(t)) return kInvalidDate;
return DatePrintString(LocalTimeNoCheck(t)) + LocalTimezoneString(t);
}
// ECMA 262 - 15.9.5.3
function DateToDateString() {
- var t = GetTimeFrom(this);
- if ($isNaN(t)) return kInvalidDate;
+ var t = DATE_VALUE(this);
+ if (NUMBER_IS_NAN(t)) return kInvalidDate;
return DateString(LocalTimeNoCheck(t));
}
// ECMA 262 - 15.9.5.4
function DateToTimeString() {
- var t = GetTimeFrom(this);
- if ($isNaN(t)) return kInvalidDate;
+ var t = DATE_VALUE(this);
+ if (NUMBER_IS_NAN(t)) return kInvalidDate;
var lt = LocalTimeNoCheck(t);
return TimeString(lt) + LocalTimezoneString(lt);
}
@@ -645,16 +710,16 @@ function DateToLocaleString() {
// ECMA 262 - 15.9.5.6
function DateToLocaleDateString() {
- var t = GetTimeFrom(this);
- if ($isNaN(t)) return kInvalidDate;
+ var t = DATE_VALUE(this);
+ if (NUMBER_IS_NAN(t)) return kInvalidDate;
return LongDateString(LocalTimeNoCheck(t));
}
// ECMA 262 - 15.9.5.7
function DateToLocaleTimeString() {
- var t = GetTimeFrom(this);
- if ($isNaN(t)) return kInvalidDate;
+ var t = DATE_VALUE(this);
+ if (NUMBER_IS_NAN(t)) return kInvalidDate;
var lt = LocalTimeNoCheck(t);
return TimeString(lt);
}
@@ -662,13 +727,13 @@ function DateToLocaleTimeString() {
// ECMA 262 - 15.9.5.8
function DateValueOf() {
- return GetTimeFrom(this);
+ return DATE_VALUE(this);
}
// ECMA 262 - 15.9.5.9
function DateGetTime() {
- return GetTimeFrom(this);
+ return DATE_VALUE(this);
}
@@ -710,16 +775,16 @@ function DateGetUTCDate() {
// ECMA 262 - 15.9.5.16
function DateGetDay() {
- var t = GetTimeFrom(this);
- if ($isNaN(t)) return t;
+ var t = %_ValueOf(this);
+ if (NUMBER_IS_NAN(t)) return t;
return WeekDay(LocalTimeNoCheck(t));
}
// ECMA 262 - 15.9.5.17
function DateGetUTCDay() {
- var t = GetTimeFrom(this);
- if ($isNaN(t)) return t;
+ var t = %_ValueOf(this);
+ if (NUMBER_IS_NAN(t)) return t;
return WeekDay(t);
}
@@ -774,22 +839,22 @@ function DateGetUTCMilliseconds() {
// ECMA 262 - 15.9.5.26
function DateGetTimezoneOffset() {
- var t = GetTimeFrom(this);
- if ($isNaN(t)) return t;
+ var t = DATE_VALUE(this);
+ if (NUMBER_IS_NAN(t)) return t;
return (t - LocalTimeNoCheck(t)) / msPerMinute;
}
// ECMA 262 - 15.9.5.27
function DateSetTime(ms) {
- if (!IS_DATE(this)) throw new $TypeError('this is not a Date object.');
+ if (!IS_DATE(this)) ThrowDateTypeError();
return %_SetValueOf(this, TimeClip(ToNumber(ms)));
}
// ECMA 262 - 15.9.5.28
function DateSetMilliseconds(ms) {
- var t = LocalTime(GetTimeFrom(this));
+ var t = LocalTime(DATE_VALUE(this));
ms = ToNumber(ms);
var time = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
@@ -798,7 +863,7 @@ function DateSetMilliseconds(ms) {
// ECMA 262 - 15.9.5.29
function DateSetUTCMilliseconds(ms) {
- var t = GetTimeFrom(this);
+ var t = DATE_VALUE(this);
ms = ToNumber(ms);
var time = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms);
return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
@@ -807,7 +872,7 @@ function DateSetUTCMilliseconds(ms) {
// ECMA 262 - 15.9.5.30
function DateSetSeconds(sec, ms) {
- var t = LocalTime(GetTimeFrom(this));
+ var t = LocalTime(DATE_VALUE(this));
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? GetMillisecondsFrom(this) : ToNumber(ms);
var time = MakeTime(HourFromTime(t), MinFromTime(t), sec, ms);
@@ -817,7 +882,7 @@ function DateSetSeconds(sec, ms) {
// ECMA 262 - 15.9.5.31
function DateSetUTCSeconds(sec, ms) {
- var t = GetTimeFrom(this);
+ var t = DATE_VALUE(this);
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
var time = MakeTime(HourFromTime(t), MinFromTime(t), sec, ms);
@@ -827,7 +892,7 @@ function DateSetUTCSeconds(sec, ms) {
// ECMA 262 - 15.9.5.33
function DateSetMinutes(min, sec, ms) {
- var t = LocalTime(GetTimeFrom(this));
+ var t = LocalTime(DATE_VALUE(this));
min = ToNumber(min);
var argc = %_ArgumentsLength();
sec = argc < 2 ? GetSecondsFrom(this) : ToNumber(sec);
@@ -839,7 +904,7 @@ function DateSetMinutes(min, sec, ms) {
// ECMA 262 - 15.9.5.34
function DateSetUTCMinutes(min, sec, ms) {
- var t = GetTimeFrom(this);
+ var t = DATE_VALUE(this);
min = ToNumber(min);
var argc = %_ArgumentsLength();
sec = argc < 2 ? GetUTCSecondsFrom(this) : ToNumber(sec);
@@ -851,7 +916,7 @@ function DateSetUTCMinutes(min, sec, ms) {
// ECMA 262 - 15.9.5.35
function DateSetHours(hour, min, sec, ms) {
- var t = LocalTime(GetTimeFrom(this));
+ var t = LocalTime(DATE_VALUE(this));
hour = ToNumber(hour);
var argc = %_ArgumentsLength();
min = argc < 2 ? GetMinutesFrom(this) : ToNumber(min);
@@ -864,7 +929,7 @@ function DateSetHours(hour, min, sec, ms) {
// ECMA 262 - 15.9.5.34
function DateSetUTCHours(hour, min, sec, ms) {
- var t = GetTimeFrom(this);
+ var t = DATE_VALUE(this);
hour = ToNumber(hour);
var argc = %_ArgumentsLength();
min = argc < 2 ? GetUTCMinutesFrom(this) : ToNumber(min);
@@ -877,7 +942,7 @@ function DateSetUTCHours(hour, min, sec, ms) {
// ECMA 262 - 15.9.5.36
function DateSetDate(date) {
- var t = LocalTime(GetTimeFrom(this));
+ var t = LocalTime(DATE_VALUE(this));
date = ToNumber(date);
var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
@@ -886,7 +951,7 @@ function DateSetDate(date) {
// ECMA 262 - 15.9.5.37
function DateSetUTCDate(date) {
- var t = GetTimeFrom(this);
+ var t = DATE_VALUE(this);
date = ToNumber(date);
var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
@@ -895,7 +960,7 @@ function DateSetUTCDate(date) {
// ECMA 262 - 15.9.5.38
function DateSetMonth(month, date) {
- var t = LocalTime(GetTimeFrom(this));
+ var t = LocalTime(DATE_VALUE(this));
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? GetDateFrom(this) : ToNumber(date);
var day = MakeDay(YearFromTime(t), month, date);
@@ -905,7 +970,7 @@ function DateSetMonth(month, date) {
// ECMA 262 - 15.9.5.39
function DateSetUTCMonth(month, date) {
- var t = GetTimeFrom(this);
+ var t = DATE_VALUE(this);
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? GetUTCDateFrom(this) : ToNumber(date);
var day = MakeDay(YearFromTime(t), month, date);
@@ -915,8 +980,8 @@ function DateSetUTCMonth(month, date) {
// ECMA 262 - 15.9.5.40
function DateSetFullYear(year, month, date) {
- var t = GetTimeFrom(this);
- t = $isNaN(t) ? 0 : LocalTimeNoCheck(t);
+ var t = DATE_VALUE(this);
+ t = NUMBER_IS_NAN(t) ? 0 : LocalTimeNoCheck(t);
year = ToNumber(year);
var argc = %_ArgumentsLength();
month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
@@ -928,8 +993,8 @@ function DateSetFullYear(year, month, date) {
// ECMA 262 - 15.9.5.41
function DateSetUTCFullYear(year, month, date) {
- var t = GetTimeFrom(this);
- if ($isNaN(t)) t = 0;
+ var t = DATE_VALUE(this);
+ if (NUMBER_IS_NAN(t)) t = 0;
var argc = %_ArgumentsLength();
year = ToNumber(year);
month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
@@ -941,8 +1006,8 @@ function DateSetUTCFullYear(year, month, date) {
// ECMA 262 - 15.9.5.42
function DateToUTCString() {
- var t = GetTimeFrom(this);
- if ($isNaN(t)) return kInvalidDate;
+ var t = DATE_VALUE(this);
+ if (NUMBER_IS_NAN(t)) return kInvalidDate;
// Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
return WeekDays[WeekDay(t)] + ', '
+ TwoDigitString(DateFromTime(t)) + ' '
@@ -954,18 +1019,18 @@ function DateToUTCString() {
// ECMA 262 - B.2.4
function DateGetYear() {
- var t = GetTimeFrom(this);
- if ($isNaN(t)) return $NaN;
+ var t = DATE_VALUE(this);
+ if (NUMBER_IS_NAN(t)) return $NaN;
return YearFromTime(LocalTimeNoCheck(t)) - 1900;
}
// ECMA 262 - B.2.5
function DateSetYear(year) {
- var t = LocalTime(GetTimeFrom(this));
- if ($isNaN(t)) t = 0;
+ var t = LocalTime(DATE_VALUE(this));
+ if (NUMBER_IS_NAN(t)) t = 0;
year = ToNumber(year);
- if ($isNaN(year)) return %_SetValueOf(this, $NaN);
+ if (NUMBER_IS_NAN(year)) return %_SetValueOf(this, $NaN);
year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
var day = MakeDay(year, MonthFromTime(t), DateFromTime(t));
diff --git a/V8Binding/v8/src/debug-delay.js b/V8Binding/v8/src/debug-delay.js
index 0b0501f..423a118 100644
--- a/V8Binding/v8/src/debug-delay.js
+++ b/V8Binding/v8/src/debug-delay.js
@@ -388,7 +388,7 @@ ScriptBreakPoint.prototype.clear = function () {
function UpdateScriptBreakPoints(script) {
for (var i = 0; i < script_break_points.length; i++) {
if (script_break_points[i].type() == Debug.ScriptBreakPointType.ScriptName &&
- script_break_points[i].script_name() == script.name) {
+ script_break_points[i].matchesScript(script)) {
script_break_points[i].set(script);
}
}
@@ -1194,6 +1194,13 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
throw new Error('Command not specified');
}
+ // TODO(yurys): remove request.arguments.compactFormat check once
+ // ChromeDevTools are switched to 'inlineRefs'
+ if (request.arguments && (request.arguments.inlineRefs ||
+ request.arguments.compactFormat)) {
+ response.setOption('inlineRefs', true);
+ }
+
if (request.command == 'continue') {
this.continueRequest_(request, response);
} else if (request.command == 'break') {
@@ -1208,6 +1215,10 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
this.backtraceRequest_(request, response);
} else if (request.command == 'frame') {
this.frameRequest_(request, response);
+ } else if (request.command == 'scopes') {
+ this.scopesRequest_(request, response);
+ } else if (request.command == 'scope') {
+ this.scopeRequest_(request, response);
} else if (request.command == 'evaluate') {
this.evaluateRequest_(request, response);
} else if (request.command == 'lookup') {
@@ -1500,9 +1511,6 @@ DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response)
if (from_index < 0 || to_index < 0) {
return response.failed('Invalid frame number');
}
- if (request.arguments.compactFormat) {
- response.setOption('compactFormat', true);
- }
}
// Adjust the index.
@@ -1540,7 +1548,7 @@ DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
// With no arguments just keep the selected frame.
if (request.arguments) {
- index = request.arguments.number;
+ var index = request.arguments.number;
if (index < 0 || this.exec_state_.frameCount() <= index) {
return response.failed('Invalid frame number');
}
@@ -1551,6 +1559,67 @@ DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
};
+DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request,
+                                                                 response) {
+  // Get the frame for which the scope or scopes are requested. With no
+  // frameNumber argument use the currently selected frame.
+  if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
+    var frame_index = request.arguments.frameNumber;
+    if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
+      return response.failed('Invalid frame number');
+    }
+    return this.exec_state_.frame(frame_index);
+  } else {
+    return this.exec_state_.frame();
+  }
+};
+
+
+DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
+  // No frames, no scopes.
+ if (this.exec_state_.frameCount() == 0) {
+ return response.failed('No scopes');
+ }
+
+ // Get the frame for which the scopes are requested.
+  var frame = this.frameForScopeRequest_(request, response);
+
+ // Fill all scopes for this frame.
+ var total_scopes = frame.scopeCount();
+ var scopes = [];
+ for (var i = 0; i < total_scopes; i++) {
+ scopes.push(frame.scope(i));
+ }
+  response.body = {
+    fromScope: 0,
+    toScope: total_scopes,
+    totalScopes: total_scopes,
+    scopes: scopes
+  };
+};
+
+
+DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
+  // No frames, no scopes.
+ if (this.exec_state_.frameCount() == 0) {
+ return response.failed('No scopes');
+ }
+
+ // Get the frame for which the scope is requested.
+  var frame = this.frameForScopeRequest_(request, response);
+
+ // With no scope argument just return top scope.
+ var scope_index = 0;
+ if (request.arguments && !IS_UNDEFINED(request.arguments.number)) {
+ scope_index = %ToNumber(request.arguments.number);
+ if (scope_index < 0 || frame.scopeCount() <= scope_index) {
+ return response.failed('Invalid scope number');
+ }
+ }
+
+ response.body = frame.scope(scope_index);
+};
+
+
DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
if (!request.arguments) {
return response.failed('Missing arguments');
@@ -1631,10 +1700,6 @@ DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
response.setOption('includeSource', includeSource);
}
- if (request.arguments.compactFormat) {
- response.setOption('compactFormat', true);
- }
-
// Lookup handles.
var mirrors = {};
for (var i = 0; i < handles.length; i++) {
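
For reference, a request to the new 'scope' command could look as follows, using the argument names the handlers above actually read (frameNumber for frameForScopeRequest_, number for scopeRequest_); the seq/type framing is assumed from the surrounding JSON protocol:

    #include <cstdio>

    int main() {
      // Hypothetical request: scope 1 of stack frame 0.
      const char* request =
          "{\"seq\": 117, \"type\": \"request\", \"command\": \"scope\","
          " \"arguments\": {\"frameNumber\": 0, \"number\": 1}}";
      std::puts(request);
      return 0;
    }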
diff --git a/V8Binding/v8/src/debug.cc b/V8Binding/v8/src/debug.cc
index 0daf564..e37bfb7 100644
--- a/V8Binding/v8/src/debug.cc
+++ b/V8Binding/v8/src/debug.cc
@@ -382,6 +382,7 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
// the code copy and will therefore have no effect on the running code
// keeping it from using the inlined code.
if (code->is_keyed_load_stub()) KeyedLoadIC::ClearInlinedVersion(pc());
+ if (code->is_keyed_store_stub()) KeyedStoreIC::ClearInlinedVersion(pc());
}
}
@@ -389,6 +390,19 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
void BreakLocationIterator::ClearDebugBreakAtIC() {
// Patch the code to the original invoke.
rinfo()->set_target_address(original_rinfo()->target_address());
+
+ RelocInfo::Mode mode = rmode();
+ if (RelocInfo::IsCodeTarget(mode)) {
+ Address target = original_rinfo()->target_address();
+ Handle<Code> code(Code::GetCodeFromTargetAddress(target));
+
+ // Restore the inlined version of keyed stores to get back to the
+ // fast case. We need to patch back the keyed store because no
+ // patching happens when running normally. For keyed loads, the
+ // map check will get patched back when running normally after ICs
+ // have been cleared at GC.
+ if (code->is_keyed_store_stub()) KeyedStoreIC::RestoreInlinedVersion(pc());
+ }
}
diff --git a/V8Binding/v8/src/disassembler.cc b/V8Binding/v8/src/disassembler.cc
index 95022d0..e2f908d 100644
--- a/V8Binding/v8/src/disassembler.cc
+++ b/V8Binding/v8/src/disassembler.cc
@@ -239,6 +239,13 @@ static int DecodeIt(FILE* f,
InlineCacheState ic_state = code->ic_state();
out.AddFormatted(" %s, %s", Code::Kind2String(kind),
Code::ICState2String(ic_state));
+ if (ic_state == MONOMORPHIC) {
+ PropertyType type = code->type();
+ out.AddFormatted(", %s", Code::PropertyType2String(type));
+ }
+ if (code->ic_in_loop() == IN_LOOP) {
+ out.AddFormatted(", in_loop");
+ }
if (kind == Code::CALL_IC) {
out.AddFormatted(", argc = %d", code->arguments_count());
}
diff --git a/V8Binding/v8/src/dtoa-config.c b/V8Binding/v8/src/dtoa-config.c
index 9fcd0dd..bc0a58a 100644
--- a/V8Binding/v8/src/dtoa-config.c
+++ b/V8Binding/v8/src/dtoa-config.c
@@ -77,6 +77,11 @@
#define __NO_ISOCEXT
#endif /* __MINGW32__ */
+/* On 64-bit systems, we need to make sure that a Long is only 32 bits. */
+#ifdef V8_TARGET_ARCH_X64
+#define Long int
+#endif /* V8_TARGET_ARCH_X64 */
+
/* Make sure we use the David M. Gay version of strtod(). On Linux, we
* cannot use the same name (maybe the function does not have weak
* linkage?). */
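
The Long remapping matters because dtoa.c does its double bit-twiddling through a 32-bit Long, while plain long is 64 bits on LP64 targets such as x86-64. A quick sanity check of the assumption (a sketch, not part of the patch):

    #include <cassert>

    #define Long int  /* mirrors the definition added for V8_TARGET_ARCH_X64 */

    int main(void) {
      /* dtoa treats a double as two 32-bit halves, so Long must stay 4 bytes
         even where long itself is 8. */
      assert(sizeof(Long) == 4);
      assert(sizeof(double) == 2 * sizeof(Long));
      return 0;
    }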
diff --git a/V8Binding/v8/src/execution.cc b/V8Binding/v8/src/execution.cc
index fa3c2ec..adc1872 100644
--- a/V8Binding/v8/src/execution.cc
+++ b/V8Binding/v8/src/execution.cc
@@ -38,6 +38,8 @@
#include "x64/simulator-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/simulator-arm.h"
+#else
+#error Unsupported target architecture.
#endif
#include "debug.h"
diff --git a/V8Binding/v8/src/factory.cc b/V8Binding/v8/src/factory.cc
index fad3e9c..bc48ebf 100644
--- a/V8Binding/v8/src/factory.cc
+++ b/V8Binding/v8/src/factory.cc
@@ -92,8 +92,6 @@ Handle<String> Factory::NewRawTwoByteString(int length,
Handle<String> Factory::NewConsString(Handle<String> first,
Handle<String> second) {
- if (first->length() == 0) return second;
- if (second->length() == 0) return first;
CALL_HEAP_FUNCTION(Heap::AllocateConsString(*first, *second), String);
}
@@ -621,6 +619,14 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
}
+Handle<JSGlobalObject> Factory::NewJSGlobalObject(
+ Handle<JSFunction> constructor) {
+ CALL_HEAP_FUNCTION(Heap::AllocateJSGlobalObject(*constructor),
+ JSGlobalObject);
+}
+
+
+
Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) {
CALL_HEAP_FUNCTION(Heap::AllocateJSObjectFromMap(*map, NOT_TENURED),
JSObject);
diff --git a/V8Binding/v8/src/factory.h b/V8Binding/v8/src/factory.h
index 95dbee9..40cf578 100644
--- a/V8Binding/v8/src/factory.h
+++ b/V8Binding/v8/src/factory.h
@@ -183,6 +183,10 @@ class Factory : public AllStatic {
static Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure = NOT_TENURED);
+ // JS global objects are pretenured.
+ static Handle<JSGlobalObject> NewJSGlobalObject(
+ Handle<JSFunction> constructor);
+
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
static Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
diff --git a/V8Binding/v8/src/flag-definitions.h b/V8Binding/v8/src/flag-definitions.h
index 13e41e3..983fe22 100644
--- a/V8Binding/v8/src/flag-definitions.h
+++ b/V8Binding/v8/src/flag-definitions.h
@@ -110,6 +110,7 @@ DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
DEFINE_string(natives_file, NULL, "alternative natives file")
DEFINE_bool(expose_gc, false, "expose gc extension")
+DEFINE_bool(capture_stack_traces, false, "capture stack traces")
// builtins-ia32.cc
DEFINE_bool(inline_new, true, "use fast inline allocation")
@@ -332,6 +333,8 @@ DEFINE_bool(log_gc, false,
DEFINE_bool(log_handles, false, "Log global handle events.")
DEFINE_bool(log_state_changes, false, "Log state changes.")
DEFINE_bool(log_suspect, false, "Log suspect operations.")
+DEFINE_bool(compress_log, false,
+ "Compress log to save space (makes log less human-readable).")
DEFINE_bool(prof, false,
"Log statistical profiling information (implies --log-code).")
DEFINE_bool(prof_auto, true,
diff --git a/V8Binding/v8/src/frame-element.h b/V8Binding/v8/src/frame-element.h
index d16eb48..666aabb 100644
--- a/V8Binding/v8/src/frame-element.h
+++ b/V8Binding/v8/src/frame-element.h
@@ -54,8 +54,7 @@ class FrameElement BASE_EMBEDDED {
// The default constructor creates an invalid frame element.
FrameElement() {
- value_ = StaticTypeField::encode(StaticType::UNKNOWN_TYPE)
- | TypeField::encode(INVALID)
+ value_ = TypeField::encode(INVALID)
| CopiedField::encode(false)
| SyncedField::encode(false)
| DataField::encode(0);
@@ -75,9 +74,8 @@ class FrameElement BASE_EMBEDDED {
// Factory function to construct an in-register frame element.
static FrameElement RegisterElement(Register reg,
- SyncFlag is_synced,
- StaticType static_type = StaticType()) {
- return FrameElement(REGISTER, reg, is_synced, static_type);
+ SyncFlag is_synced) {
+ return FrameElement(REGISTER, reg, is_synced);
}
// Factory function to construct a frame element whose value is known at
@@ -143,15 +141,6 @@ class FrameElement BASE_EMBEDDED {
return DataField::decode(value_);
}
- StaticType static_type() {
- return StaticType(StaticTypeField::decode(value_));
- }
-
- void set_static_type(StaticType static_type) {
- value_ = value_ & ~StaticTypeField::mask();
- value_ = value_ | StaticTypeField::encode(static_type.static_type_);
- }
-
bool Equals(FrameElement other) {
uint32_t masked_difference = (value_ ^ other.value_) & ~CopiedField::mask();
if (!masked_difference) {
@@ -184,13 +173,8 @@ class FrameElement BASE_EMBEDDED {
if (!other->is_valid()) return other;
if (!SameLocation(other)) return NULL;
- // If either is unsynced, the result is. The result static type is
- // the merge of the static types. It's safe to set it on one of the
- // frame elements, and harmless too (because we are only going to
- // merge the reaching frames and will ensure that the types are
- // coherent, and changing the static type does not emit code).
+  // If either is unsynced, the result is unsynced.
FrameElement* result = is_synced() ? other : this;
- result->set_static_type(static_type().merge(other->static_type()));
return result;
}
@@ -205,16 +189,7 @@ class FrameElement BASE_EMBEDDED {
// Used to construct memory and register elements.
FrameElement(Type type, Register reg, SyncFlag is_synced) {
- value_ = StaticTypeField::encode(StaticType::UNKNOWN_TYPE)
- | TypeField::encode(type)
- | CopiedField::encode(false)
- | SyncedField::encode(is_synced != NOT_SYNCED)
- | DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
- }
-
- FrameElement(Type type, Register reg, SyncFlag is_synced, StaticType stype) {
- value_ = StaticTypeField::encode(stype.static_type_)
- | TypeField::encode(type)
+ value_ = TypeField::encode(type)
| CopiedField::encode(false)
| SyncedField::encode(is_synced != NOT_SYNCED)
| DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
@@ -222,8 +197,7 @@ class FrameElement BASE_EMBEDDED {
// Used to construct constant elements.
FrameElement(Handle<Object> value, SyncFlag is_synced) {
- value_ = StaticTypeField::encode(StaticType::TypeOf(*value).static_type_)
- | TypeField::encode(CONSTANT)
+ value_ = TypeField::encode(CONSTANT)
| CopiedField::encode(false)
| SyncedField::encode(is_synced != NOT_SYNCED)
| DataField::encode(ConstantList()->length());
@@ -248,14 +222,13 @@ class FrameElement BASE_EMBEDDED {
value_ = value_ | DataField::encode(new_reg.code_);
}
- // Encode static type, type, copied, synced and data in one 32 bit integer.
+ // Encode type, copied, synced and data in one 32 bit integer.
uint32_t value_;
- class StaticTypeField: public BitField<StaticType::StaticTypeEnum, 0, 3> {};
- class TypeField: public BitField<Type, 3, 3> {};
- class CopiedField: public BitField<uint32_t, 6, 1> {};
- class SyncedField: public BitField<uint32_t, 7, 1> {};
- class DataField: public BitField<uint32_t, 8, 32 - 9> {};
+ class TypeField: public BitField<Type, 0, 3> {};
+ class CopiedField: public BitField<uint32_t, 3, 1> {};
+ class SyncedField: public BitField<uint32_t, 4, 1> {};
+ class DataField: public BitField<uint32_t, 5, 32 - 6> {};
friend class VirtualFrame;
};
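
With StaticTypeField removed, the remaining fields shift down to bit 0, which is what the new shift constants encode. A compilable sketch of the BitField pattern these declarations instantiate (simplified relative to V8's own template):

    #include <cstdint>

    // Each field occupies `size` bits starting at bit `shift` of one uint32_t.
    template <class T, int shift, int size>
    class BitField {
     public:
      static uint32_t mask() { return ((1u << size) - 1) << shift; }
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & mask()) >> shift);
      }
    };

    int main() {
      // After this change: type in bits 0-2, copied in bit 3, synced in
      // bit 4, data in bits 5-31.
      typedef BitField<uint32_t, 0, 3> TypeField;
      uint32_t packed = TypeField::encode(5);
      return TypeField::decode(packed) == 5 ? 0 : 1;
    }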
diff --git a/V8Binding/v8/src/frames-inl.h b/V8Binding/v8/src/frames-inl.h
index 28be430..b04cf50 100644
--- a/V8Binding/v8/src/frames-inl.h
+++ b/V8Binding/v8/src/frames-inl.h
@@ -36,6 +36,8 @@
#include "x64/frames-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/frames-arm.h"
+#else
+#error Unsupported target architecture.
#endif
namespace v8 {
@@ -43,13 +45,7 @@ namespace internal {
inline Address StackHandler::address() const {
- // NOTE: There's an obvious problem with the address of the NULL
- // stack handler. Right now, it benefits us that the subtraction
- // leads to a very high address (above everything else on the
- // stack), but maybe we should stop relying on it?
- const int displacement = StackHandlerConstants::kAddressDisplacement;
- Address address = reinterpret_cast<Address>(const_cast<StackHandler*>(this));
- return address + displacement;
+ return reinterpret_cast<Address>(const_cast<StackHandler*>(this));
}
@@ -68,13 +64,7 @@ inline bool StackHandler::includes(Address address) const {
inline void StackHandler::Iterate(ObjectVisitor* v) const {
// Stack handlers do not contain any pointers that need to be
- // traversed. The only field that have to worry about is the code
- // field which is unused and should always be uninitialized.
-#ifdef DEBUG
- const int offset = StackHandlerConstants::kCodeOffset;
- Object* code = Memory::Object_at(address() + offset);
- ASSERT(Smi::cast(code)->value() == StackHandler::kCodeNotPresent);
-#endif
+ // traversed.
}
@@ -122,11 +112,6 @@ inline Object* StandardFrame::context() const {
}
-inline Address StandardFrame::caller_sp() const {
- return pp();
-}
-
-
inline Address StandardFrame::caller_fp() const {
return Memory::Address_at(fp() + StandardFrameConstants::kCallerFPOffset);
}
@@ -157,13 +142,13 @@ inline bool StandardFrame::IsConstructFrame(Address fp) {
inline Object* JavaScriptFrame::receiver() const {
const int offset = JavaScriptFrameConstants::kReceiverOffset;
- return Memory::Object_at(pp() + offset);
+ return Memory::Object_at(caller_sp() + offset);
}
inline void JavaScriptFrame::set_receiver(Object* value) {
const int offset = JavaScriptFrameConstants::kReceiverOffset;
- Memory::Object_at(pp() + offset) = value;
+ Memory::Object_at(caller_sp() + offset) = value;
}
diff --git a/V8Binding/v8/src/frames.cc b/V8Binding/v8/src/frames.cc
index dd0ea00..5cd8332 100644
--- a/V8Binding/v8/src/frames.cc
+++ b/V8Binding/v8/src/frames.cc
@@ -49,7 +49,9 @@ class StackHandlerIterator BASE_EMBEDDED {
StackHandler* handler() const { return handler_; }
- bool done() { return handler_->address() > limit_; }
+ bool done() {
+ return handler_ == NULL || handler_->address() > limit_;
+ }
void Advance() {
ASSERT(!done());
handler_ = handler_->next();
@@ -398,7 +400,7 @@ Code* ExitFrame::code() const {
void ExitFrame::ComputeCallerState(State* state) const {
// Setup the caller state.
- state->sp = pp();
+ state->sp = caller_sp();
state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
state->pc_address
= reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset);
@@ -406,7 +408,7 @@ void ExitFrame::ComputeCallerState(State* state) const {
Address ExitFrame::GetCallerStackPointer() const {
- return fp() + ExitFrameConstants::kPPDisplacement;
+ return fp() + ExitFrameConstants::kCallerSPDisplacement;
}
@@ -451,12 +453,12 @@ bool StandardFrame::IsExpressionInsideHandler(int n) const {
Object* JavaScriptFrame::GetParameter(int index) const {
ASSERT(index >= 0 && index < ComputeParametersCount());
const int offset = JavaScriptFrameConstants::kParam0Offset;
- return Memory::Object_at(pp() + offset - (index * kPointerSize));
+ return Memory::Object_at(caller_sp() + offset - (index * kPointerSize));
}
int JavaScriptFrame::ComputeParametersCount() const {
- Address base = pp() + JavaScriptFrameConstants::kReceiverOffset;
+ Address base = caller_sp() + JavaScriptFrameConstants::kReceiverOffset;
Address limit = fp() + JavaScriptFrameConstants::kSavedRegistersOffset;
return (base - limit) / kPointerSize;
}
@@ -681,7 +683,7 @@ void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset;
const int kLimitOffset = JavaScriptFrameConstants::kReceiverOffset;
Object** base = &Memory::Object_at(fp() + kBaseOffset);
- Object** limit = &Memory::Object_at(pp() + kLimitOffset) + 1;
+ Object** limit = &Memory::Object_at(caller_sp() + kLimitOffset) + 1;
v->VisitPointers(base, limit);
}
diff --git a/V8Binding/v8/src/frames.h b/V8Binding/v8/src/frames.h
index e250609..f002e12 100644
--- a/V8Binding/v8/src/frames.h
+++ b/V8Binding/v8/src/frames.h
@@ -78,9 +78,6 @@ class StackHandler BASE_EMBEDDED {
void Cook(Code* code);
void Uncook(Code* code);
- // TODO(1233780): Get rid of the code slot in stack handlers.
- static const int kCodeNotPresent = 0;
-
private:
// Accessors.
inline State state() const;
@@ -132,7 +129,7 @@ class StackFrame BASE_EMBEDDED {
// Accessors.
Address sp() const { return state_.sp; }
Address fp() const { return state_.fp; }
- Address pp() const { return GetCallerStackPointer(); }
+ Address caller_sp() const { return GetCallerStackPointer(); }
Address pc() const { return *pc_address(); }
void set_pc(Address pc) { *pc_address() = pc; }
@@ -140,7 +137,7 @@ class StackFrame BASE_EMBEDDED {
Address* pc_address() const { return state_.pc_address; }
// Get the id of this stack frame.
- Id id() const { return static_cast<Id>(OffsetFrom(pp())); }
+ Id id() const { return static_cast<Id>(OffsetFrom(caller_sp())); }
// Checks if this frame includes any stack handlers.
bool HasHandler() const;
@@ -337,7 +334,6 @@ class StandardFrame: public StackFrame {
virtual void ComputeCallerState(State* state) const;
// Accessors.
- inline Address caller_sp() const;
inline Address caller_fp() const;
inline Address caller_pc() const;
diff --git a/V8Binding/v8/src/globals.h b/V8Binding/v8/src/globals.h
index 2b0fe15..bf83d0d 100644
--- a/V8Binding/v8/src/globals.h
+++ b/V8Binding/v8/src/globals.h
@@ -120,8 +120,10 @@ const int kIntptrSize = sizeof(intptr_t); // NOLINT
#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
+const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
#else
const int kPointerSizeLog2 = 2;
+const intptr_t kIntptrSignBit = 0x80000000;
#endif
const int kObjectAlignmentBits = kPointerSizeLog2;
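
kIntptrSignBit gives later code a pointer-width constant with only the top bit set, chosen per target so 64-bit builds do not silently reuse the 32-bit value. A property check (assumes two's complement):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Build the same bit pattern portably: ~0 shifted right by one and
      // inverted leaves exactly the most significant bit.
      uintptr_t sign_bit = ~(~static_cast<uintptr_t>(0) >> 1);
      assert(static_cast<intptr_t>(sign_bit) < 0);   // it is the sign bit
      assert((sign_bit & (sign_bit - 1)) == 0);      // exactly one bit set
      return 0;
    }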
diff --git a/V8Binding/v8/src/heap-inl.h b/V8Binding/v8/src/heap-inl.h
index 8dd09d7..810d3d4 100644
--- a/V8Binding/v8/src/heap-inl.h
+++ b/V8Binding/v8/src/heap-inl.h
@@ -34,7 +34,7 @@
namespace v8 {
namespace internal {
-int Heap::MaxHeapObjectSize() {
+int Heap::MaxObjectSizeInPagedSpace() {
return Page::kMaxHeapObjectSize;
}
@@ -215,26 +215,6 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
}
-Object* Heap::GetKeyedLookupCache() {
- if (keyed_lookup_cache()->IsUndefined()) {
- Object* obj = LookupCache::Allocate(4);
- if (obj->IsFailure()) return obj;
- keyed_lookup_cache_ = obj;
- }
- return keyed_lookup_cache();
-}
-
-
-void Heap::SetKeyedLookupCache(LookupCache* cache) {
- keyed_lookup_cache_ = cache;
-}
-
-
-void Heap::ClearKeyedLookupCache() {
- keyed_lookup_cache_ = undefined_value();
-}
-
-
void Heap::SetLastScriptId(Object* last_script_id) {
last_script_id_ = last_script_id;
}
diff --git a/V8Binding/v8/src/heap.cc b/V8Binding/v8/src/heap.cc
index df8ae6b..a29340c 100644
--- a/V8Binding/v8/src/heap.cc
+++ b/V8Binding/v8/src/heap.cc
@@ -79,9 +79,15 @@ int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
-int Heap::semispace_size_ = 2*MB;
+#if V8_TARGET_ARCH_ARM
+int Heap::semispace_size_ = 512*KB;
+int Heap::old_generation_size_ = 128*MB;
+int Heap::initial_semispace_size_ = 128*KB;
+#else
+int Heap::semispace_size_ = 8*MB;
int Heap::old_generation_size_ = 512*MB;
-int Heap::initial_semispace_size_ = 256*KB;
+int Heap::initial_semispace_size_ = 512*KB;
+#endif
GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;
@@ -90,9 +96,8 @@ GCCallback Heap::global_gc_epilogue_callback_ = NULL;
// ConfigureHeap.
int Heap::young_generation_size_ = 0; // Will be 2 * semispace_size_.
-// Double the new space after this many scavenge collections.
-int Heap::new_space_growth_limit_ = 8;
-int Heap::scavenge_count_ = 0;
+int Heap::survived_since_last_expansion_ = 0;
+
Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
int Heap::mc_count_ = 0;
@@ -421,7 +426,7 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
old_gen_promotion_limit_ =
old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
old_gen_allocation_limit_ =
- old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 3);
+ old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
old_gen_exhausted_ = false;
// If we have used the mark-compact collector to collect the new
@@ -495,7 +500,9 @@ void Heap::MarkCompact(GCTracer* tracer) {
void Heap::MarkCompactPrologue(bool is_compacting) {
// At any old GC clear the keyed lookup cache to enable collection of unused
// maps.
- ClearKeyedLookupCache();
+ KeyedLookupCache::Clear();
+ ContextSlotCache::Clear();
+ DescriptorLookupCache::Clear();
CompilationCache::MarkCompactPrologue();
@@ -624,16 +631,20 @@ void Heap::Scavenge() {
// Implements Cheney's copying algorithm
LOG(ResourceEvent("scavenge", "begin"));
- scavenge_count_++;
+ // Clear descriptor cache.
+ DescriptorLookupCache::Clear();
+
+ // Used for updating survived_since_last_expansion_ at function end.
+ int survived_watermark = PromotedSpaceSize();
+
if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
- scavenge_count_ > new_space_growth_limit_) {
- // Double the size of the new space, and double the limit. The next
- // doubling attempt will occur after the current new_space_growth_limit_
- // more collections.
+ survived_since_last_expansion_ > new_space_.Capacity()) {
+ // Double the size of new space if there is room to grow and enough
+ // data has survived scavenge since the last expansion.
// TODO(1240712): NewSpace::Double has a return value which is
// ignored here.
new_space_.Double();
- new_space_growth_limit_ *= 2;
+ survived_since_last_expansion_ = 0;
}
// Flip the semispaces. After flipping, to space is empty, from space has
@@ -737,6 +748,10 @@ void Heap::Scavenge() {
// Set age mark.
new_space_.set_age_mark(new_space_.top());
+ // Update how much has survived scavenge.
+ survived_since_last_expansion_ +=
+ (PromotedSpaceSize() - survived_watermark) + new_space_.Size();
+
LOG(ResourceEvent("scavenge", "end"));
gc_state_ = NOT_IN_GC;
@@ -933,17 +948,15 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
// If the object should be promoted, we try to copy it to old space.
if (ShouldBePromoted(object->address(), object_size)) {
- OldSpace* target_space = Heap::TargetSpace(object);
- ASSERT(target_space == Heap::old_pointer_space_ ||
- target_space == Heap::old_data_space_);
- Object* result = target_space->AllocateRaw(object_size);
- if (!result->IsFailure()) {
- HeapObject* target = HeapObject::cast(result);
- if (target_space == Heap::old_pointer_space_) {
+ Object* result;
+ if (object_size > MaxObjectSizeInPagedSpace()) {
+ result = lo_space_->AllocateRawFixedArray(object_size);
+ if (!result->IsFailure()) {
// Save the from-space object pointer and its map pointer at the
// top of the to space to be swept and copied later. Write the
// forwarding address over the map word of the from-space
// object.
+ HeapObject* target = HeapObject::cast(result);
promotion_queue.insert(object, first_word.ToMap());
object->set_map_word(MapWord::FromForwardingAddress(target));
@@ -954,21 +967,45 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
node->set_size(object_size);
*p = target;
- } else {
- // Objects promoted to the data space can be copied immediately
- // and not revisited---we will never sweep that space for
- // pointers and the copied objects do not contain pointers to
- // new space objects.
- *p = MigrateObject(object, target, object_size);
+ return;
+ }
+ } else {
+ OldSpace* target_space = Heap::TargetSpace(object);
+ ASSERT(target_space == Heap::old_pointer_space_ ||
+ target_space == Heap::old_data_space_);
+ result = target_space->AllocateRaw(object_size);
+ if (!result->IsFailure()) {
+ HeapObject* target = HeapObject::cast(result);
+ if (target_space == Heap::old_pointer_space_) {
+ // Save the from-space object pointer and its map pointer at the
+ // top of the to space to be swept and copied later. Write the
+ // forwarding address over the map word of the from-space
+ // object.
+ promotion_queue.insert(object, first_word.ToMap());
+ object->set_map_word(MapWord::FromForwardingAddress(target));
+
+ // Give the space allocated for the result a proper map by
+ // treating it as a free list node (not linked into the free
+ // list).
+ FreeListNode* node = FreeListNode::FromAddress(target->address());
+ node->set_size(object_size);
+
+ *p = target;
+ } else {
+ // Objects promoted to the data space can be copied immediately
+ // and not revisited---we will never sweep that space for
+ // pointers and the copied objects do not contain pointers to
+ // new space objects.
+ *p = MigrateObject(object, target, object_size);
#ifdef DEBUG
- VerifyNonPointerSpacePointersVisitor v;
- (*p)->Iterate(&v);
+ VerifyNonPointerSpacePointersVisitor v;
+ (*p)->Iterate(&v);
#endif
+ }
+ return;
}
- return;
}
}
-
// The object should remain in new space or the old space allocation failed.
Object* result = new_space_.AllocateRaw(object_size);
// Failed allocation at this point is utterly unexpected.
@@ -1033,6 +1070,11 @@ bool Heap::CreateInitialMaps() {
if (obj->IsFailure()) return false;
oddball_map_ = Map::cast(obj);
+ obj = AllocatePartialMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
+ JSGlobalPropertyCell::kSize);
+ if (obj->IsFailure()) return false;
+ global_property_cell_map_ = Map::cast(obj);
+
// Allocate the empty array
obj = AllocateEmptyFixedArray();
if (obj->IsFailure()) return false;
@@ -1058,6 +1100,10 @@ bool Heap::CreateInitialMaps() {
oddball_map()->set_instance_descriptors(empty_descriptor_array());
oddball_map()->set_code_cache(empty_fixed_array());
+ global_property_cell_map()->set_instance_descriptors(
+ empty_descriptor_array());
+ global_property_cell_map()->set_code_cache(empty_fixed_array());
+
// Fix prototype object for existing maps.
meta_map()->set_prototype(null_value());
meta_map()->set_constructor(null_value());
@@ -1067,6 +1113,9 @@ bool Heap::CreateInitialMaps() {
oddball_map()->set_prototype(null_value());
oddball_map()->set_constructor(null_value());
+ global_property_cell_map()->set_prototype(null_value());
+ global_property_cell_map()->set_constructor(null_value());
+
obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
if (obj->IsFailure()) return false;
heap_number_map_ = Map::cast(obj);
@@ -1193,6 +1242,17 @@ Object* Heap::AllocateHeapNumber(double value) {
}
+Object* Heap::AllocateJSGlobalPropertyCell(Object* value) {
+ Object* result = AllocateRaw(JSGlobalPropertyCell::kSize,
+ OLD_POINTER_SPACE,
+ OLD_POINTER_SPACE);
+ if (result->IsFailure()) return result;
+ HeapObject::cast(result)->set_map(global_property_cell_map());
+ JSGlobalPropertyCell::cast(result)->set_value(value);
+ return result;
+}
+
+
Object* Heap::CreateOddball(Map* map,
const char* to_string,
Object* to_number) {
@@ -1251,14 +1311,14 @@ void Heap::CreateFixedStubs() {
// The eliminates the need for doing dictionary lookup in the
// stub cache for these stubs.
HandleScope scope;
- // gcc-4.4 has problem to generate the correct vtables if the following
- // functions are inlined. e.g.,
+  // gcc-4.4 has a problem generating correct code for the following snippet:
// { CEntryStub stub;
// c_entry_code_ = *stub.GetCode();
// }
// { CEntryDebugBreakStub stub;
// c_entry_debug_break_code_ = *stub.GetCode();
// }
+  // To work around the problem, make separate functions without inlining.
Heap::CreateCEntryStub();
Heap::CreateCEntryDebugBreakStub();
Heap::CreateJSEntryStub();
@@ -1375,7 +1435,13 @@ bool Heap::CreateInitialObjects() {
last_script_id_ = undefined_value();
// Initialize keyed lookup cache.
- ClearKeyedLookupCache();
+ KeyedLookupCache::Clear();
+
+ // Initialize context slot cache.
+ ContextSlotCache::Clear();
+
+ // Initialize descriptor cache.
+ DescriptorLookupCache::Clear();
// Initialize compilation cache.
CompilationCache::Clear();
@@ -1499,6 +1565,8 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_name(name);
Code* illegal = Builtins::builtin(Builtins::Illegal);
share->set_code(illegal);
+ Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ share->set_construct_stub(construct_stub);
share->set_expected_nof_properties(0);
share->set_length(0);
share->set_formal_parameter_count(0);
@@ -1512,14 +1580,24 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
}
-Object* Heap::AllocateConsString(String* first,
- String* second) {
+Object* Heap::AllocateConsString(String* first, String* second) {
int first_length = first->length();
+ if (first_length == 0) return second;
+
int second_length = second->length();
+ if (second_length == 0) return first;
+
int length = first_length + second_length;
bool is_ascii = first->IsAsciiRepresentation()
&& second->IsAsciiRepresentation();
+ // Make sure that an out of memory exception is thrown if the length
+ // of the new cons string is too large to fit in a Smi.
+  if (length > Smi::kMaxValue || length < 0) {
+ Top::context()->mark_out_of_memory();
+ return Failure::OutOfMemoryException();
+ }
+
// If the resulting string is small make a flat string.
if (length < String::kMinNonFlatLength) {
ASSERT(first->IsFlat());
@@ -1529,8 +1607,12 @@ Object* Heap::AllocateConsString(String* first,
if (result->IsFailure()) return result;
// Copy the characters into the new object.
char* dest = SeqAsciiString::cast(result)->GetChars();
- String::WriteToFlat(first, dest, 0, first_length);
- String::WriteToFlat(second, dest + first_length, 0, second_length);
+ // Copy first part.
+ char* src = SeqAsciiString::cast(first)->GetChars();
+ for (int i = 0; i < first_length; i++) *dest++ = src[i];
+ // Copy second part.
+ src = SeqAsciiString::cast(second)->GetChars();
+ for (int i = 0; i < second_length; i++) *dest++ = src[i];
return result;
} else {
Object* result = AllocateRawTwoByteString(length);
@@ -1709,7 +1791,7 @@ Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
}
int size = ByteArray::SizeFor(length);
AllocationSpace space =
- size > MaxHeapObjectSize() ? LO_SPACE : OLD_DATA_SPACE;
+ size > MaxObjectSizeInPagedSpace() ? LO_SPACE : OLD_DATA_SPACE;
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
@@ -1724,7 +1806,7 @@ Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Object* Heap::AllocateByteArray(int length) {
int size = ByteArray::SizeFor(length);
AllocationSpace space =
- size > MaxHeapObjectSize() ? LO_SPACE : NEW_SPACE;
+ size > MaxObjectSizeInPagedSpace() ? LO_SPACE : NEW_SPACE;
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
@@ -1759,7 +1841,7 @@ Object* Heap::CreateCode(const CodeDesc& desc,
int obj_size = Code::SizeFor(body_size, sinfo_size);
ASSERT(IsAligned(obj_size, Code::kCodeAlignment));
Object* result;
- if (obj_size > MaxHeapObjectSize()) {
+ if (obj_size > MaxObjectSizeInPagedSpace()) {
result = lo_space_->AllocateRawCode(obj_size);
} else {
result = code_space_->AllocateRaw(obj_size);
@@ -1787,7 +1869,6 @@ Object* Heap::CreateCode(const CodeDesc& desc,
// through the self_reference parameter.
code->CopyFrom(desc);
if (sinfo != NULL) sinfo->Serialize(code); // write scope info
- LOG(CodeAllocateEvent(code, desc.origin));
#ifdef DEBUG
code->Verify();
@@ -1800,7 +1881,7 @@ Object* Heap::CopyCode(Code* code) {
// Allocate an object the same size as the code object.
int obj_size = code->Size();
Object* result;
- if (obj_size > MaxHeapObjectSize()) {
+ if (obj_size > MaxObjectSizeInPagedSpace()) {
result = lo_space_->AllocateRawCode(obj_size);
} else {
result = code_space_->AllocateRaw(obj_size);
@@ -1975,7 +2056,7 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
// Allocate the JSObject.
AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- if (map->instance_size() > MaxHeapObjectSize()) space = LO_SPACE;
+ if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
Object* obj = Allocate(map, space);
if (obj->IsFailure()) return obj;
@@ -1997,7 +2078,34 @@ Object* Heap::AllocateJSObject(JSFunction* constructor,
Map::cast(initial_map)->set_constructor(constructor);
}
// Allocate the object based on the constructors initial map.
- return AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
+ Object* result =
+ AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
+ // Make sure result is NOT a JS global object if valid.
+ ASSERT(result->IsFailure() || !result->IsJSGlobalObject());
+ return result;
+}
+
+
+Object* Heap::AllocateJSGlobalObject(JSFunction* constructor) {
+ ASSERT(constructor->has_initial_map());
+ // Make sure no field properties are described in the initial map.
+ // This guarantees us that normalizing the properties does not
+ // require us to change property values to JSGlobalPropertyCells.
+ ASSERT(constructor->initial_map()->NextFreePropertyIndex() == 0);
+
+ // Allocate the object based on the constructors initial map.
+ Object* result = AllocateJSObjectFromMap(constructor->initial_map(), TENURED);
+ if (result->IsFailure()) return result;
+
+ // Normalize the result.
+ JSObject* global = JSObject::cast(result);
+ result = global->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
+ if (result->IsFailure()) return result;
+
+ // Make sure result is a JS global object with properties in dictionary.
+ ASSERT(global->IsJSGlobalObject());
+ ASSERT(!global->HasFastProperties());
+ return global;
}
@@ -2262,7 +2370,7 @@ Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
// Allocate string.
AllocationSpace space =
- (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_DATA_SPACE;
+ (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_DATA_SPACE;
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
@@ -2284,13 +2392,16 @@ Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
int size = SeqAsciiString::SizeFor(length);
- if (size > MaxHeapObjectSize()) {
- space = LO_SPACE;
- }
- // Use AllocateRaw rather than Allocate because the object's size cannot be
- // determined from the map.
- Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
+ Object* result = Failure::OutOfMemoryException();
+ if (space == NEW_SPACE) {
+ result = size <= kMaxObjectSizeInNewSpace
+ ? new_space_.AllocateRaw(size)
+ : lo_space_->AllocateRawFixedArray(size);
+ } else {
+ if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
+ result = AllocateRaw(size, space, OLD_DATA_SPACE);
+ }
if (result->IsFailure()) return result;
// Determine the map based on the string's length.
@@ -2314,13 +2425,16 @@ Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
int size = SeqTwoByteString::SizeFor(length);
- if (size > MaxHeapObjectSize()) {
- space = LO_SPACE;
- }
- // Use AllocateRaw rather than Allocate because the object's size cannot be
- // determined from the map.
- Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
+ Object* result = Failure::OutOfMemoryException();
+ if (space == NEW_SPACE) {
+ result = size <= kMaxObjectSizeInNewSpace
+ ? new_space_.AllocateRaw(size)
+ : lo_space_->AllocateRawFixedArray(size);
+ } else {
+ if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
+ result = AllocateRaw(size, space, OLD_DATA_SPACE);
+ }
if (result->IsFailure()) return result;
// Determine the map based on the string's length.
@@ -2357,9 +2471,9 @@ Object* Heap::AllocateRawFixedArray(int length) {
if (always_allocate()) return AllocateFixedArray(length, NOT_TENURED);
// Allocate the raw data for a fixed array.
int size = FixedArray::SizeFor(length);
- return (size > MaxHeapObjectSize())
- ? lo_space_->AllocateRawFixedArray(size)
- : new_space_.AllocateRaw(size);
+ return size <= kMaxObjectSizeInNewSpace
+ ? new_space_.AllocateRaw(size)
+ : lo_space_->AllocateRawFixedArray(size);
}
@@ -2407,16 +2521,22 @@ Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
if (length == 0) return empty_fixed_array();
int size = FixedArray::SizeFor(length);
- Object* result;
- if (size > MaxHeapObjectSize()) {
- result = lo_space_->AllocateRawFixedArray(size);
- } else {
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- result = AllocateRaw(size, space, OLD_POINTER_SPACE);
+ Object* result = Failure::OutOfMemoryException();
+ if (pretenure != TENURED) {
+ result = size <= kMaxObjectSizeInNewSpace
+ ? new_space_.AllocateRaw(size)
+ : lo_space_->AllocateRawFixedArray(size);
+ }
+ if (result->IsFailure()) {
+ if (size > MaxObjectSizeInPagedSpace()) {
+ result = lo_space_->AllocateRawFixedArray(size);
+ } else {
+ AllocationSpace space =
+ (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
+ result = AllocateRaw(size, space, OLD_POINTER_SPACE);
+ }
+ if (result->IsFailure()) return result;
}
- if (result->IsFailure()) return result;
-
// Initialize the object.
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
@@ -2516,7 +2636,7 @@ STRUCT_LIST(MAKE_CASE)
}
int size = map->instance_size();
AllocationSpace space =
- (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_POINTER_SPACE;
+ (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
Object* result = Heap::Allocate(map, space);
if (result->IsFailure()) return result;
Struct::cast(result)->InitializeBody(size);
@@ -3490,6 +3610,58 @@ const char* GCTracer::CollectorString() {
}
+int KeyedLookupCache::Hash(Map* map, String* name) {
+ // Uses only lower 32 bits if pointers are larger.
+ uintptr_t addr_hash =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> 2;
+ return (addr_hash ^ name->Hash()) % kLength;
+}
+
+
+int KeyedLookupCache::Lookup(Map* map, String* name) {
+ int index = Hash(map, name);
+ Key& key = keys_[index];
+ if ((key.map == map) && key.name->Equals(name)) {
+ return field_offsets_[index];
+ }
+ return -1;
+}
+
+
+void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
+ String* symbol;
+ if (Heap::LookupSymbolIfExists(name, &symbol)) {
+ int index = Hash(map, symbol);
+ Key& key = keys_[index];
+ key.map = map;
+ key.name = symbol;
+ field_offsets_[index] = field_offset;
+ }
+}
+
+
+void KeyedLookupCache::Clear() {
+ for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
+}
+
+
+KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength];
+
+
+int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength];
+
+
+void DescriptorLookupCache::Clear() {
+ for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
+}
+
+
+DescriptorLookupCache::Key
+DescriptorLookupCache::keys_[DescriptorLookupCache::kLength];
+
+int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];
+
+
#ifdef DEBUG
bool Heap::GarbageCollectionGreedyCheck() {
ASSERT(FLAG_gc_greedy);
diff --git a/V8Binding/v8/src/heap.h b/V8Binding/v8/src/heap.h
index 856de20..3667348 100644
--- a/V8Binding/v8/src/heap.h
+++ b/V8Binding/v8/src/heap.h
@@ -99,6 +99,7 @@ namespace internal {
V(Map, global_context_map) \
V(Map, code_map) \
V(Map, oddball_map) \
+ V(Map, global_property_cell_map) \
V(Map, boilerplate_function_map) \
V(Map, shared_function_info_map) \
V(Map, proxy_map) \
@@ -126,7 +127,6 @@ namespace internal {
V(FixedArray, number_string_cache) \
V(FixedArray, single_character_string_cache) \
V(FixedArray, natives_source_cache) \
- V(Object, keyed_lookup_cache) \
V(Object, last_script_id)
@@ -243,9 +243,8 @@ class Heap : public AllStatic {
// all available bytes. Check MaxHeapObjectSize() instead.
static int Available();
- // Returns the maximum object size that heap supports. Objects larger than
- // the maximum heap object size are allocated in a large object space.
- static inline int MaxHeapObjectSize();
+ // Returns the maximum object size in paged space.
+ static inline int MaxObjectSizeInPagedSpace();
// Returns of size of all objects residing in the heap.
static int SizeOfObjects();
@@ -290,6 +289,12 @@ class Heap : public AllStatic {
static Object* AllocateJSObject(JSFunction* constructor,
PretenureFlag pretenure = NOT_TENURED);
+ // Allocates and initializes a new JS global object based on a constructor.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateJSGlobalObject(JSFunction* constructor);
+
// Returns a deep copy of the JavaScript object.
// Properties and elements are copied too.
// Returns failure if allocation failed.
@@ -410,6 +415,12 @@ class Heap : public AllStatic {
// Please note this does not perform a garbage collection.
static Object* AllocateByteArray(int length);
+ // Allocate a tenured JS global property cell.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocateJSGlobalPropertyCell(Object* value);
+
// Allocates a fixed array initialized with undefined values
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -446,17 +457,6 @@ class Heap : public AllStatic {
// Allocates a new utility object in the old generation.
static Object* AllocateStruct(InstanceType type);
-
- // Initializes a function with a shared part and prototype.
- // Returns the function.
- // Note: this code was factored out of AllocateFunction such that
- // other parts of the VM could use it. Specifically, a function that creates
- // instances of type JS_FUNCTION_TYPE benefit from the use of this function.
- // Please note this does not perform a garbage collection.
- static Object* InitializeFunction(JSFunction* function,
- SharedFunctionInfo* shared,
- Object* prototype);
-
// Allocates a function initialized with a shared part.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -520,8 +520,7 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateConsString(String* first,
- String* second);
+ static Object* AllocateConsString(String* first, String* second);
// Allocates a new sliced string object which is a slice of an underlying
// string buffer stretching from the index start (inclusive) to the index
@@ -700,11 +699,6 @@ class Heap : public AllStatic {
non_monomorphic_cache_ = value;
}
- // Gets, sets and clears the lookup cache used for keyed access.
- static inline Object* GetKeyedLookupCache();
- static inline void SetKeyedLookupCache(LookupCache* cache);
- static inline void ClearKeyedLookupCache();
-
// Update the next script id.
static inline void SetLastScriptId(Object* last_script_id);
@@ -827,14 +821,17 @@ class Heap : public AllStatic {
static int young_generation_size_;
static int old_generation_size_;
- static int new_space_growth_limit_;
- static int scavenge_count_;
+ // For keeping track of how much data has survived
+  // scavenge since the last new space expansion.
+ static int survived_since_last_expansion_;
static int always_allocate_scope_depth_;
static bool context_disposed_pending_;
static const int kMaxMapSpaceSize = 8*MB;
+ static const int kMaxObjectSizeInNewSpace = 256*KB;
+
static NewSpace new_space_;
static OldSpace* old_pointer_space_;
static OldSpace* old_data_space_;
@@ -938,13 +935,13 @@ class Heap : public AllStatic {
static bool CreateInitialObjects();
// These four Create*EntryStub functions are here because of a gcc-4.4 bug
- // that assign wrong vptr entries.
+ // that assigns wrong vtable entries.
static void CreateCEntryStub();
static void CreateCEntryDebugBreakStub();
static void CreateJSEntryStub();
static void CreateJSConstructEntryStub();
-
static void CreateFixedStubs();
+
static Object* CreateOddball(Map* map,
const char* to_string,
Object* to_number);
@@ -996,7 +993,17 @@ class Heap : public AllStatic {
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
// Copy memory from src to dst.
- inline static void CopyBlock(Object** dst, Object** src, int byte_size);
+ static inline void CopyBlock(Object** dst, Object** src, int byte_size);
+
+ // Initializes a function with a shared part and prototype.
+ // Returns the function.
+ // Note: this code was factored out of AllocateFunction such that
+ // other parts of the VM could use it. Specifically, a function that creates
+ // instances of type JS_FUNCTION_TYPE benefit from the use of this function.
+ // Please note this does not perform a garbage collection.
+ static inline Object* InitializeFunction(JSFunction* function,
+ SharedFunctionInfo* shared,
+ Object* prototype);
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
@@ -1147,6 +1154,84 @@ class HeapIterator BASE_EMBEDDED {
};
+// Cache for mapping (map, property name) into field offset.
+// Cleared at startup and prior to mark sweep collection.
+class KeyedLookupCache {
+ public:
+ // Lookup field offset for (map, name). If absent, -1 is returned.
+ static int Lookup(Map* map, String* name);
+
+ // Update an element in the cache.
+ static void Update(Map* map, String* name, int field_offset);
+
+ // Clear the cache.
+ static void Clear();
+ private:
+ inline static int Hash(Map* map, String* name);
+ static const int kLength = 64;
+ struct Key {
+ Map* map;
+ String* name;
+ };
+ static Key keys_[kLength];
+ static int field_offsets_[kLength];
+};
+
+
+
+// Cache for mapping (array, property name) into descriptor index.
+// The cache contains both positive and negative results.
+// A descriptor index equal to kAbsent means the property is absent.
+// Cleared at startup and prior to any gc.
+class DescriptorLookupCache {
+ public:
+ // Lookup descriptor index for (map, name).
+ // If absent, kAbsent is returned.
+ static int Lookup(DescriptorArray* array, String* name) {
+ if (!StringShape(name).IsSymbol()) return kAbsent;
+ int index = Hash(array, name);
+ Key& key = keys_[index];
+ if ((key.array == array) && (key.name == name)) return results_[index];
+ return kAbsent;
+ }
+
+ // Update an element in the cache.
+ static void Update(DescriptorArray* array, String* name, int result) {
+ ASSERT(result != kAbsent);
+ if (StringShape(name).IsSymbol()) {
+ int index = Hash(array, name);
+ Key& key = keys_[index];
+ key.array = array;
+ key.name = name;
+ results_[index] = result;
+ }
+ }
+
+ // Clear the cache.
+ static void Clear();
+
+ static const int kAbsent = -2;
+ private:
+ static int Hash(DescriptorArray* array, String* name) {
+ // Uses only lower 32 bits if pointers are larger.
+ uintptr_t array_hash =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array)) >> 2;
+ uintptr_t name_hash =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >> 2;
+ return (array_hash ^ name_hash) % kLength;
+ }
+
+ static const int kLength = 64;
+ struct Key {
+ DescriptorArray* array;
+ String* name;
+ };
+
+ static Key keys_[kLength];
+ static int results_[kLength];
+};
+
+
// ----------------------------------------------------------------------------
// Marking stack for tracing live objects.
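
Both caches are direct-mapped: a hash of the key pair picks one of 64 slots, and a colliding update simply overwrites whatever was there, so a lookup is one compare with no probing. A generic sketch of the pattern (hypothetical types, hash shaped like KeyedLookupCache::Hash above):

    #include <cstdint>

    static const int kLength = 64;

    struct Key { const void* map; uint32_t name_hash; };
    static Key keys[kLength];
    static int field_offsets[kLength];

    // Drop the low (alignment) bits of the pointer, mix in the name hash,
    // and reduce modulo the table size.
    static int Hash(const void* map, uint32_t name_hash) {
      uint32_t addr_hash =
          static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> 2;
      return static_cast<int>((addr_hash ^ name_hash) % kLength);
    }

    int main() {
      Key k = { keys, 0x1234u };
      int slot = Hash(k.map, k.name_hash);
      keys[slot] = k;              // a colliding key would overwrite this slot
      field_offsets[slot] = 8;
      return 0;
    }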
diff --git a/V8Binding/v8/src/ia32/assembler-ia32-inl.h b/V8Binding/v8/src/ia32/assembler-ia32-inl.h
index 045f176..9a5352b 100644
--- a/V8Binding/v8/src/ia32/assembler-ia32-inl.h
+++ b/V8Binding/v8/src/ia32/assembler-ia32-inl.h
@@ -48,7 +48,7 @@ Condition NegateCondition(Condition cc) {
// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(int delta) {
+void RelocInfo::apply(intptr_t delta) {
if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // relocate entry
diff --git a/V8Binding/v8/src/ia32/assembler-ia32.cc b/V8Binding/v8/src/ia32/assembler-ia32.cc
index 434bf07..f3cb854 100644
--- a/V8Binding/v8/src/ia32/assembler-ia32.cc
+++ b/V8Binding/v8/src/ia32/assembler-ia32.cc
@@ -117,7 +117,8 @@ void CpuFeatures::Probe() {
Object* code =
Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL);
if (!code->IsCode()) return;
- LOG(CodeCreateEvent("Builtin", Code::cast(code), "CpuFeatures::Probe"));
+ LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
+ Code::cast(code), "CpuFeatures::Probe"));
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
supported_ = probe();
@@ -918,6 +919,14 @@ void Assembler::idiv(Register src) {
}
+void Assembler::imul(Register reg) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF7);
+ EMIT(0xE8 | reg.code());
+}
+
+
void Assembler::imul(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1416,7 +1425,7 @@ void Assembler::call(const Operand& adr) {
}
-void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
+void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
WriteRecordedPositions();
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1655,6 +1664,22 @@ void Assembler::fchs() {
}
+void Assembler::fcos() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD9);
+ EMIT(0xFF);
+}
+
+
+void Assembler::fsin() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xD9);
+ EMIT(0xFE);
+}
+
+
void Assembler::fadd(int i) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1798,7 +1823,7 @@ void Assembler::fcompp() {
void Assembler::fnstsw_ax() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- EMIT(0xdF);
+ EMIT(0xDF);
EMIT(0xE0);
}
@@ -2165,17 +2190,6 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
-void Assembler::WriteInternalReference(int position, const Label& bound_label) {
- ASSERT(bound_label.is_bound());
- ASSERT(0 <= position);
- ASSERT(position + static_cast<int>(sizeof(uint32_t)) <= pc_offset());
- ASSERT(long_at(position) == 0); // only initialize once!
-
- uint32_t label_loc = reinterpret_cast<uint32_t>(addr_at(bound_label.pos()));
- long_at_put(position, label_loc);
-}
-
-
#ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;
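
The new one-operand imul encodes as opcode F7 with ModRM opcode extension /5: register-direct mode contributes 0xC0, the extension contributes 5 << 3 = 0x28, and the low three bits carry the register number, giving the 0xE8 | reg.code() byte emitted above. A quick check:

    #include <cstdio>

    int main() {
      // IMUL r/m32 is F7 /5, so the ModRM byte is 0xC0 | 0x28 | reg.
      for (int reg = 0; reg < 8; ++reg) {
        std::printf("imul reg%d -> F7 %02X\n", reg,
                    static_cast<unsigned>(0xE8 | reg));
      }
      return 0;
    }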
diff --git a/V8Binding/v8/src/ia32/assembler-ia32.h b/V8Binding/v8/src/ia32/assembler-ia32.h
index 79f239d..70b510e 100644
--- a/V8Binding/v8/src/ia32/assembler-ia32.h
+++ b/V8Binding/v8/src/ia32/assembler-ia32.h
@@ -396,10 +396,15 @@ class CpuFeatures : public AllStatic {
class Assembler : public Malloced {
private:
- // The relocation writer's position is kGap bytes below the end of
+ // We check before assembling an instruction that there is sufficient
+ // space to write an instruction and its relocation information.
+ // The relocation writer's position must be kGap bytes above the end of
// the generated instructions. This leaves enough space for the
- // longest possible ia32 instruction (17 bytes as of 9/26/06) and
- // allows for a single, fast space check per instruction.
+ // longest possible ia32 instruction, 15 bytes, and the longest possible
+ // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
+ // (There is a 15 byte limit on ia32 instruction length that rules out some
+ // otherwise valid instructions.)
+ // This allows for a single, fast space check per instruction.
static const int kGap = 32;
public:
@@ -539,15 +544,18 @@ class Assembler : public Malloced {
void idiv(Register src);
- void imul(Register dst, const Operand& src);
- void imul(Register dst, Register src, int32_t imm32);
+ // Signed multiply instructions.
+ void imul(Register src); // edx:eax = eax * src.
+ void imul(Register dst, const Operand& src); // dst = dst * src.
+ void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
void inc(Register dst);
void inc(const Operand& dst);
void lea(Register dst, const Operand& src);
- void mul(Register src);
+ // Unsigned multiply instruction.
+ void mul(Register src); // edx:eax = eax * reg.
void neg(Register dst);
@@ -658,6 +666,8 @@ class Assembler : public Malloced {
void fabs();
void fchs();
+ void fcos();
+ void fsin();
void fadd(int i);
void fsub(int i);
@@ -729,11 +739,6 @@ class Assembler : public Malloced {
// Used for inline tables, e.g., jump-tables.
void dd(uint32_t data, RelocInfo::Mode reloc_info);
- // Writes the absolute address of a bound label at the given position in
- // the generated code. That positions should have the relocation mode
- // internal_reference!
- void WriteInternalReference(int position, const Label& bound_label);
-
int pc_offset() const { return pc_ - buffer_; }
int current_statement_position() const { return current_statement_position_; }
int current_position() const { return current_position_; }
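
The rewritten kGap comment pins the arithmetic down: the longest ia32 instruction is 15 bytes and the longest relocation record (RelocInfoWriter::kMaxLength) is 16 bytes, so a 32-byte gap covers both with a single buffer-space check per instruction. The invariant as a compile-time check (a sketch):

    const int kMaxInstructionSize = 15;  // ia32 architectural limit
    const int kMaxRelocSize = 16;        // RelocInfoWriter::kMaxLength
    const int kGap = 32;

    static_assert(kMaxInstructionSize + kMaxRelocSize <= kGap,
                  "kGap must hold one instruction plus its reloc info");

    int main() { return 0; }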
diff --git a/V8Binding/v8/src/ia32/builtins-ia32.cc b/V8Binding/v8/src/ia32/builtins-ia32.cc
index f65074b..3cafd90 100644
--- a/V8Binding/v8/src/ia32/builtins-ia32.cc
+++ b/V8Binding/v8/src/ia32/builtins-ia32.cc
@@ -63,6 +63,25 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &non_function_call);
+ // Jump to the function-specific construct stub.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
+ __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+ __ jmp(Operand(ebx));
+
+ // edi: called object
+ // eax: number of arguments
+ __ bind(&non_function_call);
+
+ // Set expected number of arguments to zero (not changing eax).
+ __ Set(ebx, Immediate(0));
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Enter a construct frame.
__ EnterConstructFrame();
@@ -113,7 +132,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// Make sure that the maximum heap object size will never cause us
// problem here, because it is always greater than the maximum
// instance size that can be represented in a byte.
- ASSERT(Heap::MaxHeapObjectSize() >= (1 << kBitsPerByte));
+ ASSERT(Heap::MaxObjectSizeInPagedSpace() >= (1 << kBitsPerByte));
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
__ mov(ebx, Operand::StaticVariable(new_space_allocation_top));
@@ -175,7 +194,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// ebx: JSObject
// edi: start of next object (will be start of FixedArray)
// edx: number of elements in properties array
- ASSERT(Heap::MaxHeapObjectSize() >
+ ASSERT(Heap::MaxObjectSizeInPagedSpace() >
(FixedArray::kHeaderSize + 255*kPointerSize));
__ lea(ecx, Operand(edi, edx, times_4, FixedArray::kHeaderSize));
__ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
@@ -305,16 +324,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
__ push(ecx);
__ ret(0);
-
- // edi: called object
- // eax: number of arguments
- __ bind(&non_function_call);
-
- // Set expected number of arguments to zero (not changing eax).
- __ Set(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
}
diff --git a/V8Binding/v8/src/ia32/codegen-ia32-inl.h b/V8Binding/v8/src/ia32/codegen-ia32-inl.h
index 49c706d..44e937a 100644
--- a/V8Binding/v8/src/ia32/codegen-ia32-inl.h
+++ b/V8Binding/v8/src/ia32/codegen-ia32-inl.h
@@ -39,6 +39,16 @@ namespace internal {
void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+ GenerateFastMathOp(SIN, args);
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+ GenerateFastMathOp(COS, args);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/ia32/codegen-ia32.cc b/V8Binding/v8/src/ia32/codegen-ia32.cc
index e9e4061..3b2eaa0 100644
--- a/V8Binding/v8/src/ia32/codegen-ia32.cc
+++ b/V8Binding/v8/src/ia32/codegen-ia32.cc
@@ -175,18 +175,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
function_return_is_shadowed_ = false;
- // Allocate the arguments object and copy the parameters into it.
- if (scope_->arguments() != NULL) {
- ASSERT(scope_->arguments_shadow() != NULL);
- Comment cmnt(masm_, "[ Allocate arguments object");
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- frame_->PushFunction();
- frame_->PushReceiverSlotAddress();
- frame_->Push(Smi::FromInt(scope_->num_parameters()));
- Result answer = frame_->CallStub(&stub, 3);
- frame_->Push(&answer);
- }
-
+ // Allocate the local context if needed.
if (scope_->num_heap_slots() > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Allocate local context.
@@ -247,27 +236,11 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
}
}
- // This section stores the pointer to the arguments object that
- // was allocated and copied into above. If the address was not
- // saved to TOS, we push ecx onto the stack.
- //
// Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in the
- // context.
- if (scope_->arguments() != NULL) {
- Comment cmnt(masm_, "[ store arguments object");
- { Reference shadow_ref(this, scope_->arguments_shadow());
- ASSERT(shadow_ref.is_slot());
- { Reference arguments_ref(this, scope_->arguments());
- ASSERT(arguments_ref.is_slot());
- // Here we rely on the convenient property that references to slot
- // take up zero space in the frame (ie, it doesn't matter that the
- // stored value is actually below the reference on the frame).
- arguments_ref.SetValue(NOT_CONST_INIT);
- }
- shadow_ref.SetValue(NOT_CONST_INIT);
- }
- frame_->Drop(); // Value is no longer needed.
+ // initialization because the arguments object may be stored in
+ // the context.
+ if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+ StoreArgumentsObject(true);
}
// Generate code to 'execute' declarations and initialize functions
@@ -317,9 +290,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
if (function_return_.is_bound()) {
function_return_.Jump(&undefined);
} else {
- // Though this is a (possibly) backward block, the frames
- // can only differ on their top element.
- function_return_.Bind(&undefined, 1);
+ function_return_.Bind(&undefined);
GenerateReturnSequence(&undefined);
}
} else if (function_return_.is_linked()) {
@@ -329,9 +300,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
// compile an artificial return statement just above, and (b) there
// are return statements in the body but (c) they are all shadowed.
Result return_value;
- // Though this is a (possibly) backward block, the frames can
- // only differ on their top element.
- function_return_.Bind(&return_value, 1);
+ function_return_.Bind(&return_value);
GenerateReturnSequence(&return_value);
}
}
@@ -595,6 +564,71 @@ void CodeGenerator::LoadTypeofExpression(Expression* x) {
}
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
+ if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+ ASSERT(scope_->arguments_shadow() != NULL);
+ // We don't want to do lazy arguments allocation for functions that
+ // have heap-allocated contexts, because it interferes with the
+ // uninitialized const tracking in the context objects.
+ return (scope_->num_heap_slots() > 0)
+ ? EAGER_ARGUMENTS_ALLOCATION
+ : LAZY_ARGUMENTS_ALLOCATION;
+}
+
+
+Result CodeGenerator::StoreArgumentsObject(bool initial) {
+ ArgumentsAllocationMode mode = ArgumentsMode();
+ ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
+
+ Comment cmnt(masm_, "[ store arguments object");
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
+ // When using lazy arguments allocation, we store the hole value
+ // as a sentinel indicating that the arguments object hasn't been
+ // allocated yet.
+ frame_->Push(Factory::the_hole_value());
+ } else {
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ frame_->PushFunction();
+ frame_->PushReceiverSlotAddress();
+ frame_->Push(Smi::FromInt(scope_->num_parameters()));
+ Result result = frame_->CallStub(&stub, 3);
+ frame_->Push(&result);
+ }
+
+ { Reference shadow_ref(this, scope_->arguments_shadow());
+ Reference arguments_ref(this, scope_->arguments());
+ ASSERT(shadow_ref.is_slot() && arguments_ref.is_slot());
+ // Here we rely on the convenient property that references to slot
+ // take up zero space in the frame (ie, it doesn't matter that the
+ // stored value is actually below the reference on the frame).
+ JumpTarget done;
+ bool skip_arguments = false;
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
+ // We have to skip storing into the arguments slot if it has
+ // already been written to. This can happen if a function
+ // has a local variable named 'arguments'.
+ LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ Result arguments = frame_->Pop();
+ if (arguments.is_constant()) {
+ // We have to skip updating the arguments object if it has
+ // been assigned a proper value.
+ skip_arguments = !arguments.handle()->IsTheHole();
+ } else {
+ __ cmp(Operand(arguments.reg()), Immediate(Factory::the_hole_value()));
+ arguments.Unuse();
+ done.Branch(not_equal);
+ }
+ }
+ if (!skip_arguments) {
+ arguments_ref.SetValue(NOT_CONST_INIT);
+ if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+ }
+ shadow_ref.SetValue(NOT_CONST_INIT);
+ }
+ return frame_->Pop();
+}
+
+
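A sketch of the sentinel protocol implemented by StoreArgumentsObject above, restated in comment form (the names are those used in the patch):

  // Lazy arguments protocol:
  //   arguments slot == the_hole  =>  not materialized yet; the first load
  //     that observes the hole calls StoreArgumentsObject(false) to build it.
  //   arguments slot != the_hole  =>  already materialized, or shadowed by a
  //     local variable named 'arguments'; it must not be overwritten.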
Reference::Reference(CodeGenerator* cgen, Expression* expression)
: cgen_(cgen), expression_(expression), type_(ILLEGAL) {
cgen->LoadReference(this);
@@ -718,6 +752,11 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
class FloatingPointHelper : public AllStatic {
public:
+ // Code pattern for loading a floating point value. Input value must
+ // be either a smi or a heap number object (fp value). Requirements:
+ // operand on TOS+1. Returns operand as floating point number on FPU
+ // stack.
+ static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
// Code pattern for loading floating point values. Input values must
// be either smi or heap number objects (fp values). Requirements:
// operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
@@ -734,7 +773,8 @@ class FloatingPointHelper : public AllStatic {
static void AllocateHeapNumber(MacroAssembler* masm,
Label* need_gc,
Register scratch1,
- Register scratch2);
+ Register scratch2,
+ Register result);
};
@@ -879,15 +919,15 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
Result left = frame_->Pop();
if (op == Token::ADD) {
- bool left_is_string = left.static_type().is_jsstring();
- bool right_is_string = right.static_type().is_jsstring();
+ bool left_is_string = left.is_constant() && left.handle()->IsString();
+ bool right_is_string = right.is_constant() && right.handle()->IsString();
if (left_is_string || right_is_string) {
frame_->Push(&left);
frame_->Push(&right);
Result answer;
if (left_is_string) {
if (right_is_string) {
- // TODO(lrn): if (left.is_constant() && right.is_constant())
+ // TODO(lrn): if both are constant strings
// -- do a compile time cons, if allocation during codegen is allowed.
answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
} else {
@@ -898,7 +938,6 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
answer =
frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
}
- answer.set_static_type(StaticType::jsstring());
frame_->Push(&answer);
return;
}
@@ -1385,7 +1424,11 @@ class DeferredInlineSmiOperation: public DeferredCode {
void DeferredInlineSmiOperation::Generate() {
__ push(src_);
__ push(Immediate(value_));
- GenericBinaryOpStub stub(op_, overwrite_mode_, SMI_CODE_INLINED);
+ // For mod we don't generate all the Smi code inline.
+ GenericBinaryOpStub stub(
+ op_,
+ overwrite_mode_,
+ (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
__ CallStub(&stub);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -1691,6 +1734,8 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
int shift_value = int_value & 0x1f;
operand->ToRegister();
if (shift_value == 0) {
+ // Spill operand so it can be overwritten in the slow case.
+ frame_->Spill(operand->reg());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
operand->reg(),
@@ -1768,6 +1813,33 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
break;
}
+ // Generate inline code for mod of powers of 2 and negative powers of 2.
+ case Token::MOD:
+ if (!reversed &&
+ int_value != 0 &&
+ (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredCode* deferred = new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ // Check for negative or non-Smi left hand side.
+ __ test(operand->reg(), Immediate(kSmiTagMask | 0x80000000));
+ deferred->Branch(not_zero);
+ if (int_value < 0) int_value = -int_value;
+ if (int_value == 1) {
+ __ mov(operand->reg(), Immediate(Smi::FromInt(0)));
+ } else {
+ __ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
+ }
+ deferred->BindExit();
+ frame_->Push(operand);
+ break;
+ }
+ // Fall through if we did not find a power of 2 on the right hand side!
+
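A worked instance of the masking trick above (assuming kSmiTag == 0 and kSmiTagSize == 1, as the smi checks in this file do):

  // For int_value == 4 the mask is (4 << 1) - 1 == 7. A tagged smi is v << 1,
  // so (v << 1) & 7 == (v & 3) << 1 == tagged(v % 4) for v >= 0; the result is
  // produced without untagging. The preceding test against
  // (kSmiTagMask | 0x80000000) rejects non-smis and negative values with a
  // single branch, since masking would compute the wrong remainder for
  // negative left-hand sides.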
default: {
Result constant_operand(value);
if (reversed) {
@@ -1784,34 +1856,6 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
}
-class CompareStub: public CodeStub {
- public:
- CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Condition cc_;
- bool strict_;
-
- Major MajorKey() { return Compare; }
-
- int MinorKey() {
- // Encode the three parameters in a unique 16 bit value.
- ASSERT(static_cast<int>(cc_) < (1 << 15));
- return (static_cast<int>(cc_) << 1) | (strict_ ? 1 : 0);
- }
-
-#ifdef DEBUG
- void Print() {
- PrintF("CompareStub (cc %d), (strict %s)\n",
- static_cast<int>(cc_),
- strict_ ? "true" : "false");
- }
-#endif
-};
-
-
void CodeGenerator::Comparison(Condition cc,
bool strict,
ControlDestination* dest) {
@@ -1873,13 +1917,19 @@ void CodeGenerator::Comparison(Condition cc,
// Implement comparison against a constant Smi, inlining the case
// where both sides are Smis.
left_side.ToRegister();
- ASSERT(left_side.is_valid());
+
+ // Here we split control flow to the stub call and inlined cases
+ // before finally splitting it to the control destination. We use
+ // a jump target and branching to duplicate the virtual frame at
+ // the first split. We manually handle the off-frame references
+ // by reconstituting them on the non-fall-through path.
JumpTarget is_smi;
+ Register left_reg = left_side.reg();
+ Handle<Object> right_val = right_side.handle();
__ test(left_side.reg(), Immediate(kSmiTagMask));
- is_smi.Branch(zero, &left_side, &right_side, taken);
+ is_smi.Branch(zero, taken);
- // Setup and call the compare stub, which expects its arguments
- // in registers.
+ // Setup and call the compare stub.
CompareStub stub(cc, strict);
Result result = frame_->CallStub(&stub, &left_side, &right_side);
result.ToRegister();
@@ -1888,12 +1938,12 @@ void CodeGenerator::Comparison(Condition cc,
dest->true_target()->Branch(cc);
dest->false_target()->Jump();
- is_smi.Bind(&left_side, &right_side);
- left_side.ToRegister();
+ is_smi.Bind();
+ left_side = Result(left_reg);
+ right_side = Result(right_val);
// Test smi equality and comparison by signed int comparison.
if (IsUnsafeSmi(right_side.handle())) {
right_side.ToRegister();
- ASSERT(right_side.is_valid());
__ cmp(left_side.reg(), Operand(right_side.reg()));
} else {
__ cmp(Operand(left_side.reg()), Immediate(right_side.handle()));
@@ -1945,35 +1995,50 @@ void CodeGenerator::Comparison(Condition cc,
(right_side.is_constant() && !right_side.handle()->IsSmi());
left_side.ToRegister();
right_side.ToRegister();
- JumpTarget is_smi;
- if (!known_non_smi) {
- // Check for the smi case.
+
+ if (known_non_smi) {
+ // When non-smi, call out to the compare stub.
+ CompareStub stub(cc, strict);
+ Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+ if (cc == equal) {
+ __ test(answer.reg(), Operand(answer.reg()));
+ } else {
+ __ cmp(answer.reg(), 0);
+ }
+ answer.Unuse();
+ dest->Split(cc);
+ } else {
+ // Here we split control flow to the stub call and inlined cases
+ // before finally splitting it to the control destination. We use
+ // a jump target and branching to duplicate the virtual frame at
+ // the first split. We manually handle the off-frame references
+ // by reconstituting them on the non-fall-through path.
+ JumpTarget is_smi;
+ Register left_reg = left_side.reg();
+ Register right_reg = right_side.reg();
+
Result temp = allocator_->Allocate();
ASSERT(temp.is_valid());
__ mov(temp.reg(), left_side.reg());
__ or_(temp.reg(), Operand(right_side.reg()));
__ test(temp.reg(), Immediate(kSmiTagMask));
temp.Unuse();
- is_smi.Branch(zero, &left_side, &right_side, taken);
- }
- // When non-smi, call out to the compare stub, which expects its
- // arguments in registers.
- CompareStub stub(cc, strict);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- if (cc == equal) {
- __ test(answer.reg(), Operand(answer.reg()));
- } else {
- __ cmp(answer.reg(), 0);
- }
- answer.Unuse();
- if (known_non_smi) {
- dest->Split(cc);
- } else {
+ is_smi.Branch(zero, taken);
+ // When non-smi, call out to the compare stub.
+ CompareStub stub(cc, strict);
+ Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+ if (cc == equal) {
+ __ test(answer.reg(), Operand(answer.reg()));
+ } else {
+ __ cmp(answer.reg(), 0);
+ }
+ answer.Unuse();
dest->true_target()->Branch(cc);
dest->false_target()->Jump();
- is_smi.Bind(&left_side, &right_side);
- left_side.ToRegister();
- right_side.ToRegister();
+
+ is_smi.Bind();
+ left_side = Result(left_reg);
+ right_side = Result(right_reg);
__ cmp(left_side.reg(), Operand(right_side.reg()));
right_side.Unuse();
left_side.Unuse();
@@ -2028,6 +2093,176 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
}
+void CodeGenerator::CallApplyLazy(Property* apply,
+ Expression* receiver,
+ VariableProxy* arguments,
+ int position) {
+ ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
+ ASSERT(arguments->IsArguments());
+
+ JumpTarget slow, done;
+
+ // Load the apply function onto the stack. This will usually
+ // give us a megamorphic load site. Not super, but it works.
+ Reference ref(this, apply);
+ ref.GetValue(NOT_INSIDE_TYPEOF);
+ ASSERT(ref.type() == Reference::NAMED);
+
+ // Load the receiver and the existing arguments object onto the
+ // expression stack. Avoid allocating the arguments object here.
+ Load(receiver);
+ LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+
+ // Emit the source position information after having loaded the
+ // receiver and the arguments.
+ CodeForSourcePosition(position);
+
+ // Check if the arguments object has been lazily allocated
+ // already. If so, just use that instead of copying the arguments
+ // from the stack. This also deals with cases where a local variable
+ // named 'arguments' has been introduced.
+ frame_->Dup();
+ Result probe = frame_->Pop();
+ bool try_lazy = true;
+ if (probe.is_constant()) {
+ try_lazy = probe.handle()->IsTheHole();
+ } else {
+ __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
+ probe.Unuse();
+ slow.Branch(not_equal);
+ }
+
+ if (try_lazy) {
+ JumpTarget build_args;
+
+ // Get rid of the arguments object probe.
+ frame_->Drop();
+
+ // Before messing with the execution stack, we sync all
+ // elements. This is bound to happen anyway because we're
+ // about to call a function.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ // Check that the receiver really is a JavaScript object.
+ { frame_->PushElementAt(0);
+ Result receiver = frame_->Pop();
+ receiver.ToRegister();
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ build_args.Branch(zero);
+ Result tmp = allocator_->Allocate();
+ // We allow all JSObjects including JSFunctions. As long as
+ // JS_FUNCTION_TYPE is the last instance type and it is right
+ // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
+ // bound.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, tmp.reg());
+ build_args.Branch(less);
+ }
+
+ // Verify that we're invoking Function.prototype.apply.
+ { frame_->PushElementAt(1);
+ Result apply = frame_->Pop();
+ apply.ToRegister();
+ __ test(apply.reg(), Immediate(kSmiTagMask));
+ build_args.Branch(zero);
+ Result tmp = allocator_->Allocate();
+ __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
+ build_args.Branch(not_equal);
+ __ mov(tmp.reg(),
+ FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+ Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
+ __ cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
+ Immediate(apply_code));
+ build_args.Branch(not_equal);
+ }
+
+ // Get the function receiver from the stack. Check that it
+ // really is a function.
+ __ mov(edi, Operand(esp, 2 * kPointerSize));
+ __ test(edi, Immediate(kSmiTagMask));
+ build_args.Branch(zero);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ build_args.Branch(not_equal);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ Label invoke, adapted;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+ __ j(equal, &adapted);
+
+ // No arguments adaptor frame. Copy fixed number of arguments.
+ __ mov(eax, Immediate(scope_->num_parameters()));
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ __ push(frame_->ParameterAt(i));
+ }
+ __ jmp(&invoke);
+
+ // Arguments adaptor frame present. Copy arguments from there, but
+ // avoid copying too many arguments to avoid stack overflows.
+ __ bind(&adapted);
+ static const uint32_t kArgumentsLimit = 1 * KB;
+ __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ shr(eax, kSmiTagSize);
+ __ mov(ecx, Operand(eax));
+ __ cmp(eax, kArgumentsLimit);
+ build_args.Branch(above);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack. We don't inform the virtual frame of the push, so we don't
+ // have to worry about getting rid of the elements from the virtual
+ // frame.
+ Label loop;
+ __ bind(&loop);
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &invoke);
+ __ push(Operand(edx, ecx, times_4, 1 * kPointerSize));
+ __ dec(ecx);
+ __ jmp(&loop);
+
+ // Invoke the function. The virtual frame knows about the receiver
+ // so make sure to forget that explicitly.
+ __ bind(&invoke);
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION);
+ frame_->Forget(1);
+ Result result = allocator()->Allocate(eax);
+ frame_->SetElementAt(0, &result);
+ done.Jump();
+
+ // Slow case: allocate the arguments object since we know it isn't
+ // there, and fall through to the slow case where we call
+ // Function.prototype.apply.
+ build_args.Bind();
+ Result arguments_object = StoreArgumentsObject(false);
+ frame_->Push(&arguments_object);
+ slow.Bind();
+ }
+
+ // Flip the apply function and the function to call on the stack, so
+ // the function looks like the receiver of the apply call. This way,
+ // the generic Function.prototype.apply implementation can deal with
+ // the call like it usually does.
+ Result a2 = frame_->Pop();
+ Result a1 = frame_->Pop();
+ Result ap = frame_->Pop();
+ Result fn = frame_->Pop();
+ frame_->Push(&ap);
+ frame_->Push(&fn);
+ frame_->Push(&a1);
+ frame_->Push(&a2);
+ CallFunctionStub call_function(2, NOT_IN_LOOP);
+ Result res = frame_->CallStub(&call_function, 3);
+ frame_->Push(&res);
+
+ // All done. Restore context register after call.
+ if (try_lazy) done.Bind();
+ frame_->RestoreContextRegister();
+}
+
+
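The JS-level pattern CallApplyLazy targets, shown as an illustrative example (not taken from the patch):

  // function delegate() { return target.apply(this, arguments); }
  //
  // At the apply call the arguments slot still holds the hole sentinel, so no
  // arguments object is ever built on this path: the actual arguments are
  // pushed directly from the caller (or arguments adaptor) frame and the
  // target function is invoked with InvokeFunction.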
class DeferredStackCheck: public DeferredCode {
public:
DeferredStackCheck() {
@@ -2326,9 +2561,7 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
// code by jumping to the return site.
function_return_.Jump(&return_value);
} else {
- // Though this is a (possibly) backward block, the frames can
- // only differ on their top element.
- function_return_.Bind(&return_value, 1);
+ function_return_.Bind(&return_value);
GenerateReturnSequence(&return_value);
}
}
@@ -2397,131 +2630,6 @@ void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
}
-int CodeGenerator::FastCaseSwitchMaxOverheadFactor() {
- return kFastSwitchMaxOverheadFactor;
-}
-
-
-int CodeGenerator::FastCaseSwitchMinCaseCount() {
- return kFastSwitchMinCaseCount;
-}
-
-
-// Generate a computed jump to a switch case.
-void CodeGenerator::GenerateFastCaseSwitchJumpTable(
- SwitchStatement* node,
- int min_index,
- int range,
- Label* default_label,
- Vector<Label*> case_targets,
- Vector<Label> case_labels) {
- // Notice: Internal references, used by both the jmp instruction and
- // the table entries, need to be relocated if the buffer grows. This
- // prevents the forward use of Labels, since a displacement cannot
- // survive relocation, and it also cannot safely be distinguished
- // from a real address. Instead we put in zero-values as
- // placeholders, and fill in the addresses after the labels have been
- // bound.
-
- JumpTarget setup_default;
- JumpTarget is_smi;
-
- // A non-null default label pointer indicates a default case among
- // the case labels. Otherwise we use the break target as a
- // "default".
- JumpTarget* default_target =
- (default_label == NULL) ? node->break_target() : &setup_default;
-
- // Test whether input is a smi.
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- Result switch_value = frame_->Pop();
- switch_value.ToRegister();
- __ test(switch_value.reg(), Immediate(kSmiTagMask));
- is_smi.Branch(equal, &switch_value, taken);
-
- // It's a heap object, not a smi or a failure. Check if it is a
- // heap number.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ CmpObjectType(switch_value.reg(), HEAP_NUMBER_TYPE, temp.reg());
- temp.Unuse();
- default_target->Branch(not_equal);
-
- // The switch value is a heap number. Convert it to a smi.
- frame_->Push(&switch_value);
- Result smi_value = frame_->CallRuntime(Runtime::kNumberToSmi, 1);
-
- is_smi.Bind(&smi_value);
- smi_value.ToRegister();
- // Convert the switch value to a 0-based table index.
- if (min_index != 0) {
- frame_->Spill(smi_value.reg());
- __ sub(Operand(smi_value.reg()), Immediate(min_index << kSmiTagSize));
- }
- // Go to the default case if the table index is negative or not a smi.
- __ test(smi_value.reg(), Immediate(0x80000000 | kSmiTagMask));
- default_target->Branch(not_equal, not_taken);
- __ cmp(smi_value.reg(), range << kSmiTagSize);
- default_target->Branch(greater_equal, not_taken);
-
- // The expected frame at all the case labels is a version of the
- // current one (the bidirectional entry frame, which an arbitrary
- // frame of the correct height can be merged to). Keep a copy to
- // restore at the start of every label. Create a jump target and
- // bind it to set its entry frame properly.
- JumpTarget entry_target(JumpTarget::BIDIRECTIONAL);
- entry_target.Bind(&smi_value);
- VirtualFrame* start_frame = new VirtualFrame(frame_);
-
- // 0 is placeholder.
- // Jump to the address at table_address + 2 * smi_value.reg().
- // The target of the jump is read from table_address + 4 * switch_value.
- // The Smi encoding of smi_value.reg() is 2 * switch_value.
- smi_value.ToRegister();
- __ jmp(Operand(smi_value.reg(), smi_value.reg(),
- times_1, 0x0, RelocInfo::INTERNAL_REFERENCE));
- smi_value.Unuse();
- // Calculate address to overwrite later with actual address of table.
- int32_t jump_table_ref = masm_->pc_offset() - sizeof(int32_t);
- __ Align(4);
- Label table_start;
- __ bind(&table_start);
- __ WriteInternalReference(jump_table_ref, table_start);
-
- for (int i = 0; i < range; i++) {
- // These are the table entries. 0x0 is the placeholder for case address.
- __ dd(0x0, RelocInfo::INTERNAL_REFERENCE);
- }
-
- GenerateFastCaseSwitchCases(node, case_labels, start_frame);
-
- // If there was a default case, we need to emit the code to match it.
- if (default_label != NULL) {
- if (has_valid_frame()) {
- node->break_target()->Jump();
- }
- setup_default.Bind();
- frame_->MergeTo(start_frame);
- __ jmp(default_label);
- DeleteFrame();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
-
- for (int i = 0, entry_pos = table_start.pos();
- i < range;
- i++, entry_pos += sizeof(uint32_t)) {
- if (case_targets[i] == NULL) {
- __ WriteInternalReference(entry_pos,
- *node->break_target()->entry_label());
- } else {
- __ WriteInternalReference(entry_pos, *case_targets[i]);
- }
- }
-}
-
-
void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ SwitchStatement");
@@ -2531,10 +2639,6 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
// Compile the switch value.
Load(node->tag());
- if (TryGenerateFastCaseSwitchStatement(node)) {
- return;
- }
-
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
CaseClause* default_clause = NULL;
@@ -3253,7 +3357,6 @@ void CodeGenerator::VisitTryCatch(TryCatch* node) {
// handler structure.
if (FLAG_debug_code) {
__ mov(eax, Operand::StaticVariable(handler_address));
- __ lea(eax, Operand(eax, StackHandlerConstants::kAddressDisplacement));
__ cmp(esp, Operand(eax));
__ Assert(equal, "stack pointer should point to top handler");
}
@@ -3263,6 +3366,7 @@ void CodeGenerator::VisitTryCatch(TryCatch* node) {
// The next handler address is on top of the frame. Unlink from
// the handler list and drop the rest of this handler from the
// frame.
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(Operand::StaticVariable(handler_address));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
if (has_unlinks) {
@@ -3290,15 +3394,12 @@ void CodeGenerator::VisitTryCatch(TryCatch* node) {
// Reload sp from the top handler, because some statements that we
// break from (eg, for...in) may have left stuff on the stack.
- __ mov(edx, Operand::StaticVariable(handler_address));
- const int kNextOffset = StackHandlerConstants::kNextOffset +
- StackHandlerConstants::kAddressDisplacement;
- __ lea(esp, Operand(edx, kNextOffset));
+ __ mov(esp, Operand::StaticVariable(handler_address));
frame_->Forget(frame_->height() - handler_height);
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(Operand::StaticVariable(handler_address));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- // next_sp popped.
if (i == kReturnShadowIndex) {
if (!function_return_is_shadowed_) frame_->PrepareForReturn();
@@ -3383,8 +3484,7 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
if (has_valid_frame()) {
// The next handler address is on top of the frame.
ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(eax);
- __ mov(Operand::StaticVariable(handler_address), eax);
+ frame_->EmitPop(Operand::StaticVariable(handler_address));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
// Fake a top of stack value (unneeded when FALLING) and set the
@@ -3418,13 +3518,11 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
// Reload sp from the top handler, because some statements that
// we break from (eg, for...in) may have left stuff on the
// stack.
- __ mov(edx, Operand::StaticVariable(handler_address));
- const int kNextOffset = StackHandlerConstants::kNextOffset +
- StackHandlerConstants::kAddressDisplacement;
- __ lea(esp, Operand(edx, kNextOffset));
+ __ mov(esp, Operand::StaticVariable(handler_address));
frame_->Forget(frame_->height() - handler_height);
// Unlink this handler and drop it from the frame.
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(Operand::StaticVariable(handler_address));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
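A sketch of what the unlink sequence above emits now that the next-handler link is the first word of a handler (StackHandlerConstants::kNextOffset == 0):

  // pop [handler_address]           ; handler_address = handler->next, and
  //                                 ; esp moves past the link in one step
  // add esp, kSize - kPointerSize   ; drop the remaining handler words
  // This is why the old lea through kAddressDisplacement is no longer needed.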
@@ -3690,6 +3788,44 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
}
+void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+ TypeofState state) {
+ LoadFromSlot(slot, state);
+
+ // Bail out quickly if we're not using lazy arguments allocation.
+ if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
+
+ // ... or if the slot isn't a non-parameter arguments slot.
+ if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
+
+ // Pop the loaded value from the stack.
+ Result value = frame_->Pop();
+
+ // If the loaded value is a constant, we know whether the arguments
+ // object has already been allocated.
+ if (value.is_constant()) {
+ if (value.handle()->IsTheHole()) {
+ Result arguments = StoreArgumentsObject(false);
+ frame_->Push(&arguments);
+ } else {
+ frame_->Push(&value);
+ }
+ return;
+ }
+
+ // The loaded value is in a register. If it is the sentinel that
+ // indicates that the arguments object hasn't been allocated yet,
+ // we need to allocate it now.
+ JumpTarget exit;
+ __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
+ frame_->Push(&value);
+ exit.Branch(not_equal);
+ Result arguments = StoreArgumentsObject(false);
+ frame_->SetElementAt(0, &arguments);
+ exit.Bind();
+}
+
+
Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
Slot* slot,
TypeofState typeof_state,
@@ -3862,7 +3998,7 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
void CodeGenerator::VisitSlot(Slot* node) {
Comment cmnt(masm_, "[ Slot");
- LoadFromSlot(node, typeof_state());
+ LoadFromSlotCheckForArguments(node, typeof_state());
}
@@ -4424,23 +4560,40 @@ void CodeGenerator::VisitCall(Call* node) {
// JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
// ------------------------------------------------------------------
- // Push the name of the function and the receiver onto the stack.
- frame_->Push(literal->handle());
- Load(property->obj());
+ Handle<String> name = Handle<String>::cast(literal->handle());
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
+ if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
+ name->IsEqualTo(CStrVector("apply")) &&
+ args->length() == 2 &&
+ args->at(1)->AsVariableProxy() != NULL &&
+ args->at(1)->AsVariableProxy()->IsArguments()) {
+ // Use the optimized Function.prototype.apply that avoids
+ // allocating lazily allocated arguments objects.
+ CallApplyLazy(property,
+ args->at(0),
+ args->at(1)->AsVariableProxy(),
+ node->position());
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result =
- frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count, loop_nesting());
- frame_->RestoreContextRegister();
- // Replace the function on the stack with the result.
- frame_->SetElementAt(0, &result);
+ } else {
+ // Push the name of the function and the receiver onto the stack.
+ frame_->Push(name);
+ Load(property->obj());
+
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Call the IC initialization code.
+ CodeForSourcePosition(node->position());
+ Result result =
+ frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
+ loop_nesting());
+ frame_->RestoreContextRegister();
+ // Replace the function on the stack with the result.
+ frame_->SetElementAt(0, &result);
+ }
} else {
// -------------------------------------------
@@ -4625,48 +4778,82 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
// cons. The slow case will flatten the string, which will ensure that
// the answer is in the left hand side the next time around.
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateFastCharCodeAt");
ASSERT(args->length() == 2);
- JumpTarget slow_case;
- JumpTarget end;
- JumpTarget not_a_flat_string;
- JumpTarget a_cons_string;
- JumpTarget try_again_with_new_string(JumpTarget::BIDIRECTIONAL);
- JumpTarget ascii_string;
- JumpTarget got_char_code;
+ Label slow_case;
+ Label end;
+ Label not_a_flat_string;
+ Label a_cons_string;
+ Label try_again_with_new_string;
+ Label ascii_string;
+ Label got_char_code;
Load(args->at(0));
Load(args->at(1));
- // Reserve register ecx, to use as shift amount later
- Result shift_amount = allocator()->Allocate(ecx);
- ASSERT(shift_amount.is_valid());
Result index = frame_->Pop();
- index.ToRegister();
Result object = frame_->Pop();
+
+ // Get register ecx to use as shift amount later.
+ Result shift_amount;
+ if (object.is_register() && object.reg().is(ecx)) {
+ Result fresh = allocator_->Allocate();
+ shift_amount = object;
+ object = fresh;
+ __ mov(object.reg(), ecx);
+ }
+ if (index.is_register() && index.reg().is(ecx)) {
+ Result fresh = allocator_->Allocate();
+ shift_amount = index;
+ index = fresh;
+ __ mov(index.reg(), ecx);
+ }
+ // There could be references to ecx in the frame. Allocating ecx
+ // will spill them; otherwise spill it explicitly.
+ if (shift_amount.is_valid()) {
+ frame_->Spill(ecx);
+ } else {
+ shift_amount = allocator()->Allocate(ecx);
+ }
+ ASSERT(shift_amount.is_register());
+ ASSERT(shift_amount.reg().is(ecx));
+ ASSERT(allocator_->count(ecx) == 1);
+
+ // We will mutate the index register and possibly the object register.
+ // The case where they are somehow the same register is handled
+ // because we only mutate them in the case where the receiver is a
+ // heap object and the index is not.
object.ToRegister();
- // If the receiver is a smi return undefined.
+ index.ToRegister();
+ frame_->Spill(object.reg());
+ frame_->Spill(index.reg());
+
+ // We need a single extra temporary register.
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+
+ // There is no virtual frame effect from here up to the final result
+ // push.
+
+ // If the receiver is a smi, trigger the slow case.
ASSERT(kSmiTag == 0);
__ test(object.reg(), Immediate(kSmiTagMask));
- slow_case.Branch(zero, not_taken);
+ __ j(zero, &slow_case);
- // Check for negative or non-smi index.
+ // If the index is negative or not a smi, trigger the slow case.
ASSERT(kSmiTag == 0);
__ test(index.reg(), Immediate(kSmiTagMask | 0x80000000));
- slow_case.Branch(not_zero, not_taken);
- // Get rid of the smi tag on the index.
- frame_->Spill(index.reg());
+ __ j(not_zero, &slow_case);
+ // Untag the index.
__ sar(index.reg(), kSmiTagSize);
- try_again_with_new_string.Bind(&object, &index, &shift_amount);
- // Get the type of the heap object.
- Result object_type = allocator()->Allocate();
- ASSERT(object_type.is_valid());
- __ mov(object_type.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
- __ movzx_b(object_type.reg(),
- FieldOperand(object_type.reg(), Map::kInstanceTypeOffset));
- // We don't handle non-strings.
- __ test(object_type.reg(), Immediate(kIsNotStringMask));
- slow_case.Branch(not_zero, not_taken);
+ __ bind(&try_again_with_new_string);
+ // Fetch the instance type of the receiver into ecx.
+ __ mov(ecx, FieldOperand(object.reg(), HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ // If the receiver is not a string, trigger the slow case.
+ __ test(ecx, Immediate(kIsNotStringMask));
+ __ j(not_zero, &slow_case);
// Here we make assumptions about the tag values and the shifts needed.
// See the comment in objects.h.
@@ -4675,86 +4862,75 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
String::kMediumLengthShift);
ASSERT(kShortStringTag + String::kLongLengthShift ==
String::kShortLengthShift);
- __ mov(shift_amount.reg(), Operand(object_type.reg()));
- __ and_(shift_amount.reg(), kStringSizeMask);
- __ add(Operand(shift_amount.reg()), Immediate(String::kLongLengthShift));
- // Get the length field. Temporary register now used for length.
- Result length = object_type;
- __ mov(length.reg(), FieldOperand(object.reg(), String::kLengthOffset));
- __ shr(length.reg()); // shift_amount, in ecx, is implicit operand.
+ __ and_(ecx, kStringSizeMask);
+ __ add(Operand(ecx), Immediate(String::kLongLengthShift));
+ // Fetch the length field into the temporary register.
+ __ mov(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
+ __ shr(temp.reg()); // The shift amount in ecx is an implicit operand.
// Check for index out of range.
- __ cmp(index.reg(), Operand(length.reg()));
- slow_case.Branch(greater_equal, not_taken);
- length.Unuse();
- // Load the object type into object_type again.
- // These two instructions are duplicated from above, to save a register.
- __ mov(object_type.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
- __ movzx_b(object_type.reg(),
- FieldOperand(object_type.reg(), Map::kInstanceTypeOffset));
+ __ cmp(index.reg(), Operand(temp.reg()));
+ __ j(greater_equal, &slow_case);
+ // Reload the instance type (into the temp register this time).
+ __ mov(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
+ __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
// We need special handling for non-flat strings.
ASSERT(kSeqStringTag == 0);
- __ test(object_type.reg(), Immediate(kStringRepresentationMask));
- not_a_flat_string.Branch(not_zero, &object, &index, &object_type,
- &shift_amount, not_taken);
- shift_amount.Unuse();
+ __ test(temp.reg(), Immediate(kStringRepresentationMask));
+ __ j(not_zero, &not_a_flat_string);
// Check for 1-byte or 2-byte string.
- __ test(object_type.reg(), Immediate(kStringEncodingMask));
- ascii_string.Branch(not_zero, &object, &index, &object_type, taken);
+ __ test(temp.reg(), Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);
// 2-byte string.
- // Load the 2-byte character code.
- __ movzx_w(object_type.reg(), FieldOperand(object.reg(),
- index.reg(),
- times_2,
- SeqTwoByteString::kHeaderSize));
- object.Unuse();
- index.Unuse();
- got_char_code.Jump(&object_type);
+ // Load the 2-byte character code into the temp register.
+ __ movzx_w(temp.reg(), FieldOperand(object.reg(),
+ index.reg(),
+ times_2,
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
// ASCII string.
- ascii_string.Bind(&object, &index, &object_type);
- // Load the byte.
- __ movzx_b(object_type.reg(), FieldOperand(object.reg(),
- index.reg(),
- times_1,
- SeqAsciiString::kHeaderSize));
- object.Unuse();
- index.Unuse();
- got_char_code.Bind(&object_type);
+ __ bind(&ascii_string);
+ // Load the byte into the temp register.
+ __ movzx_b(temp.reg(), FieldOperand(object.reg(),
+ index.reg(),
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ __ bind(&got_char_code);
ASSERT(kSmiTag == 0);
- __ shl(object_type.reg(), kSmiTagSize);
- frame_->Push(&object_type);
- end.Jump();
+ __ shl(temp.reg(), kSmiTagSize);
+ __ jmp(&end);
// Handle non-flat strings.
- not_a_flat_string.Bind(&object, &index, &object_type, &shift_amount);
- __ and_(object_type.reg(), kStringRepresentationMask);
- __ cmp(object_type.reg(), kConsStringTag);
- a_cons_string.Branch(equal, &object, &index, &shift_amount, taken);
- __ cmp(object_type.reg(), kSlicedStringTag);
- slow_case.Branch(not_equal, not_taken);
- object_type.Unuse();
+ __ bind(&not_a_flat_string);
+ __ and_(temp.reg(), kStringRepresentationMask);
+ __ cmp(temp.reg(), kConsStringTag);
+ __ j(equal, &a_cons_string);
+ __ cmp(temp.reg(), kSlicedStringTag);
+ __ j(not_equal, &slow_case);
// SlicedString.
- // Add the offset to the index.
+ // Add the offset to the index and trigger the slow case on overflow.
__ add(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
- slow_case.Branch(overflow);
+ __ j(overflow, &slow_case);
// Getting the underlying string is done by running the cons string code.
// ConsString.
- a_cons_string.Bind(&object, &index, &shift_amount);
- // Get the first of the two strings.
- frame_->Spill(object.reg());
- // Both sliced and cons strings store their source string at the same place.
+ __ bind(&a_cons_string);
+ // Get the first of the two strings. Both sliced and cons strings
+ // store their source string at the same offset.
ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
__ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
- try_again_with_new_string.Jump(&object, &index, &shift_amount);
+ __ jmp(&try_again_with_new_string);
- // No results live at this point.
- slow_case.Bind();
- frame_->Push(Factory::undefined_value());
- end.Bind();
+ __ bind(&slow_case);
+ // Move the undefined value into the result register, signaling
+ // the slow case to the calling code.
+ __ Set(temp.reg(), Immediate(Factory::undefined_value()));
+
+ __ bind(&end);
+ frame_->Push(&temp);
}
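Why the two-instruction shift computation above needs no branches (it follows from the ASSERTed identities at the start of this block):

  // shift = (instance_type & kStringSizeMask) + String::kLongLengthShift
  // The ASSERTs guarantee kShortStringTag + kLongLengthShift ==
  // kShortLengthShift and kMediumStringTag + kLongLengthShift ==
  // kMediumLengthShift, so adding kLongLengthShift to the size tag yields the
  // correct per-size length shift, and the implicit-ecx shr then extracts the
  // length for short, medium and long strings alike.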
@@ -4777,6 +4953,29 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+
+ // Get the frame pointer for the calling frame.
+ Result fp = allocator()->Allocate();
+ __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+ Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ j(not_equal, &check_frame_marker);
+ __ mov(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
+ Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+ fp.Unuse();
+ destination()->Split(equal);
+}
+
+
void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
// ArgumentsAccessStub takes the parameter count as an input argument
@@ -4789,6 +4988,70 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ JumpTarget leave, null, function, non_function_constructor;
+ Load(args->at(0)); // Load the object.
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ frame_->Spill(obj.reg());
+
+ // If the object is a smi, we return null.
+ __ test(obj.reg(), Immediate(kSmiTagMask));
+ null.Branch(zero);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+ { Result tmp = allocator()->Allocate();
+ __ mov(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ __ movzx_b(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset));
+ __ cmp(tmp.reg(), FIRST_JS_OBJECT_TYPE);
+ null.Branch(less);
+
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ cmp(tmp.reg(), JS_FUNCTION_TYPE);
+ function.Branch(equal);
+ }
+
+ // Check if the constructor in the map is a function.
+ { Result tmp = allocator()->Allocate();
+ __ mov(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
+ __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, tmp.reg());
+ non_function_constructor.Branch(not_equal);
+ }
+
+ // The map register now contains the constructor function. Grab the
+ // instance class name from there.
+ __ mov(obj.reg(),
+ FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
+ __ mov(obj.reg(),
+ FieldOperand(obj.reg(), SharedFunctionInfo::kInstanceClassNameOffset));
+ frame_->Push(&obj);
+ leave.Jump();
+
+ // Functions have class 'Function'.
+ function.Bind();
+ frame_->Push(Factory::function_class_symbol());
+ leave.Jump();
+
+ // Objects with a non-function constructor have class 'Object'.
+ non_function_constructor.Bind();
+ frame_->Push(Factory::Object_symbol());
+ leave.Jump();
+
+ // Non-JS objects have class null.
+ null.Bind();
+ frame_->Push(Factory::null_value());
+
+ // All done.
+ leave.Bind();
+}
+
+
void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
JumpTarget leave;
@@ -4900,6 +5163,98 @@ void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+ frame_->SpillAll();
+
+ // Make sure the frame is aligned like the OS expects.
+ static const int kFrameAlignment = OS::ActivationFrameAlignment();
+ if (kFrameAlignment > 0) {
+ ASSERT(IsPowerOf2(kFrameAlignment));
+ __ mov(edi, Operand(esp)); // Save in callee-saved register.
+ __ and_(esp, -kFrameAlignment);
+ }
+
+ // Call V8::RandomPositiveSmi().
+ __ call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
+
+ // Restore stack pointer from callee-saved register edi.
+ if (kFrameAlignment > 0) {
+ __ mov(esp, Operand(edi));
+ }
+
+ Result result = allocator_->Allocate(eax);
+ frame_->Push(&result);
+}
+
+
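The alignment trick above in concrete numbers (assuming ActivationFrameAlignment() returns 16, which is typical on ia32 but an assumption here):

  // -16 == 0xFFFFFFF0, so 'and esp, -16' clears the low four bits and rounds
  // esp down to a 16-byte boundary, e.g. 0x7fff1234 -> 0x7fff1230. The
  // original esp survives in callee-saved edi and is restored after the call.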
+void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
+ JumpTarget done;
+ JumpTarget call_runtime;
+ ASSERT(args->length() == 1);
+
+ // Load number and duplicate it.
+ Load(args->at(0));
+ frame_->Dup();
+
+ // Get the number into an unaliased register and load it onto the
+ // floating point stack, still leaving one copy on the frame.
+ Result number = frame_->Pop();
+ number.ToRegister();
+ frame_->Spill(number.reg());
+ FloatingPointHelper::LoadFloatOperand(masm_, number.reg());
+ number.Unuse();
+
+ // Perform the operation on the number.
+ switch (op) {
+ case SIN:
+ __ fsin();
+ break;
+ case COS:
+ __ fcos();
+ break;
+ }
+
+ // Go slow case if argument to operation is out of range.
+ __ fnstsw_ax();
+ __ sahf();
+ call_runtime.Branch(parity_even, not_taken);
+
+ // Allocate heap number for result if possible.
+ Result scratch1 = allocator()->Allocate();
+ Result scratch2 = allocator()->Allocate();
+ Result heap_number = allocator()->Allocate();
+ FloatingPointHelper::AllocateHeapNumber(masm_,
+ call_runtime.entry_label(),
+ scratch1.reg(),
+ scratch2.reg(),
+ heap_number.reg());
+ scratch1.Unuse();
+ scratch2.Unuse();
+
+ // Store the result in the allocated heap number.
+ __ fstp_d(FieldOperand(heap_number.reg(), HeapNumber::kValueOffset));
+ // Replace the extra copy of the argument with the result.
+ frame_->SetElementAt(0, &heap_number);
+ done.Jump();
+
+ call_runtime.Bind();
+ // Free ST(0) which was not popped before calling into the runtime.
+ __ ffree(0);
+ Result answer;
+ switch (op) {
+ case SIN:
+ answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
+ break;
+ case COS:
+ answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
+ break;
+ }
+ frame_->Push(&answer);
+ done.Bind();
+}
+
+
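How the out-of-range check in GenerateFastMathOp works (standard ia32 FPU behavior):

  // fsin/fcos set C2 in the FPU status word and leave the operand unchanged
  // when |x| >= 2^63. fnstsw_ax copies the status word into ax, and sahf
  // moves C2 into the parity flag, so 'parity_even' means the argument was
  // out of range; the runtime path then ffree's the stale value in ST(0).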
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (CheckForInlineRuntimeCall(node)) {
return;
@@ -5040,7 +5395,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break;
case Token::SUB: {
- UnarySubStub stub;
+ bool overwrite =
+ (node->AsBinaryOperation() != NULL &&
+ node->AsBinaryOperation()->ResultOverwriteAllowed());
+ UnarySubStub stub(overwrite);
// TODO(1222589): remove dependency of TOS being cached inside stub
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
@@ -5446,18 +5804,6 @@ void CodeGenerator::VisitThisFunction(ThisFunction* node) {
}
-class InstanceofStub: public CodeStub {
- public:
- InstanceofStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return Instanceof; }
- int MinorKey() { return 0; }
-};
-
-
void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
Comment cmnt(masm_, "[ CompareOperation");
@@ -5728,9 +6074,58 @@ void DeferredReferenceGetKeyedValue::Generate() {
}
+class DeferredReferenceSetKeyedValue: public DeferredCode {
+ public:
+ DeferredReferenceSetKeyedValue(Register value,
+ Register key,
+ Register receiver)
+ : value_(value), key_(key), receiver_(receiver) {
+ set_comment("[ DeferredReferenceSetKeyedValue");
+ }
+
+ virtual void Generate();
+
+ Label* patch_site() { return &patch_site_; }
+
+ private:
+ Register value_;
+ Register key_;
+ Register receiver_;
+ Label patch_site_;
+};
+
+
+void DeferredReferenceSetKeyedValue::Generate() {
+ __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
+ // Push receiver and key arguments on the stack.
+ __ push(receiver_);
+ __ push(key_);
+ // Move value argument to eax as expected by the IC stub.
+ if (!value_.is(eax)) __ mov(eax, value_);
+ // Call the IC stub.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // The delta from the start of the map-compare instruction to the
+ // test instruction. We use masm_-> directly here instead of the
+ // __ macro because the macro sometimes expands into code that
+ // can't return a value. This is encountered when doing generated
+ // code coverage tests.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+ // Here we use masm_-> instead of the __ macro because this is the
+ // instruction that gets patched and coverage code gets in the way.
+ masm_->test(eax, Immediate(-delta_to_patch_site));
+ // Restore value (returned from store IC), key and receiver
+ // registers.
+ if (!value_.is(eax)) __ mov(value_, eax);
+ __ pop(key_);
+ __ pop(receiver_);
+}
+
+
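A sketch of the instruction layout this deferred code relies on (the patching side lives in the IC machinery, not in this diff):

  // call KeyedStoreIC_...            ; IC return address points here:
  // test eax, -delta_to_patch_site   ; 5-byte marker; its imm32 encodes the
  //                                  ; distance back to the patchable cmp
  // ...
  // patch_site_:
  // cmp [elements map word], fixed_array_map  ; rewritten by the IC on a miss
  // The IC inspects the instruction after its return address; a 'test eax'
  // there flags an inlined keyed store and locates the map check to patch.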
#undef __
#define __ ACCESS_MASM(masm)
+
Handle<String> Reference::GetName() {
ASSERT(type_ == NAMED);
Property* property = expression_->AsProperty();
@@ -5753,12 +6148,19 @@ void Reference::GetValue(TypeofState typeof_state) {
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
MacroAssembler* masm = cgen_->masm();
+
+ // Record the source position for the property load.
+ Property* property = expression_->AsProperty();
+ if (property != NULL) {
+ cgen_->CodeForSourcePosition(property->position());
+ }
+
switch (type_) {
case SLOT: {
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
- cgen_->LoadFromSlot(slot, typeof_state);
+ cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
break;
}
@@ -5844,12 +6246,13 @@ void Reference::GetValue(TypeofState typeof_state) {
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
+
// Inline array load code if inside of a loop. We do not know
// the receiver map yet, so we initially generate the code with
// a check against an invalid map. In the inline cache code, we
// patch the map check if appropriate.
if (cgen_->loop_nesting() > 0) {
- Comment cmnt(masm, "[ Inlined array index load");
+ Comment cmnt(masm, "[ Inlined load from keyed Property");
Result key = cgen_->frame()->Pop();
Result receiver = cgen_->frame()->Pop();
@@ -5971,13 +6374,16 @@ void Reference::TakeValue(TypeofState typeof_state) {
ASSERT(slot != NULL);
if (slot->type() == Slot::LOOKUP ||
slot->type() == Slot::CONTEXT ||
- slot->var()->mode() == Variable::CONST) {
+ slot->var()->mode() == Variable::CONST ||
+ slot->is_arguments()) {
GetValue(typeof_state);
return;
}
- // Only non-constant, frame-allocated parameters and locals can reach
- // here.
+ // Only non-constant, frame-allocated parameters and locals can
+ // reach here. Be careful not to use the optimizations for arguments
+ // object access since it may not have been initialized yet.
+ ASSERT(!slot->is_arguments());
if (slot->type() == Slot::PARAMETER) {
cgen_->frame()->TakeParameterAt(slot->index());
} else {
@@ -5990,9 +6396,10 @@ void Reference::TakeValue(TypeofState typeof_state) {
void Reference::SetValue(InitState init_state) {
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
+ MacroAssembler* masm = cgen_->masm();
switch (type_) {
case SLOT: {
- Comment cmnt(cgen_->masm(), "[ Store to Slot");
+ Comment cmnt(masm, "[ Store to Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
cgen_->StoreToSlot(slot, init_state);
@@ -6000,7 +6407,7 @@ void Reference::SetValue(InitState init_state) {
}
case NAMED: {
- Comment cmnt(cgen_->masm(), "[ Store to named Property");
+ Comment cmnt(masm, "[ Store to named Property");
cgen_->frame()->Push(GetName());
Result answer = cgen_->frame()->CallStoreIC();
cgen_->frame()->Push(&answer);
@@ -6008,9 +6415,104 @@ void Reference::SetValue(InitState init_state) {
}
case KEYED: {
- Comment cmnt(cgen_->masm(), "[ Store to keyed Property");
- Result answer = cgen_->frame()->CallKeyedStoreIC();
- cgen_->frame()->Push(&answer);
+ Comment cmnt(masm, "[ Store to keyed Property");
+
+ // Generate inlined version of the keyed store if the code is in
+ // a loop and the key is likely to be a smi.
+ Property* property = expression()->AsProperty();
+ ASSERT(property != NULL);
+ SmiAnalysis* key_smi_analysis = property->key()->type();
+
+ if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
+ Comment cmnt(masm, "[ Inlined store to keyed Property");
+
+ // Get the receiver, key and value into registers.
+ Result value = cgen_->frame()->Pop();
+ Result key = cgen_->frame()->Pop();
+ Result receiver = cgen_->frame()->Pop();
+
+ Result tmp = cgen_->allocator_->Allocate();
+ ASSERT(tmp.is_valid());
+
+ // Determine whether the value is a constant before putting it
+ // in a register.
+ bool value_is_constant = value.is_constant();
+
+ // Make sure that value, key and receiver are in registers.
+ value.ToRegister();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ DeferredReferenceSetKeyedValue* deferred =
+ new DeferredReferenceSetKeyedValue(value.reg(),
+ key.reg(),
+ receiver.reg());
+
+ // Check that the value is a smi if it is not a constant. We
+ // can skip the write barrier for smis and constants.
+ if (!value_is_constant) {
+ __ test(value.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ }
+
+ // Check that the key is a non-negative smi.
+ __ test(key.reg(), Immediate(kSmiTagMask | 0x80000000));
+ deferred->Branch(not_zero);
+
+ // Check that the receiver is not a smi.
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
+
+ // Check that the receiver is a JSArray.
+ __ mov(tmp.reg(),
+ FieldOperand(receiver.reg(), HeapObject::kMapOffset));
+ __ movzx_b(tmp.reg(),
+ FieldOperand(tmp.reg(), Map::kInstanceTypeOffset));
+ __ cmp(tmp.reg(), JS_ARRAY_TYPE);
+ deferred->Branch(not_equal);
+
+ // Check that the key is within bounds. Both the key and the
+ // length of the JSArray are smis.
+ __ cmp(key.reg(),
+ FieldOperand(receiver.reg(), JSArray::kLengthOffset));
+ deferred->Branch(greater_equal);
+
+ // Get the elements array from the receiver and check that it
+ // is not a dictionary.
+ __ mov(tmp.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ // Bind the deferred code patch site to be able to locate the
+ // fixed array map comparison. When debugging, we patch this
+        // comparison to always fail so that we hit the IC call in the
+        // deferred code, which allows the debugger to break for fast
+        // case stores.
+ __ bind(deferred->patch_site());
+ __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ deferred->Branch(not_equal);
+
+ // Store the value.
+ __ mov(Operand(tmp.reg(),
+ key.reg(),
+ times_2,
+ Array::kHeaderSize - kHeapObjectTag),
+ value.reg());
+ __ IncrementCounter(&Counters::keyed_store_inline, 1);
+
+ deferred->BindExit();
+
+ cgen_->frame()->Push(&receiver);
+ cgen_->frame()->Push(&key);
+ cgen_->frame()->Push(&value);
+ } else {
+ Result answer = cgen_->frame()->CallKeyedStoreIC();
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed store.
+ __ nop();
+ cgen_->frame()->Push(&answer);
+ }
break;
}
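
The single test against (kSmiTagMask | 0x80000000) above folds two guards
into one instruction: with the ia32 smi encoding (value << 1, tag bit 0),
masking both the tag bit and the sign bit accepts exactly the non-negative
smis. A minimal C++ restatement of that predicate, as a sketch:

    #include <cstdint>

    const uint32_t kSmiTagMask = 1;  // ia32 smis have tag bit 0 clear

    // Mirrors `test key, (kSmiTagMask | 0x80000000)`: the key passes only
    // if it is a smi AND its sign bit is clear (non-negative).
    bool IsNonNegativeSmi(uint32_t key_bits) {
      return (key_bits & (kSmiTagMask | 0x80000000u)) == 0;
    }
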
@@ -6267,7 +6769,8 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
FloatingPointHelper::AllocateHeapNumber(masm,
&call_runtime,
ecx,
- edx);
+ edx,
+ eax);
__ bind(&skip_allocation);
break;
default: UNREACHABLE();
@@ -6375,7 +6878,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// Fall through!
case NO_OVERWRITE:
FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
- ecx, edx);
+ ecx, edx, eax);
__ bind(&skip_allocation);
break;
default: UNREACHABLE();
@@ -6418,9 +6921,45 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// result.
__ bind(&call_runtime);
switch (op_) {
- case Token::ADD:
+ case Token::ADD: {
+ // Test for string arguments before calling runtime.
+ Label not_strings, both_strings, not_string1, string1;
+ Result answer;
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
+ __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &not_string1);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, eax);
+ __ j(above_equal, &not_string1);
+
+      // First argument is a string, test second.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &string1);
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
+ __ j(above_equal, &string1);
+
+ // First and second argument are strings.
+ __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &not_strings);
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
+ __ j(above_equal, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+ __ bind(&not_strings);
+ // Neither argument is a string.
__ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
break;
+ }
case Token::SUB:
__ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
break;
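
The Token::ADD case above is a four-way dispatch on the argument types; a
control-flow sketch with illustrative names (the real code jumps straight
to the runtime call or builtin):

    enum AddKind { BOTH_STRINGS, ONLY_FIRST_STRING, ONLY_SECOND_STRING, NO_STRINGS };

    // BOTH_STRINGS tail-calls Runtime::kStringAdd; the single-string cases
    // invoke STRING_ADD_LEFT/STRING_ADD_RIGHT; NO_STRINGS falls back to
    // the generic ADD builtin.
    AddKind ClassifyAdd(bool first_is_string, bool second_is_string) {
      if (first_is_string) {
        return second_is_string ? BOTH_STRINGS : ONLY_FIRST_STRING;
      }
      return second_is_string ? ONLY_SECOND_STRING : NO_STRINGS;
    }
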
@@ -6460,22 +6999,42 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
Label* need_gc,
Register scratch1,
- Register scratch2) {
+ Register scratch2,
+ Register result) {
ExternalReference allocation_top =
ExternalReference::new_space_allocation_top_address();
ExternalReference allocation_limit =
ExternalReference::new_space_allocation_limit_address();
__ mov(Operand(scratch1), Immediate(allocation_top));
- __ mov(eax, Operand(scratch1, 0));
- __ lea(scratch2, Operand(eax, HeapNumber::kSize)); // scratch2: new top
+ __ mov(result, Operand(scratch1, 0));
+ __ lea(scratch2, Operand(result, HeapNumber::kSize)); // scratch2: new top
__ cmp(scratch2, Operand::StaticVariable(allocation_limit));
__ j(above, need_gc, not_taken);
__ mov(Operand(scratch1, 0), scratch2); // store new top
- __ mov(Operand(eax, HeapObject::kMapOffset),
+ __ mov(Operand(result, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
// Tag old top and use as result.
- __ add(Operand(eax), Immediate(kHeapObjectTag));
+ __ add(Operand(result), Immediate(kHeapObjectTag));
+}
+
+
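
The new `result` parameter aside, AllocateHeapNumber is a classic
bump-pointer fast path. A self-contained sketch of the same logic, with
illustrative parameters standing in for the allocation top/limit globals
the stub reads through ExternalReference:

    // Returns a tagged pointer, or 0 when the caller must collect garbage
    // (the stub jumps to need_gc instead).
    char* AllocateHeapNumberFast(char** new_space_top, char* new_space_limit) {
      const int kHeapNumberSize = 12;  // map word + 8-byte double on ia32
      const int kHeapObjectTag = 1;
      char* result = *new_space_top;
      char* new_top = result + kHeapNumberSize;
      if (new_top > new_space_limit) return 0;
      *new_space_top = new_top;        // store new top
      // The stub then writes the heap-number map at offset 0 before
      // tagging the pointer.
      return result + kHeapObjectTag;
    }
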
+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+ Register scratch) {
+ Label load_smi, done;
+
+ __ test(scratch, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi, not_taken);
+ __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi);
+ __ sar(scratch, kSmiTagSize);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+
+ __ bind(&done);
}
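
Restated in scalar terms, LoadFloatOperand converts either representation
of a number to a double; the real code leaves the result on the x87 stack.
A sketch, with the heap-number value passed explicitly for illustration:

    // A smi is untagged with an arithmetic shift and widened; anything
    // else is assumed to be a heap number whose value field is read as-is.
    double LoadFloatOperandValue(int32_t bits, const double* heap_number_value) {
      const int kSmiTagSize = 1;
      if ((bits & 1) == 0) {                      // kSmiTagMask: it's a smi
        return static_cast<double>(bits >> kSmiTagSize);
      }
      return *heap_number_value;                  // fld_d of kValueOffset
    }
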
@@ -6577,13 +7136,21 @@ void UnarySubStub::Generate(MacroAssembler* masm) {
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
__ cmp(edx, Factory::heap_number_map());
__ j(not_equal, &slow);
- __ mov(edx, Operand(eax));
- // edx: operand
- FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx);
- // eax: allocated 'empty' number
- __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
- __ fchs();
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ if (overwrite_) {
+ __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+ __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
+ } else {
+ __ mov(edx, Operand(eax));
+ // edx: operand
+ FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx, eax);
+ // eax: allocated 'empty' number
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
+ __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+ }
__ bind(&done);
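
The overwrite case negates in place by toggling a single bit of the high
(exponent) word rather than round-tripping through the FPU. The same
operation on the raw IEEE-754 bits, as a sketch:

    #include <cstdint>
    #include <cstring>

    double NegateViaSignBit(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      // HeapNumber::kSignMask is the top bit of the high word.
      bits ^= static_cast<uint64_t>(0x80000000u) << 32;
      std::memcpy(&v, &bits, sizeof bits);
      return v;
    }
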
@@ -6727,7 +7294,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
// Read top bits of double representation (second word of value).
- __ mov(eax, FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize));
+ __ mov(eax, FieldOperand(edx, HeapNumber::kExponentOffset));
// Test that exponent bits are all set.
__ not_(eax);
__ test(eax, Immediate(0x7ff00000));
@@ -6737,7 +7304,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Shift out flag and all exponent bits, retaining only mantissa.
__ shl(eax, 12);
// Or with all low-bits of mantissa.
- __ or_(eax, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ or_(eax, FieldOperand(edx, HeapNumber::kMantissaOffset));
  // Return zero (equal) if all mantissa bits are zero (it's an Infinity)
  // and non-zero if any is set (it's a NaN).
__ ret(0);
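
The exponent/mantissa tests above translate directly to portable C++; a
sketch of the same NaN classification on the raw words:

    #include <cstdint>
    #include <cstring>

    bool IsNaNBits(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);  // sign|exp|mantissa[51:32]
      uint32_t lo = static_cast<uint32_t>(bits);        // mantissa[31:0]
      bool exponent_all_set = (hi & 0x7ff00000u) == 0x7ff00000u;
      uint32_t mantissa_top = hi << 12;   // shift out sign and exponent bits
      return exponent_all_set && ((mantissa_top | lo) != 0);
    }
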
@@ -6824,17 +7391,16 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
}
- // Save the return address (and get it off the stack).
+ // Push arguments below the return address.
__ pop(ecx);
-
- // Push arguments.
__ push(eax);
__ push(edx);
__ push(ecx);
// Inlined floating point compare.
// Call builtin if operands are not floating point or smi.
- FloatingPointHelper::CheckFloatOperands(masm, &call_builtin, ebx);
+ Label check_for_symbols;
+ FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols, ebx);
FloatingPointHelper::LoadFloatOperands(masm, ecx);
__ FCmp();
@@ -6858,6 +7424,18 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ mov(eax, 1);
__ ret(2 * kPointerSize); // eax, edx were pushed
+ // Fast negative check for symbol-to-symbol equality.
+ __ bind(&check_for_symbols);
+ if (cc_ == equal) {
+ BranchIfNonSymbol(masm, &call_builtin, eax, ecx);
+ BranchIfNonSymbol(masm, &call_builtin, edx, ecx);
+
+ // We've already checked for object identity, so if both operands
+ // are symbols they aren't equal. Register eax already holds a
+ // non-zero value, which indicates not equal, so just return.
+ __ ret(2 * kPointerSize);
+ }
+
__ bind(&call_builtin);
  // Must swap argument order.
__ pop(ecx);
@@ -6891,6 +7469,20 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ test(object, Immediate(kSmiTagMask));
+ __ j(zero, label);
+ __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
+ __ cmp(scratch, kSymbolTag | kStringTag);
+ __ j(not_equal, label);
+}
+
+
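
BranchIfNonSymbol's mask-and-compare checks two instance-type bits at
once: the object must be a string and must carry the symbol bit. A sketch
of the predicate; the bit values below are illustrative assumptions, not
necessarily V8's actual encoding:

    const unsigned kIsNotStringMask = 0x80;  // set => not a string
    const unsigned kStringTag       = 0x00;
    const unsigned kIsSymbolMask    = 0x20;  // set => interned (symbol)
    const unsigned kSymbolTag       = 0x20;

    bool IsSymbolType(unsigned instance_type) {
      return (instance_type & (kIsSymbolMask | kIsNotStringMask))
             == (kSymbolTag | kStringTag);
    }
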
void StackCheckStub::Generate(MacroAssembler* masm) {
// Because builtins always remove the receiver from the stack, we
// have to fake one to avoid underflowing the stack. The receiver
@@ -6933,28 +7525,34 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
-
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize); // adjust this code
+ // eax holds the exception.
+
+ // Adjust this code if not the case.
+ ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop the sp to the top of the handler.
ExternalReference handler_address(Top::k_handler_address);
- __ mov(edx, Operand::StaticVariable(handler_address));
- __ mov(ecx, Operand(edx, -1 * kPointerSize)); // get next in chain
- __ mov(Operand::StaticVariable(handler_address), ecx);
- __ mov(esp, Operand(edx));
- __ pop(edi);
+ __ mov(esp, Operand::StaticVariable(handler_address));
+
+ // Restore next handler and frame pointer, discard handler state.
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(Operand::StaticVariable(handler_address));
+ ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
__ pop(ebp);
- __ pop(edx); // remove code pointer
- __ pop(edx); // remove state
+ __ pop(edx); // Remove state.
- // Before returning we restore the context from the frame pointer if not NULL.
- // The frame pointer is NULL in the exception handler of a JS entry frame.
- __ xor_(esi, Operand(esi)); // tentatively set context pointer to NULL
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of
+ // a JS entry frame.
+ __ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL.
Label skip;
__ cmp(ebp, 0);
__ j(equal, &skip, not_taken);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
+ ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ ret(0);
}
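
At the C level the unwind above amounts to one unlink on the handler
chain, using the new 4-word record layout asserted throughout. A sketch
with illustrative types:

    struct StackHandler {   // field order == stack order, lowest address first
      StackHandler* next;   // kNextOffset  == 0
      void* fp;             // kFPOffset    == 1 * kPointerSize
      int state;            // kStateOffset == 2 * kPointerSize
      void* pc;             // kPCOffset    == 3 * kPointerSize
    };

    void UnwindTopHandler(StackHandler** handler_address) {
      StackHandler* handler = *handler_address;  // esp <- handler top
      *handler_address = handler->next;          // pop next handler
      // ebp <- handler->fp; handler->state is discarded; `ret` resumes
      // at handler->pc.
    }
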
@@ -7040,51 +7638,49 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
- // Fetch top stack handler.
+ // Adjust this code if not the case.
+ ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop sp to the top stack handler.
ExternalReference handler_address(Top::k_handler_address);
- __ mov(edx, Operand::StaticVariable(handler_address));
+ __ mov(esp, Operand::StaticVariable(handler_address));
// Unwind the handlers until the ENTRY handler is found.
Label loop, done;
__ bind(&loop);
// Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kAddressDisplacement +
- StackHandlerConstants::kStateOffset;
- __ cmp(Operand(edx, kStateOffset), Immediate(StackHandler::ENTRY));
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
__ j(equal, &done);
// Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kAddressDisplacement +
- StackHandlerConstants::kNextOffset;
- __ mov(edx, Operand(edx, kNextOffset));
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ __ mov(esp, Operand(esp, kNextOffset));
__ jmp(&loop);
__ bind(&done);
  // Set the top handler address to the next handler past the current
  // ENTRY handler.
- __ mov(eax, Operand(edx, kNextOffset));
- __ mov(Operand::StaticVariable(handler_address), eax);
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(Operand::StaticVariable(handler_address));
// Set external caught exception to false.
- __ mov(eax, false);
ExternalReference external_caught(Top::k_external_caught_exception_address);
+ __ mov(eax, false);
__ mov(Operand::StaticVariable(external_caught), eax);
// Set pending exception and eax to out of memory exception.
- __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
__ mov(Operand::StaticVariable(pending_exception), eax);
- // Restore the stack to the address of the ENTRY handler
- __ mov(esp, Operand(edx));
-
  // Clear the context pointer.
__ xor_(esi, Operand(esi));
- // Restore registers from handler.
- __ pop(edi); // PP
- __ pop(ebp); // FP
- __ pop(edx); // Code
- __ pop(edx); // State
+ // Restore fp from handler and discard handler state.
+ ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+ __ pop(ebp);
+ __ pop(edx); // State.
+ ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ ret(0);
}
@@ -7095,12 +7691,11 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// ebp: frame pointer (restored after C call)
// esp: stack pointer (restored after C call)
// esi: current context (C callee-saved)
- // edi: caller's parameter pointer pp (C callee-saved)
+ // edi: JS function of the caller (C callee-saved)
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
+ // NOTE: Invocations of builtins may return failure objects instead
+ // of a proper result. The builtin entry handles this by performing
+ // a garbage collection and retrying the builtin (twice).
StackFrame::Type frame_type = is_debug_break ?
StackFrame::EXIT_DEBUG :
@@ -7203,7 +7798,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Invoke: Link this frame into the handler chain.
__ bind(&invoke);
__ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
- __ push(eax); // flush TOS
// Clear any pending exceptions.
__ mov(edx,
@@ -7315,6 +7909,12 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
+int CompareStub::MinorKey() {
+  // Encode the two parameters in a unique 16-bit value.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
+ return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
+}
+
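
For clarity, the inverse of the encoding above (a sketch; the stub cache
only needs the forward direction):

    unsigned DecodeCondition(int minor_key) { return minor_key >> 1; }
    bool DecodeStrict(int minor_key) { return (minor_key & 1) != 0; }
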
#undef __
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/ia32/codegen-ia32.h b/V8Binding/v8/src/ia32/codegen-ia32.h
index 9b609a1..5cd50b8 100644
--- a/V8Binding/v8/src/ia32/codegen-ia32.h
+++ b/V8Binding/v8/src/ia32/codegen-ia32.h
@@ -273,6 +273,14 @@ class CodeGenState BASE_EMBEDDED {
};
+// -------------------------------------------------------------------------
+// Arguments allocation mode
+
+enum ArgumentsAllocationMode {
+ NO_ARGUMENTS_ALLOCATION,
+ EAGER_ARGUMENTS_ALLOCATION,
+ LAZY_ARGUMENTS_ALLOCATION
+};
// -------------------------------------------------------------------------
@@ -332,12 +340,11 @@ class CodeGenerator: public AstVisitor {
// Accessors
Scope* scope() const { return scope_; }
+ bool is_eval() { return is_eval_; }
// Generating deferred code.
void ProcessDeferred();
- bool is_eval() { return is_eval_; }
-
// State
TypeofState typeof_state() const { return state_->typeof_state(); }
ControlDestination* destination() const { return state_->destination(); }
@@ -373,6 +380,12 @@ class CodeGenerator: public AstVisitor {
// target (which can not be done more than once).
void GenerateReturnSequence(Result* return_value);
+ // Returns the arguments allocation mode.
+ ArgumentsAllocationMode ArgumentsMode() const;
+
+ // Store the arguments object and allocate it if necessary.
+ Result StoreArgumentsObject(bool initial);
+
// The following are used by class Reference.
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
@@ -408,6 +421,7 @@ class CodeGenerator: public AstVisitor {
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+ void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow);
@@ -470,6 +484,14 @@ class CodeGenerator: public AstVisitor {
void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+  // Use an optimized version of Function.prototype.apply that avoids
+ // allocating the arguments object and just copies the arguments
+ // from the stack.
+ void CallApplyLazy(Property* apply,
+ Expression* receiver,
+ VariableProxy* arguments,
+ int position);
+
void CheckStack();
struct InlineRuntimeLUT {
@@ -500,11 +522,15 @@ class CodeGenerator: public AstVisitor {
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
+ // Support for construct call checks.
+ void GenerateIsConstructCall(ZoneList<Expression*>* args);
+
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArgumentsAccess(ZoneList<Expression*>* args);
- // Support for accessing the value field of an object (used by Date).
+ // Support for accessing the class and value fields of an object.
+ void GenerateClassOf(ZoneList<Expression*>* args);
void GenerateValueOf(ZoneList<Expression*>* args);
void GenerateSetValueOf(ZoneList<Expression*>* args);
@@ -518,57 +544,14 @@ class CodeGenerator: public AstVisitor {
void GenerateGetFramePointer(ZoneList<Expression*>* args);
- // Methods and constants for fast case switch statement support.
- //
- // Only allow fast-case switch if the range of labels is at most
- // this factor times the number of case labels.
- // Value is derived from comparing the size of code generated by the normal
- // switch code for Smi-labels to the size of a single pointer. If code
- // quality increases this number should be decreased to match.
- static const int kFastSwitchMaxOverheadFactor = 5;
-
- // Minimal number of switch cases required before we allow jump-table
- // optimization.
- static const int kFastSwitchMinCaseCount = 5;
-
- // The limit of the range of a fast-case switch, as a factor of the number
- // of cases of the switch. Each platform should return a value that
- // is optimal compared to the default code generated for a switch statement
- // on that platform.
- int FastCaseSwitchMaxOverheadFactor();
-
- // The minimal number of cases in a switch before the fast-case switch
- // optimization is enabled. Each platform should return a value that
- // is optimal compared to the default code generated for a switch statement
- // on that platform.
- int FastCaseSwitchMinCaseCount();
-
- // Allocate a jump table and create code to jump through it.
- // Should call GenerateFastCaseSwitchCases to generate the code for
- // all the cases at the appropriate point.
- void GenerateFastCaseSwitchJumpTable(SwitchStatement* node,
- int min_index,
- int range,
- Label* fail_label,
- Vector<Label*> case_targets,
- Vector<Label> case_labels);
-
- // Generate the code for cases for the fast case switch.
- // Called by GenerateFastCaseSwitchJumpTable.
- void GenerateFastCaseSwitchCases(SwitchStatement* node,
- Vector<Label> case_labels,
- VirtualFrame* start_frame);
-
- // Fast support for constant-Smi switches.
- void GenerateFastCaseSwitchStatement(SwitchStatement* node,
- int min_index,
- int range,
- int default_index);
-
- // Fast support for constant-Smi switches. Tests whether switch statement
- // permits optimization and calls GenerateFastCaseSwitch if it does.
- // Returns true if the fast-case switch was generated, and false if not.
- bool TryGenerateFastCaseSwitchStatement(SwitchStatement* node);
+ // Fast support for Math.random().
+ void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+
+ // Fast support for Math.sin and Math.cos.
+ enum MathOp { SIN, COS };
+ void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
+ inline void GenerateMathSin(ZoneList<Expression*>* args);
+ inline void GenerateMathCos(ZoneList<Expression*>* args);
// Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation
diff --git a/V8Binding/v8/src/ia32/frames-ia32.h b/V8Binding/v8/src/ia32/frames-ia32.h
index aec1f48..3a7c86b 100644
--- a/V8Binding/v8/src/ia32/frames-ia32.h
+++ b/V8Binding/v8/src/ia32/frames-ia32.h
@@ -55,16 +55,10 @@ typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
class StackHandlerConstants : public AllStatic {
public:
static const int kNextOffset = 0 * kPointerSize;
- static const int kPPOffset = 1 * kPointerSize;
- static const int kFPOffset = 2 * kPointerSize;
+ static const int kFPOffset = 1 * kPointerSize;
+ static const int kStateOffset = 2 * kPointerSize;
+ static const int kPCOffset = 3 * kPointerSize;
- // TODO(1233780): Get rid of the code slot in stack handlers.
- static const int kCodeOffset = 3 * kPointerSize;
-
- static const int kStateOffset = 4 * kPointerSize;
- static const int kPCOffset = 5 * kPointerSize;
-
- static const int kAddressDisplacement = -1 * kPointerSize;
static const int kSize = kPCOffset + kPointerSize;
};
@@ -85,12 +79,12 @@ class ExitFrameConstants : public AllStatic {
static const int kDebugMarkOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
- // Let the parameters pointer for exit frames point just below the
- // frame structure on the stack (frame pointer and return address).
- static const int kPPDisplacement = +2 * kPointerSize;
-
static const int kCallerFPOffset = 0 * kPointerSize;
static const int kCallerPCOffset = +1 * kPointerSize;
+
+ // FP-relative displacement of the caller's SP. It points just
+ // below the saved PC.
+ static const int kCallerSPDisplacement = +2 * kPointerSize;
};
@@ -112,7 +106,7 @@ class JavaScriptFrameConstants : public AllStatic {
static const int kSavedRegistersOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
- // CallerSP-relative (aka PP-relative)
+ // Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
};
@@ -136,157 +130,6 @@ inline Object* JavaScriptFrame::function_slot_object() const {
}
-// ----------------------------------------------------
-
-
-
-
- // C Entry frames:
-
- // lower | Stack |
- // addresses | ^ |
- // | | |
- // | |
- // +-------------+
- // | entry_pc |
- // +-------------+ <--+ entry_sp
- // . |
- // . |
- // . |
- // +-------------+ |
- // -3 | entry_sp --+----+
- // e +-------------+
- // n -2 | C function |
- // t +-------------+
- // r -1 | caller_pp |
- // y +-------------+ <--- fp (frame pointer, ebp)
- // 0 | caller_fp |
- // f +-------------+
- // r 1 | caller_pc |
- // a +-------------+ <--- caller_sp (stack pointer, esp)
- // m 2 | |
- // e | arguments |
- // | |
- // +- - - - - - -+
- // | argument0 |
- // +=============+
- // | |
- // | caller |
- // higher | expressions |
- // addresses | |
-
-
- // Proper JS frames:
-
- // lower | Stack |
- // addresses | ^ |
- // | | |
- // | |
- // ----------- +=============+ <--- sp (stack pointer, esp)
- // | function |
- // +-------------+
- // | |
- // | expressions |
- // | |
- // +-------------+
- // a | |
- // c | locals |
- // t | |
- // i +- - - - - - -+ <---
- // v -4 | local0 | ^
- // a +-------------+ |
- // t -3 | code | |
- // i +-------------+ |
- // o -2 | context | | kLocal0Offset
- // n +-------------+ |
- // -1 | caller_pp | v
- // f +-------------+ <--- fp (frame pointer, ebp)
- // r 0 | caller_fp |
- // a +-------------+
- // m 1 | caller_pc |
- // e +-------------+ <--- caller_sp (incl. parameters)
- // 2 | |
- // | parameters |
- // | |
- // +- - - - - - -+ <---
- // -2 | parameter0 | ^
- // +-------------+ | kParam0Offset
- // -1 | receiver | v
- // ----------- +=============+ <--- pp (parameter pointer, edi)
- // 0 | function |
- // +-------------+
- // | |
- // | caller |
- // higher | expressions |
- // addresses | |
-
-
- // JS entry frames: When calling from C to JS, we construct two extra
- // frames: An entry frame (C) and a trampoline frame (JS). The
- // following pictures shows the two frames:
-
- // lower | Stack |
- // addresses | ^ |
- // | | |
- // | |
- // ----------- +=============+ <--- sp (stack pointer, esp)
- // | |
- // | parameters |
- // t | |
- // r +- - - - - - -+
- // a | parameter0 |
- // m +-------------+
- // p | receiver |
- // o +-------------+ <---
- // l | function | ^
- // i +-------------+ |
- // n -3 | code | | kLocal0Offset
- // e +-------------+
- // -2 | NULL | context is always NULL
- // +-------------+
- // f -1 | NULL | caller pp is always NULL for entry frames
- // r +-------------+ <--- fp (frame pointer, ebp)
- // a 0 | caller fp |
- // m +-------------+
- // e 1 | caller pc |
- // +-------------+ <--- caller_sp (incl. parameters)
- // | 0 |
- // ----------- +=============+ <--- pp (parameter pointer, edi)
- // | 0 |
- // +-------------+ <---
- // . ^
- // . | try-handler (HandlerOffsets::kSize)
- // . v
- // +-------------+ <---
- // -5 | next top pp |
- // +-------------+
- // e -4 | next top fp |
- // n +-------------+ <---
- // t -3 | ebx | ^
- // r +-------------+ |
- // y -2 | esi | | callee-saved registers
- // +-------------+ |
- // -1 | edi | v
- // f +-------------+ <--- fp
- // r 0 | caller fp |
- // a +-------------+ pp == NULL (parameter pointer)
- // m 1 | caller pc |
- // e +-------------+ <--- caller sp
- // 2 | code entry | ^
- // +-------------+ |
- // 3 | function | |
- // +-------------+ | arguments passed from C code
- // 4 | receiver | |
- // +-------------+ |
- // 5 | argc | |
- // +-------------+ |
- // 6 | argv | v
- // +-------------+ <---
- // | |
- // higher | |
- // addresses | |
-
-
} } // namespace v8::internal
#endif // V8_IA32_FRAMES_IA32_H_
diff --git a/V8Binding/v8/src/ia32/ic-ia32.cc b/V8Binding/v8/src/ia32/ic-ia32.cc
index d7f264d..004dad2 100644
--- a/V8Binding/v8/src/ia32/ic-ia32.cc
+++ b/V8Binding/v8/src/ia32/ic-ia32.cc
@@ -66,9 +66,15 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
// Test the has_named_interceptor bit in the map.
__ test(FieldOperand(r0, Map::kInstanceAttributesOffset),
Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
+
// Jump to miss if the interceptor bit is set.
__ j(not_zero, miss_label, not_taken);
+  // Bail out if we have a JS global proxy object.
+ __ movzx_b(r0, FieldOperand(r0, Map::kInstanceTypeOffset));
+ __ cmp(r0, JS_GLOBAL_PROXY_TYPE);
+ __ j(equal, miss_label, not_taken);
+
// Check that the properties array is a dictionary.
__ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
__ cmp(FieldOperand(r0, HeapObject::kMapOffset),
@@ -141,6 +147,9 @@ static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
}
+const int LoadIC::kOffsetToLoadInstruction = 13;
+
+
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : name
@@ -747,6 +756,21 @@ void KeyedLoadIC::ClearInlinedVersion(Address address) {
}
+void KeyedStoreIC::ClearInlinedVersion(Address address) {
+ // Insert null as the elements map to check for. This will make
+ // sure that the elements fast-case map check fails so that control
+ // flows to the IC instead of the inlined version.
+ PatchInlinedStore(address, Heap::null_value());
+}
+
+
+void KeyedStoreIC::RestoreInlinedVersion(Address address) {
+ // Restore the fast-case elements map check so that the inlined
+ // version can be used again.
+ PatchInlinedStore(address, Heap::fixed_array_map());
+}
+
+
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// The address of the instruction following the call.
Address test_instruction_address = address + 4;
@@ -774,7 +798,7 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
}
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+static bool PatchInlinedMapCheck(Address address, Object* map) {
Address test_instruction_address = address + 4; // 4 = stub address
// The keyed load has a fast inlined case if the IC call instruction
// is immediately followed by a test instruction.
@@ -795,6 +819,16 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
}
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+ return PatchInlinedMapCheck(address, map);
+}
+
+
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+ return PatchInlinedMapCheck(address, map);
+}
+
+
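
Both patchers rely on the same site convention used by the code generator:
the instruction after the IC call's return address is either the nop
emitted for the generic case or a `test eax, imm32` marking an inlined
fast case. A sketch of the discriminating check, assuming the ia32 opcode
0xa9 for `test eax, imm32`:

    #include <cstdint>

    bool SiteHasInlinedFastCase(const uint8_t* instruction_after_call) {
      const uint8_t kTestEaxByte = 0xa9;  // opcode of `test eax, imm32`
      return *instruction_after_call == kTestEaxByte;
    }
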
// Defined in ic.cc.
Object* KeyedLoadIC_Miss(Arguments args);
diff --git a/V8Binding/v8/src/ia32/jump-target-ia32.cc b/V8Binding/v8/src/ia32/jump-target-ia32.cc
index 9644a16..587fb2d 100644
--- a/V8Binding/v8/src/ia32/jump-target-ia32.cc
+++ b/V8Binding/v8/src/ia32/jump-target-ia32.cc
@@ -164,7 +164,7 @@ void JumpTarget::Call() {
}
-void JumpTarget::DoBind(int mergable_elements) {
+void JumpTarget::DoBind() {
ASSERT(cgen() != NULL);
ASSERT(!is_bound());
@@ -210,7 +210,7 @@ void JumpTarget::DoBind(int mergable_elements) {
// Fast case: no forward jumps, possible backward ones. Remove
// constants and copies above the watermark on the fall-through
// frame and use it as the entry frame.
- cgen()->frame()->MakeMergable(mergable_elements);
+ cgen()->frame()->MakeMergable();
entry_frame_ = new VirtualFrame(cgen()->frame());
}
__ bind(&entry_label_);
@@ -252,7 +252,7 @@ void JumpTarget::DoBind(int mergable_elements) {
}
// Compute the frame to use for entry to the block.
- ComputeEntryFrame(mergable_elements);
+ ComputeEntryFrame();
// Some moves required to merge to an expected frame require purely
// frame state changes, and do not require any code generation.
diff --git a/V8Binding/v8/src/ia32/macro-assembler-ia32.cc b/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
index 7636c4e..479b8ca 100644
--- a/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
+++ b/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
@@ -358,7 +358,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
// Setup the frame structure on the stack.
- ASSERT(ExitFrameConstants::kPPDisplacement == +2 * kPointerSize);
+ ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
push(ebp);
@@ -448,7 +448,8 @@ void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
- ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize); // adjust this code
+ // Adjust this code if not the case.
+ ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// The pc (return address) is already on TOS.
if (try_location == IN_JAVASCRIPT) {
if (type == TRY_CATCH_HANDLER) {
@@ -456,23 +457,18 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
} else {
push(Immediate(StackHandler::TRY_FINALLY));
}
- push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
push(ebp);
- push(edi);
} else {
ASSERT(try_location == IN_JS_ENTRY);
- // The parameter pointer is meaningless here and ebp does not
- // point to a JS frame. So we save NULL for both pp and ebp. We
- // expect the code throwing an exception to check ebp before
- // dereferencing it to restore the context.
+ // The frame pointer does not point to a JS frame so we save NULL
+ // for ebp. We expect the code throwing an exception to check ebp
+ // before dereferencing it to restore the context.
push(Immediate(StackHandler::ENTRY));
- push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
- push(Immediate(0)); // NULL frame pointer
- push(Immediate(0)); // NULL parameter pointer
+ push(Immediate(0)); // NULL frame pointer.
}
- // Cached TOS.
- mov(eax, Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
- // Link this handler.
+ // Save the current handler as the next handler.
+ push(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+ // Link this handler as the new current one.
mov(Operand::StaticVariable(ExternalReference(Top::k_handler_address)), esp);
}
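
The record this leaves behind matches StackHandlerConstants exactly; read
top-of-stack first:

    //   [esp + 0]   next handler    (the old *Top::k_handler_address)
    //   [esp + 4]   saved ebp       (or NULL for JS entry frames)
    //   [esp + 8]   state           (TRY_CATCH / TRY_FINALLY / ENTRY)
    //   [esp + 12]  return address  (already pushed by the caller)
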
diff --git a/V8Binding/v8/src/ia32/macro-assembler-ia32.h b/V8Binding/v8/src/ia32/macro-assembler-ia32.h
index 940a8b4..42620dd 100644
--- a/V8Binding/v8/src/ia32/macro-assembler-ia32.h
+++ b/V8Binding/v8/src/ia32/macro-assembler-ia32.h
@@ -154,9 +154,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Exception handling
- // Push a new try handler and link into try handler chain.
- // The return address must be pushed before calling this helper.
- // On exit, eax contains TOS (next_sp).
+ // Push a new try handler and link into try handler chain. The return
+ // address must be pushed before calling this helper.
void PushTryHandler(CodeLocation try_location, HandlerType type);
@@ -286,7 +285,7 @@ class MacroAssembler: public Assembler {
List<Unresolved> unresolved_;
bool generating_stub_;
bool allow_stub_calls_;
- Handle<Object> code_object_; // This handle will be patched with the code
+ Handle<Object> code_object_; // This handle will be patched with the
// code object on installation.
// Helper functions for generating invokes.
diff --git a/V8Binding/v8/src/ia32/stub-cache-ia32.cc b/V8Binding/v8/src/ia32/stub-cache-ia32.cc
index b31f706..83beb65 100644
--- a/V8Binding/v8/src/ia32/stub-cache-ia32.cc
+++ b/V8Binding/v8/src/ia32/stub-cache-ia32.cc
@@ -475,9 +475,7 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
Object* CallStubCompiler::CompileCallField(Object* object,
JSObject* holder,
int index,
- String* name,
- Code::Flags flags) {
- ASSERT_EQ(FIELD, Code::ExtractTypeFromFlags(flags));
+ String* name) {
// ----------- S t a t e -------------
// -----------------------------------
Label miss;
@@ -518,16 +516,14 @@ Object* CallStubCompiler::CompileCallField(Object* object,
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCodeWithFlags(flags, name);
+ return GetCode(FIELD, name);
}
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
- CheckType check,
- Code::Flags flags) {
- ASSERT_EQ(CONSTANT_FUNCTION, Code::ExtractTypeFromFlags(flags));
+ CheckType check) {
// ----------- S t a t e -------------
// -----------------------------------
Label miss;
@@ -627,6 +623,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
+ ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
__ InvokeCode(code, expected, arguments(),
@@ -642,7 +639,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
- return GetCodeWithFlags(flags, function_name);
+ return GetCode(CONSTANT_FUNCTION, function_name);
}
@@ -718,6 +715,59 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
}
+Object* CallStubCompiler::CompileCallGlobal(JSGlobalObject* object,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -----------------------------------
+ Label miss;
+
+ __ IncrementCounter(&Counters::call_global_inline, 1);
+
+ // Get the number of arguments.
+ const int argc = arguments().immediate();
+
+ // Check that the map of the global has not changed.
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(Handle<Map>(object->map())));
+ __ j(not_equal, &miss, not_taken);
+
+ // Get the value from the cell.
+ __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
+
+ // Check that the cell contains the same function.
+ __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
+ __ j(not_equal, &miss, not_taken);
+
+ // Patch the receiver on the stack with the global proxy.
+ __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+ __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+
+ // Setup the context (function already in edi).
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // Jump to the cached code (tail call).
+ ASSERT(function->is_compiled());
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ __ InvokeCode(code, expected, arguments(),
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::call_global_inline, 1);
+ __ IncrementCounter(&Counters::call_global_inline_miss, 1);
+ Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
+}
+
+
Object* StoreStubCompiler::CompileStoreField(JSObject* object,
int index,
Map* transition,
@@ -861,6 +911,49 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
}
+Object* StoreStubCompiler::CompileStoreGlobal(JSGlobalObject* object,
+ JSGlobalPropertyCell* cell,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ IncrementCounter(&Counters::named_store_global_inline, 1);
+
+ // Check that the map of the global has not changed.
+  __ mov(ebx, Operand(esp, kPointerSize));
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(Handle<Map>(object->map())));
+ __ j(not_equal, &miss, not_taken);
+
+ // Store the value in the cell.
+ __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
+
+ // RecordWrite clobbers the value register. Pass the value being stored in
+ // edx.
+ __ mov(edx, eax);
+ __ RecordWrite(ecx, JSGlobalPropertyCell::kValueOffset, edx, ebx);
+
+ // Return the value (register eax).
+ __ ret(0);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::named_store_global_inline, 1);
+ __ IncrementCounter(&Counters::named_store_global_inline_miss, 1);
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
+}
+
+
Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
int index,
Map* transition,
@@ -999,6 +1092,47 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
}
+Object* LoadStubCompiler::CompileLoadGlobal(JSGlobalObject* object,
+ JSGlobalPropertyCell* cell,
+ String* name,
+ bool is_dont_delete) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ IncrementCounter(&Counters::named_load_global_inline, 1);
+
+ // Check that the map of the global has not changed.
+  __ mov(eax, Operand(esp, kPointerSize));
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ Immediate(Handle<Map>(object->map())));
+ __ j(not_equal, &miss, not_taken);
+
+ // Get the value from the cell.
+ __ mov(eax, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+ __ mov(eax, FieldOperand(eax, JSGlobalPropertyCell::kValueOffset));
+
+ // Check for deleted property if property can actually be deleted.
+  // Check for a deleted property if the property can actually be deleted.
+ __ cmp(eax, Factory::the_hole_value());
+ __ j(equal, &miss, not_taken);
+ }
+
+ __ ret(0);
+
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::named_load_global_inline, 1);
+ __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
+}
+
+
Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
JSObject* receiver,
JSObject* holder,
diff --git a/V8Binding/v8/src/ia32/virtual-frame-ia32.cc b/V8Binding/v8/src/ia32/virtual-frame-ia32.cc
index 5f85de7..0854636 100644
--- a/V8Binding/v8/src/ia32/virtual-frame-ia32.cc
+++ b/V8Binding/v8/src/ia32/virtual-frame-ia32.cc
@@ -174,14 +174,8 @@ void VirtualFrame::SyncRange(int begin, int end) {
}
-void VirtualFrame::MakeMergable(int mergable_elements) {
- if (mergable_elements == JumpTarget::kAllElements) {
- mergable_elements = element_count();
- }
- ASSERT(mergable_elements <= element_count());
-
- int start_index = element_count() - mergable_elements;
- for (int i = start_index; i < element_count(); i++) {
+void VirtualFrame::MakeMergable() {
+ for (int i = 0; i < element_count(); i++) {
FrameElement element = elements_[i];
if (element.is_constant() || element.is_copy()) {
@@ -195,7 +189,7 @@ void VirtualFrame::MakeMergable(int mergable_elements) {
backing_element = elements_[element.index()];
}
Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
+ ASSERT(fresh.is_valid()); // A register was spilled if all were in use.
elements_[i] =
FrameElement::RegisterElement(fresh.reg(),
FrameElement::NOT_SYNCED);
@@ -224,14 +218,12 @@ void VirtualFrame::MakeMergable(int mergable_elements) {
}
}
}
- // No need to set the copied flag---there are no copies of
- // copies or constants so the original was not copied.
- elements_[i].set_static_type(element.static_type());
+ // No need to set the copied flag --- there are no copies.
} else {
- // Clear the copy flag of non-constant, non-copy elements above
- // the high water mark. They cannot be copied because copes are
- // always higher than their backing store and copies are not
- // allowed above the water mark.
+ // Clear the copy flag of non-constant, non-copy elements.
+ // They cannot be copied because copies are not allowed.
+ // The copy flag is not relied on before the end of this loop,
+ // including when registers are spilled.
elements_[i].clear_copied();
}
}
@@ -775,14 +767,10 @@ void VirtualFrame::StoreToFrameSlotAt(int index) {
void VirtualFrame::PushTryHandler(HandlerType type) {
ASSERT(cgen()->HasValidEntryRegisters());
- // Grow the expression stack by handler size less two (the return address
- // is already pushed by a call instruction, and PushTryHandler from the
- // macro assembler will leave the top of stack in the eax register to be
- // pushed separately).
- Adjust(kHandlerSize - 2);
+ // Grow the expression stack by handler size less one (the return
+ // address is already pushed by a call instruction).
+ Adjust(kHandlerSize - 1);
__ PushTryHandler(IN_JAVASCRIPT, type);
- // TODO(1222589): remove the reliance of PushTryHandler on a cached TOS
- EmitPush(eax);
}
@@ -1008,7 +996,6 @@ Result VirtualFrame::Pop() {
if (element.is_memory()) {
Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
- temp.set_static_type(element.static_type());
__ pop(temp.reg());
return temp;
}
@@ -1040,12 +1027,11 @@ Result VirtualFrame::Pop() {
FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED);
// Preserve the copy flag on the element.
if (element.is_copied()) new_element.set_copied();
- new_element.set_static_type(element.static_type());
elements_[index] = new_element;
__ mov(temp.reg(), Operand(ebp, fp_relative(index)));
- return Result(temp.reg(), element.static_type());
+ return Result(temp.reg());
} else if (element.is_register()) {
- return Result(element.reg(), element.static_type());
+ return Result(element.reg());
} else {
ASSERT(element.is_constant());
return Result(element.handle());
diff --git a/V8Binding/v8/src/ia32/virtual-frame-ia32.h b/V8Binding/v8/src/ia32/virtual-frame-ia32.h
index 6e6ebd5..314ea73 100644
--- a/V8Binding/v8/src/ia32/virtual-frame-ia32.h
+++ b/V8Binding/v8/src/ia32/virtual-frame-ia32.h
@@ -43,7 +43,7 @@ namespace internal {
// as random access to the expression stack elements, locals, and
// parameters.
-class VirtualFrame : public ZoneObject {
+class VirtualFrame: public ZoneObject {
public:
// A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code
@@ -65,7 +65,7 @@ class VirtualFrame : public ZoneObject {
private:
bool previous_state_;
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+    CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
};
// An illegal index into the virtual frame.
@@ -78,6 +78,7 @@ class VirtualFrame : public ZoneObject {
explicit VirtualFrame(VirtualFrame* original);
CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+
MacroAssembler* masm() { return cgen()->masm(); }
// Create a duplicate of an existing valid frame element.
@@ -87,9 +88,7 @@ class VirtualFrame : public ZoneObject {
int element_count() { return elements_.length(); }
// The height of the virtual expression stack.
- int height() {
- return element_count() - expression_base_index();
- }
+ int height() { return element_count() - expression_base_index(); }
int register_location(int num) {
ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
@@ -153,11 +152,8 @@ class VirtualFrame : public ZoneObject {
void SyncRange(int begin, int end);
// Make this frame so that an arbitrary frame of the same height can
- // be merged to it. Copies and constants are removed from the
- // topmost mergable_elements elements of the frame. A
- // mergable_elements of JumpTarget::kAllElements indicates constants
- // and copies are should be removed from the entire frame.
- void MakeMergable(int mergable_elements);
+ // be merged to it. Copies and constants are removed from the frame.
+ void MakeMergable();
// Prepare this virtual frame for merging to an expected frame by
// performing some state changes that do not require generating
@@ -258,7 +254,9 @@ class VirtualFrame : public ZoneObject {
void PushReceiverSlotAddress();
// Push the function on top of the frame.
- void PushFunction() { PushFrameSlotAt(function_index()); }
+ void PushFunction() {
+ PushFrameSlotAt(function_index());
+ }
// Save the value of the esi register to the context frame slot.
void SaveContextRegister();
@@ -293,7 +291,9 @@ class VirtualFrame : public ZoneObject {
}
// The receiver frame slot.
- Operand Receiver() { return ParameterAt(-1); }
+ Operand Receiver() {
+ return ParameterAt(-1);
+ }
// Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type);
@@ -323,9 +323,7 @@ class VirtualFrame : public ZoneObject {
// Invoke builtin given the number of arguments it expects on (and
// removes from) the stack.
- Result InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- int arg_count);
+ Result InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, int arg_count);
// Call load IC. Name and receiver are found on top of the frame.
// Receiver is not dropped.
@@ -360,10 +358,14 @@ class VirtualFrame : public ZoneObject {
void Drop(int count);
// Drop one element.
- void Drop() { Drop(1); }
+ void Drop() {
+ Drop(1);
+ }
// Duplicate the top element of the frame.
- void Dup() { PushFrameSlotAt(element_count() - 1); }
+ void Dup() {
+ PushFrameSlotAt(element_count() - 1);
+ }
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
@@ -381,15 +383,17 @@ class VirtualFrame : public ZoneObject {
void EmitPush(Immediate immediate);
// Push an element on the virtual frame.
- void Push(Register reg, StaticType static_type = StaticType());
+ void Push(Register reg);
void Push(Handle<Object> value);
- void Push(Smi* value) { Push(Handle<Object>(value)); }
+ void Push(Smi* value) {
+    Push(Handle<Object>(value));
+ }
// Pushing a result invalidates it (its contents become owned by the
// frame).
void Push(Result* result) {
if (result->is_register()) {
- Push(result->reg(), result->static_type());
+ Push(result->reg());
} else {
ASSERT(result->is_constant());
Push(result->handle());
@@ -421,32 +425,48 @@ class VirtualFrame : public ZoneObject {
int register_locations_[RegisterAllocator::kNumRegisters];
// The number of frame-allocated locals and parameters respectively.
- int parameter_count() { return cgen()->scope()->num_parameters(); }
- int local_count() { return cgen()->scope()->num_stack_slots(); }
+ int parameter_count() {
+ return cgen()->scope()->num_parameters();
+ }
+ int local_count() {
+ return cgen()->scope()->num_stack_slots();
+ }
// The index of the element that is at the processor's frame pointer
// (the ebp register). The parameters, receiver, and return address
// are below the frame pointer.
- int frame_pointer() { return parameter_count() + 2; }
+ int frame_pointer() {
+ return parameter_count() + 2;
+ }
// The index of the first parameter. The receiver lies below the first
// parameter.
- int param0_index() { return 1; }
+ int param0_index() {
+ return 1;
+ }
// The index of the context slot in the frame. It is immediately
// above the frame pointer.
- int context_index() { return frame_pointer() + 1; }
+ int context_index() {
+ return frame_pointer() + 1;
+ }
// The index of the function slot in the frame. It is above the frame
// pointer and the context slot.
- int function_index() { return frame_pointer() + 2; }
+ int function_index() {
+ return frame_pointer() + 2;
+ }
// The index of the first local. Between the frame pointer and the
// locals lie the context and the function.
- int local0_index() { return frame_pointer() + 3; }
+ int local0_index() {
+ return frame_pointer() + 3;
+ }
// The index of the base of the expression stack.
- int expression_base_index() { return local0_index() + local_count(); }
+ int expression_base_index() {
+ return local0_index() + local_count();
+ }
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
@@ -550,7 +570,6 @@ class VirtualFrame : public ZoneObject {
friend class JumpTarget;
};
-
} } // namespace v8::internal
#endif // V8_IA32_VIRTUAL_FRAME_IA32_H_
diff --git a/V8Binding/v8/src/ic.cc b/V8Binding/v8/src/ic.cc
index 657614a..e062dd9 100644
--- a/V8Binding/v8/src/ic.cc
+++ b/V8Binding/v8/src/ic.cc
@@ -328,11 +328,11 @@ Object* CallIC::LoadFunction(State state,
UpdateCaches(&lookup, state, object, name);
}
+ // Get the property.
+ PropertyAttributes attr;
+ result = object->GetProperty(*object, &lookup, *name, &attr);
+ if (result->IsFailure()) return result;
if (lookup.type() == INTERCEPTOR) {
- // Get the property.
- PropertyAttributes attr;
- result = object->GetProperty(*name, &attr);
- if (result->IsFailure()) return result;
// If the object does not have the requested property, check which
// exception we need to throw.
if (attr == ABSENT) {
@@ -341,11 +341,6 @@ Object* CallIC::LoadFunction(State state,
}
return TypeError("undefined_method", object, name);
}
- } else {
- // Lookup is valid and no interceptors are involved. Get the
- // property.
- result = object->GetProperty(*name);
- if (result->IsFailure()) return result;
}
ASSERT(result != Heap::the_hole_value());
@@ -423,14 +418,29 @@ void CallIC::UpdateCaches(LookupResult* lookup,
break;
}
case NORMAL: {
- // There is only one shared stub for calling normalized
- // properties. It does not traverse the prototype chain, so the
- // property must be found in the receiver for the stub to be
- // applicable.
if (!object->IsJSObject()) return;
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (lookup->holder() != *receiver) return;
- code = StubCache::ComputeCallNormal(argc, in_loop, *name, *receiver);
+ if (object->IsJSGlobalObject()) {
+ // The stub generated for the global object picks the value directly
+ // from the property cell. So the property must be directly on the
+ // global object.
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(object);
+ if (lookup->holder() != *global) return;
+ JSGlobalPropertyCell* cell =
+ JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
+ if (cell->value()->IsJSFunction()) {
+ JSFunction* function = JSFunction::cast(cell->value());
+ code = StubCache::ComputeCallGlobal(argc, in_loop, *name, *global,
+ cell, function);
+ }
+ } else {
+ // There is only one shared stub for calling normalized
+ // properties. It does not traverse the prototype chain, so the
+ // property must be found in the receiver for the stub to be
+ // applicable.
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (lookup->holder() != *receiver) return;
+ code = StubCache::ComputeCallNormal(argc, in_loop, *name, *receiver);
+ }
break;
}
case INTERCEPTOR: {
@@ -614,12 +624,24 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
break;
}
case NORMAL: {
- // There is only one shared stub for loading normalized
- // properties. It does not traverse the prototype chain, so the
- // property must be found in the receiver for the stub to be
- // applicable.
- if (lookup->holder() != *receiver) return;
- code = StubCache::ComputeLoadNormal(*name, *receiver);
+ if (object->IsJSGlobalObject()) {
+ // The stub generated for the global object picks the value directly
+ // from the property cell. So the property must be directly on the
+ // global object.
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(object);
+ if (lookup->holder() != *global) return;
+ JSGlobalPropertyCell* cell =
+ JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
+ code = StubCache::ComputeLoadGlobal(*name, *global,
+ cell, lookup->IsDontDelete());
+ } else {
+ // There is only one shared stub for loading normalized
+ // properties. It does not traverse the prototype chain, so the
+ // property must be found in the receiver for the stub to be
+ // applicable.
+ if (lookup->holder() != *receiver) return;
+ code = StubCache::ComputeLoadNormal(*name, *receiver);
+ }
break;
}
case CALLBACKS: {
@@ -849,6 +871,39 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
}
+static bool StoreICableLookup(LookupResult* lookup) {
+ // Bail out if we didn't find a result.
+ if (!lookup->IsValid() || !lookup->IsCacheable()) return false;
+
+ // If the property is read-only, we leave the IC in its current
+ // state.
+ if (lookup->IsReadOnly()) return false;
+
+ if (!lookup->IsLoaded()) return false;
+
+ return true;
+}
+
+
+static bool LookupForStoreIC(JSObject* object,
+ String* name,
+ LookupResult* lookup) {
+ object->LocalLookup(name, lookup);
+ if (!StoreICableLookup(lookup)) {
+ return false;
+ }
+
+ if (lookup->type() == INTERCEPTOR) {
+ if (object->GetNamedInterceptor()->setter()->IsUndefined()) {
+ object->LocalLookupRealNamedProperty(name, lookup);
+ return StoreICableLookup(lookup);
+ }
+ }
+
+ return true;
+}
+
+
Object* StoreIC::Store(State state,
Handle<Object> object,
Handle<String> name,
@@ -873,12 +928,11 @@ Object* StoreIC::Store(State state,
}
// Lookup the property locally in the receiver.
- LookupResult lookup;
- receiver->LocalLookup(*name, &lookup);
-
- // Update inline cache and stub cache.
- if (FLAG_use_ic && lookup.IsLoaded()) {
- UpdateCaches(&lookup, state, receiver, name, value);
+ if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
+ LookupResult lookup;
+ if (LookupForStoreIC(*receiver, *name, &lookup)) {
+ UpdateCaches(&lookup, state, receiver, name, value);
+ }
}
// Set the property.
@@ -893,14 +947,9 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
Handle<Object> value) {
ASSERT(lookup->IsLoaded());
// Skip JSGlobalProxy.
- if (receiver->IsJSGlobalProxy()) return;
+ ASSERT(!receiver->IsJSGlobalProxy());
- // Bail out if we didn't find a result.
- if (!lookup->IsValid() || !lookup->IsCacheable()) return;
-
- // If the property is read-only, we leave the IC in its current
- // state.
- if (lookup->IsReadOnly()) return;
+ ASSERT(StoreICableLookup(lookup));
// If the property has a non-field type allowing map transitions
// where there is extra room in the object, we leave the IC in its
@@ -926,6 +975,19 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
code = StubCache::ComputeStoreField(*name, *receiver, index, *transition);
break;
}
+ case NORMAL: {
+ if (!receiver->IsJSGlobalObject()) {
+ return;
+ }
+ // The stub generated for the global object picks the value directly
+ // from the property cell. So the property must be directly on the
+ // global object.
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(receiver);
+ JSGlobalPropertyCell* cell =
+ JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
+ code = StubCache::ComputeStoreGlobal(*name, *global, cell);
+ break;
+ }
case CALLBACKS: {
if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
diff --git a/V8Binding/v8/src/ic.h b/V8Binding/v8/src/ic.h
index bd94fd8..7d03377 100644
--- a/V8Binding/v8/src/ic.h
+++ b/V8Binding/v8/src/ic.h
@@ -221,7 +221,7 @@ class LoadIC: public IC {
// The offset from the inlined patch site to the start of the
// inlined load instruction. It is 7 bytes (test eax, imm) plus
// 6 bytes (jne slow_label).
- static const int kOffsetToLoadInstruction = 13;
+ static const int kOffsetToLoadInstruction;
private:
static void Generate(MacroAssembler* masm, const ExternalReference& f);
@@ -356,6 +356,12 @@ class KeyedStoreIC: public IC {
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateExtendStorage(MacroAssembler* masm);
+ // Clear the inlined version so the IC is always hit.
+ static void ClearInlinedVersion(Address address);
+
+  // Restore the inlined version so the fast case can be hit again.
+ static void RestoreInlinedVersion(Address address);
+
private:
static void Generate(MacroAssembler* masm, const ExternalReference& f);
@@ -378,6 +384,11 @@ class KeyedStoreIC: public IC {
}
static void Clear(Address address, Code* target);
+
+ // Support for patching the map that is checked in an inlined
+ // version of keyed store.
+ static bool PatchInlinedStore(Address address, Object* map);
+
friend class IC;
};
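
ClearInlinedVersion and RestoreInlinedVersion patch a map check that the code generator embedded at the call site: pointing the check at a map no object can have forces every store through the IC stub, while patching the real map back re-enables the inlined fast case. A data-only sketch of the mechanism (illustrative names; real V8 patches machine code at a fixed offset from the call site):

#include <cstdio>

struct Map {};
static Map real_map;
static Map sentinel_map;  // a map no live object can have

struct InlinedSite { Map* expected_map; };

// Force every store through the IC stub (the "cleared" state).
static void ClearInlinedVersion(InlinedSite* site) {
  site->expected_map = &sentinel_map;
}

// Re-enable the inlined fast case for objects with the given map.
static bool PatchInlinedStore(InlinedSite* site, Map* map) {
  site->expected_map = map;
  return true;
}

static const char* Store(const InlinedSite& site, Map* object_map) {
  return object_map == site.expected_map ? "fast path" : "IC miss";
}

int main() {
  InlinedSite site = {&sentinel_map};
  printf("%s\n", Store(site, &real_map));  // IC miss (cleared)
  PatchInlinedStore(&site, &real_map);
  printf("%s\n", Store(site, &real_map));  // fast path (restored)
}
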
diff --git a/V8Binding/v8/src/jsregexp.cc b/V8Binding/v8/src/jsregexp.cc
index 6fce1f5..879f671 100644
--- a/V8Binding/v8/src/jsregexp.cc
+++ b/V8Binding/v8/src/jsregexp.cc
@@ -51,6 +51,8 @@
#include "x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
+#else
+#error Unsupported target architecture.
#endif
#include "interpreter-irregexp.h"
@@ -405,7 +407,6 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
// Prepare space for the return values.
int number_of_capture_registers =
(IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
- OffsetsVector offsets(number_of_capture_registers);
#ifdef DEBUG
if (FLAG_trace_regexp_bytecodes) {
@@ -421,15 +422,19 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
last_match_info->EnsureSize(number_of_capture_registers + kLastMatchOverhead);
- int* offsets_vector = offsets.vector();
bool rc;
+ // We have to initialize this with something to make gcc happy but we can't
+ // initialize it with its real value until after the GC-causing things are
+ // over.
+ FixedArray* array = NULL;
// Dispatch to the correct RegExp implementation.
-
Handle<String> original_subject = subject;
Handle<FixedArray> regexp(FixedArray::cast(jsregexp->data()));
if (UseNativeRegexp()) {
#if V8_TARGET_ARCH_IA32
+ OffsetsVector captures(number_of_capture_registers);
+ int* captures_vector = captures.vector();
RegExpMacroAssemblerIA32::Result res;
do {
bool is_ascii = subject->IsAsciiRepresentation();
@@ -439,8 +444,8 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
Handle<Code> code(RegExpImpl::IrregexpNativeCode(*regexp, is_ascii));
res = RegExpMacroAssemblerIA32::Match(code,
subject,
- offsets_vector,
- offsets.length(),
+ captures_vector,
+ captures.length(),
previous_index);
// If the result is RETRY, the string has changed representation, and we
// must restart from scratch.
@@ -453,7 +458,16 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
|| res == RegExpMacroAssemblerIA32::FAILURE);
rc = (res == RegExpMacroAssemblerIA32::SUCCESS);
-#else
+ if (!rc) return Factory::null_value();
+
+ array = last_match_info->elements();
+ ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
+ // The captures come in (start, end+1) pairs.
+ for (int i = 0; i < number_of_capture_registers; i += 2) {
+ SetCapture(array, i, captures_vector[i]);
+ SetCapture(array, i + 1, captures_vector[i + 1]);
+ }
+#else // !V8_TARGET_ARCH_IA32
UNREACHABLE();
#endif
} else {
@@ -461,33 +475,36 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) {
return Handle<Object>::null();
}
+    // Now that EnsureCompiledIrregexp has been called, we can get the
+    // number of registers.
+ int number_of_registers =
+ IrregexpNumberOfRegisters(FixedArray::cast(jsregexp->data()));
+ OffsetsVector registers(number_of_registers);
+ int* register_vector = registers.vector();
for (int i = number_of_capture_registers - 1; i >= 0; i--) {
- offsets_vector[i] = -1;
+ register_vector[i] = -1;
}
Handle<ByteArray> byte_codes(IrregexpByteCode(*regexp, is_ascii));
rc = IrregexpInterpreter::Match(byte_codes,
subject,
- offsets_vector,
+ register_vector,
previous_index);
+ if (!rc) return Factory::null_value();
+
+ array = last_match_info->elements();
+ ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
+ // The captures come in (start, end+1) pairs.
+ for (int i = 0; i < number_of_capture_registers; i += 2) {
+ SetCapture(array, i, register_vector[i]);
+ SetCapture(array, i + 1, register_vector[i + 1]);
+ }
}
- // Handle results from RegExp implementation.
-
- if (!rc) {
- return Factory::null_value();
- }
-
- FixedArray* array = last_match_info->elements();
- ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
- // The captures come in (start, end+1) pairs.
SetLastCaptureCount(array, number_of_capture_registers);
SetLastSubject(array, *original_subject);
SetLastInput(array, *original_subject);
- for (int i = 0; i < number_of_capture_registers; i+=2) {
- SetCapture(array, i, offsets_vector[i]);
- SetCapture(array, i + 1, offsets_vector[i + 1]);
- }
+
return last_match_info;
}
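
Both branches now copy the register vector into the match info themselves, using the same (start, end+1) layout: register pair 0 is the whole match, pair 1 the first capture, and so on — hence the 2 * (captures + 1) registers computed above. The same layout can be reproduced with std::regex standing in for Irregexp (a sketch, not V8 code):

#include <cstdio>
#include <regex>

int main() {
  std::cmatch m;
  std::regex_search("xabcx", m, std::regex("a(b)c"));
  // Flatten into Irregexp-style (start, end+1) register pairs: pair 0 is
  // the whole match, pair 1 the first capture, and so on.
  for (size_t i = 0; i < m.size(); ++i) {
    int start = static_cast<int>(m.position(i));
    int end = start + static_cast<int>(m.length(i));
    printf("register[%d] = %d, register[%d] = %d\n",
           static_cast<int>(2 * i), start, static_cast<int>(2 * i + 1), end);
  }
  // register[0] = 1, register[1] = 4   -- whole match "abc"
  // register[2] = 2, register[3] = 3   -- capture "b"
}
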
@@ -896,12 +913,13 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
// The "+1" is to avoid a push_limit of zero if stack_limit_slack() is 1.
const int push_limit = (assembler->stack_limit_slack() + 1) / 2;
+ // Count pushes performed to force a stack limit check occasionally.
+ int pushes = 0;
+
for (int reg = 0; reg <= max_register; reg++) {
if (!affected_registers.Get(reg)) {
continue;
}
- // Count pushes performed to force a stack limit check occasionally.
- int pushes = 0;
// The chronologically first deferred action in the trace
// is used to infer the action needed to restore a register
@@ -1885,7 +1903,8 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
uint32_t differing_bits = (from ^ to);
// A mask and compare is only perfect if the differing bits form a
// number like 00011111 with one single block of trailing 1s.
- if ((differing_bits & (differing_bits + 1)) == 0) {
+ if ((differing_bits & (differing_bits + 1)) == 0 &&
+ from + differing_bits == to) {
pos->determines_perfectly = true;
}
uint32_t common_bits = ~SmearBitsRight(differing_bits);
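
The extra `from + differing_bits == to` condition closes a subtle hole: trailing-ones differing bits alone do not guarantee the interval covers the whole masked block. A standalone check (plain C++) showing the case the old test wrongly classified as perfect:

#include <cstdint>
#include <cstdio>

// Standalone version of the tightened guard: a mask-and-compare test
// (c & ~differing) == (from & ~differing) accepts exactly the range
// [from, to] only when the differing bits are a block of trailing ones
// AND the range spans that whole block.
static bool MaskCompareIsPerfect(uint32_t from, uint32_t to) {
  uint32_t differing = from ^ to;
  return (differing & (differing + 1)) == 0 && from + differing == to;
}

int main() {
  // [4, 7]: differing bits 011, and the range covers the whole block.
  printf("%d\n", MaskCompareIsPerfect(4, 7));  // 1
  // [5, 6]: differing bits are also 011, but a masked compare would wrongly
  // accept 4 and 7 too -- the case the new condition rejects.
  printf("%d\n", MaskCompareIsPerfect(5, 6));  // 0
}
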
diff --git a/V8Binding/v8/src/jump-target.cc b/V8Binding/v8/src/jump-target.cc
index a8eda6b..8168dd0 100644
--- a/V8Binding/v8/src/jump-target.cc
+++ b/V8Binding/v8/src/jump-target.cc
@@ -48,7 +48,7 @@ void JumpTarget::Unuse() {
}
-void JumpTarget::ComputeEntryFrame(int mergable_elements) {
+void JumpTarget::ComputeEntryFrame() {
// Given: a collection of frames reaching by forward CFG edges and
// the directionality of the block. Compute: an entry frame for the
// block.
@@ -77,29 +77,16 @@ void JumpTarget::ComputeEntryFrame(int mergable_elements) {
int length = initial_frame->element_count();
ZoneList<FrameElement*> elements(length);
- // Convert the number of mergable elements (counted from the top
- // down) to a frame high-water mark (counted from the bottom up).
- // Elements strictly above the high-water index will be mergable in
- // entry frames for bidirectional jump targets.
- int high_water_mark = (mergable_elements == kAllElements)
- ? VirtualFrame::kIllegalIndex // All frame indices are above this.
- : length - mergable_elements - 1; // Top index if m_e == 0.
-
// Initially populate the list of elements based on the initial
// frame.
for (int i = 0; i < length; i++) {
FrameElement element = initial_frame->elements_[i];
- // We do not allow copies or constants in bidirectional frames. All
- // elements above the water mark on bidirectional frames have
- // unknown static types.
- if (direction_ == BIDIRECTIONAL && i > high_water_mark) {
+ // We do not allow copies or constants in bidirectional frames.
+ if (direction_ == BIDIRECTIONAL) {
if (element.is_constant() || element.is_copy()) {
elements.Add(NULL);
continue;
}
- // It's safe to change the static type on the initial frame
- // element, see comment in JumpTarget::Combine.
- initial_frame->elements_[i].set_static_type(StaticType::unknown());
}
elements.Add(&initial_frame->elements_[i]);
}
@@ -150,18 +137,12 @@ void JumpTarget::ComputeEntryFrame(int mergable_elements) {
for (int i = length - 1; i >= 0; i--) {
if (elements[i] == NULL) {
// Loop over all the reaching frames to check whether the element
- // is synced on all frames, to count the registers it occupies,
- // and to compute a merged static type.
+ // is synced on all frames and to count the registers it occupies.
bool is_synced = true;
RegisterFile candidate_registers;
int best_count = kMinInt;
int best_reg_num = RegisterAllocator::kInvalidRegister;
- StaticType type; // Initially invalid.
- if (direction_ != BIDIRECTIONAL || i < high_water_mark) {
- type = reaching_frames_[0]->elements_[i].static_type();
- }
-
for (int j = 0; j < reaching_frames_.length(); j++) {
FrameElement element = reaching_frames_[j]->elements_[i];
is_synced = is_synced && element.is_synced();
@@ -175,7 +156,6 @@ void JumpTarget::ComputeEntryFrame(int mergable_elements) {
best_reg_num = num;
}
}
- type = type.merge(element.static_type());
}
// If the value is synced on all frames, put it in memory. This
@@ -183,7 +163,6 @@ void JumpTarget::ComputeEntryFrame(int mergable_elements) {
// memory-to-register move when the value is needed later.
if (is_synced) {
// Already recorded as a memory element.
- entry_frame_->elements_[i].set_static_type(type);
continue;
}
@@ -198,20 +177,15 @@ void JumpTarget::ComputeEntryFrame(int mergable_elements) {
}
}
- if (best_reg_num == RegisterAllocator::kInvalidRegister) {
- // If there was no register found, the element is already
- // recorded as in memory.
- entry_frame_->elements_[i].set_static_type(type);
- } else {
+ if (best_reg_num != RegisterAllocator::kInvalidRegister) {
// If there was a register choice, use it. Preserve the copied
- // flag on the element. Set the static type as computed.
+ // flag on the element.
bool is_copied = entry_frame_->elements_[i].is_copied();
Register reg = RegisterAllocator::ToRegister(best_reg_num);
entry_frame_->elements_[i] =
FrameElement::RegisterElement(reg,
FrameElement::NOT_SYNCED);
if (is_copied) entry_frame_->elements_[i].set_copied();
- entry_frame_->elements_[i].set_static_type(type);
entry_frame_->set_register_location(reg, i);
}
}
@@ -241,25 +215,6 @@ void JumpTarget::Jump(Result* arg) {
}
-void JumpTarget::Jump(Result* arg0, Result* arg1) {
- ASSERT(cgen()->has_valid_frame());
-
- cgen()->frame()->Push(arg0);
- cgen()->frame()->Push(arg1);
- DoJump();
-}
-
-
-void JumpTarget::Jump(Result* arg0, Result* arg1, Result* arg2) {
- ASSERT(cgen()->has_valid_frame());
-
- cgen()->frame()->Push(arg0);
- cgen()->frame()->Push(arg1);
- cgen()->frame()->Push(arg2);
- DoJump();
-}
-
-
void JumpTarget::Branch(Condition cc, Hint hint) {
DoBranch(cc, hint);
}
@@ -295,84 +250,6 @@ void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
}
-void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
- ASSERT(cgen()->frame() != NULL);
-
- // We want to check that non-frame registers at the call site stay in
- // the same registers on the fall-through branch.
- DECLARE_ARGCHECK_VARS(arg0);
- DECLARE_ARGCHECK_VARS(arg1);
-
- cgen()->frame()->Push(arg0);
- cgen()->frame()->Push(arg1);
- DoBranch(cc, hint);
- *arg1 = cgen()->frame()->Pop();
- *arg0 = cgen()->frame()->Pop();
-
- ASSERT_ARGCHECK(arg0);
- ASSERT_ARGCHECK(arg1);
-}
-
-
-void JumpTarget::Branch(Condition cc,
- Result* arg0,
- Result* arg1,
- Result* arg2,
- Hint hint) {
- ASSERT(cgen()->frame() != NULL);
-
- // We want to check that non-frame registers at the call site stay in
- // the same registers on the fall-through branch.
- DECLARE_ARGCHECK_VARS(arg0);
- DECLARE_ARGCHECK_VARS(arg1);
- DECLARE_ARGCHECK_VARS(arg2);
-
- cgen()->frame()->Push(arg0);
- cgen()->frame()->Push(arg1);
- cgen()->frame()->Push(arg2);
- DoBranch(cc, hint);
- *arg2 = cgen()->frame()->Pop();
- *arg1 = cgen()->frame()->Pop();
- *arg0 = cgen()->frame()->Pop();
-
- ASSERT_ARGCHECK(arg0);
- ASSERT_ARGCHECK(arg1);
- ASSERT_ARGCHECK(arg2);
-}
-
-
-void JumpTarget::Branch(Condition cc,
- Result* arg0,
- Result* arg1,
- Result* arg2,
- Result* arg3,
- Hint hint) {
- ASSERT(cgen()->frame() != NULL);
-
- // We want to check that non-frame registers at the call site stay in
- // the same registers on the fall-through branch.
- DECLARE_ARGCHECK_VARS(arg0);
- DECLARE_ARGCHECK_VARS(arg1);
- DECLARE_ARGCHECK_VARS(arg2);
- DECLARE_ARGCHECK_VARS(arg3);
-
- cgen()->frame()->Push(arg0);
- cgen()->frame()->Push(arg1);
- cgen()->frame()->Push(arg2);
- cgen()->frame()->Push(arg3);
- DoBranch(cc, hint);
- *arg3 = cgen()->frame()->Pop();
- *arg2 = cgen()->frame()->Pop();
- *arg1 = cgen()->frame()->Pop();
- *arg0 = cgen()->frame()->Pop();
-
- ASSERT_ARGCHECK(arg0);
- ASSERT_ARGCHECK(arg1);
- ASSERT_ARGCHECK(arg2);
- ASSERT_ARGCHECK(arg3);
-}
-
-
void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
ASSERT(cgen()->has_valid_frame());
@@ -400,66 +277,20 @@ void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
#undef ASSERT_ARGCHECK
-void JumpTarget::Bind(int mergable_elements) {
- DoBind(mergable_elements);
+void JumpTarget::Bind() {
+ DoBind();
}
-void JumpTarget::Bind(Result* arg, int mergable_elements) {
+void JumpTarget::Bind(Result* arg) {
if (cgen()->has_valid_frame()) {
cgen()->frame()->Push(arg);
}
- DoBind(mergable_elements);
+ DoBind();
*arg = cgen()->frame()->Pop();
}
-void JumpTarget::Bind(Result* arg0, Result* arg1, int mergable_elements) {
- if (cgen()->has_valid_frame()) {
- cgen()->frame()->Push(arg0);
- cgen()->frame()->Push(arg1);
- }
- DoBind(mergable_elements);
- *arg1 = cgen()->frame()->Pop();
- *arg0 = cgen()->frame()->Pop();
-}
-
-
-void JumpTarget::Bind(Result* arg0,
- Result* arg1,
- Result* arg2,
- int mergable_elements) {
- if (cgen()->has_valid_frame()) {
- cgen()->frame()->Push(arg0);
- cgen()->frame()->Push(arg1);
- cgen()->frame()->Push(arg2);
- }
- DoBind(mergable_elements);
- *arg2 = cgen()->frame()->Pop();
- *arg1 = cgen()->frame()->Pop();
- *arg0 = cgen()->frame()->Pop();
-}
-
-
-void JumpTarget::Bind(Result* arg0,
- Result* arg1,
- Result* arg2,
- Result* arg3,
- int mergable_elements) {
- if (cgen()->has_valid_frame()) {
- cgen()->frame()->Push(arg0);
- cgen()->frame()->Push(arg1);
- cgen()->frame()->Push(arg2);
- cgen()->frame()->Push(arg3);
- }
- DoBind(mergable_elements);
- *arg3 = cgen()->frame()->Pop();
- *arg2 = cgen()->frame()->Pop();
- *arg1 = cgen()->frame()->Pop();
- *arg0 = cgen()->frame()->Pop();
-}
-
-
void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
ASSERT(reaching_frames_.length() == merge_labels_.length());
ASSERT(entry_frame_ == NULL);
@@ -531,7 +362,7 @@ void BreakTarget::Branch(Condition cc, Hint hint) {
}
-void BreakTarget::Bind(int mergable_elements) {
+void BreakTarget::Bind() {
#ifdef DEBUG
// All the forward-reaching frames should have been adjusted at the
// jumps to this target.
@@ -547,11 +378,11 @@ void BreakTarget::Bind(int mergable_elements) {
int count = cgen()->frame()->height() - expected_height_;
cgen()->frame()->ForgetElements(count);
}
- DoBind(mergable_elements);
+ DoBind();
}
-void BreakTarget::Bind(Result* arg, int mergable_elements) {
+void BreakTarget::Bind(Result* arg) {
#ifdef DEBUG
// All the forward-reaching frames should have been adjusted at the
// jumps to this target.
@@ -568,7 +399,7 @@ void BreakTarget::Bind(Result* arg, int mergable_elements) {
cgen()->frame()->ForgetElements(count);
cgen()->frame()->Push(arg);
}
- DoBind(mergable_elements);
+ DoBind();
*arg = cgen()->frame()->Pop();
}
diff --git a/V8Binding/v8/src/jump-target.h b/V8Binding/v8/src/jump-target.h
index 7585faf..0c42f1b 100644
--- a/V8Binding/v8/src/jump-target.h
+++ b/V8Binding/v8/src/jump-target.h
@@ -107,52 +107,18 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
// jump and there will be no current frame after the jump.
virtual void Jump();
virtual void Jump(Result* arg);
- void Jump(Result* arg0, Result* arg1);
- void Jump(Result* arg0, Result* arg1, Result* arg2);
// Emit a conditional branch to the target. There must be a current
// frame at the branch. The current frame will fall through to the
// code after the branch.
virtual void Branch(Condition cc, Hint hint = no_hint);
virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
- void Branch(Condition cc, Result* arg0, Result* arg1, Hint hint = no_hint);
- void Branch(Condition cc,
- Result* arg0,
- Result* arg1,
- Result* arg2,
- Hint hint = no_hint);
- void Branch(Condition cc,
- Result* arg0,
- Result* arg1,
- Result* arg2,
- Result* arg3,
- Hint hint = no_hint);
// Bind a jump target. If there is no current frame at the binding
// site, there must be at least one frame reaching via a forward
// jump.
- //
- // The number of mergable elements is a number of frame elements
- // counting from the top down which must be "mergable" (not
- // constants or copies) in the entry frame at the jump target.
- // Backward jumps to the target must contain the same constants and
- // sharing as the entry frame, except for the mergable elements.
- //
- // A mergable elements argument of kAllElements indicates that all
- // frame elements must be mergable. Mergable elements are ignored
- // completely for forward-only jump targets.
- virtual void Bind(int mergable_elements = kAllElements);
- virtual void Bind(Result* arg, int mergable_elements = kAllElements);
- void Bind(Result* arg0, Result* arg1, int mergable_elements = kAllElements);
- void Bind(Result* arg0,
- Result* arg1,
- Result* arg2,
- int mergable_elements = kAllElements);
- void Bind(Result* arg0,
- Result* arg1,
- Result* arg2,
- Result* arg3,
- int mergable_elements = kAllElements);
+ virtual void Bind();
+ virtual void Bind(Result* arg);
// Emit a call to a jump target. There must be a current frame at
// the call. The frame at the target is the same as the current
@@ -160,8 +126,6 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
// after the call is the same as the frame before the call.
void Call();
- static const int kAllElements = -1; // Not a valid number of elements.
-
static void set_compiling_deferred_code(bool flag) {
compiling_deferred_code_ = flag;
}
@@ -188,7 +152,7 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
// return values using the virtual frame.
void DoJump();
void DoBranch(Condition cc, Hint hint);
- void DoBind(int mergable_elements);
+ void DoBind();
private:
static bool compiling_deferred_code_;
@@ -202,9 +166,8 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
// target.
inline void InitializeEntryElement(int index, FrameElement* target);
- // Compute a frame to use for entry to this block. Mergable
- // elements is as described for the Bind function.
- void ComputeEntryFrame(int mergable_elements);
+ // Compute a frame to use for entry to this block.
+ void ComputeEntryFrame();
DISALLOW_COPY_AND_ASSIGN(JumpTarget);
};
@@ -251,8 +214,8 @@ class BreakTarget : public JumpTarget {
// Bind a break target. If there is no current frame at the binding
// site, there must be at least one frame reaching via a forward
// jump.
- virtual void Bind(int mergable_elements = kAllElements);
- virtual void Bind(Result* arg, int mergable_elements = kAllElements);
+ virtual void Bind();
+ virtual void Bind(Result* arg);
// Setter for expected height.
void set_expected_height(int expected) { expected_height_ = expected; }
diff --git a/V8Binding/v8/src/log-inl.h b/V8Binding/v8/src/log-inl.h
new file mode 100644
index 0000000..1844d2b
--- /dev/null
+++ b/V8Binding/v8/src/log-inl.h
@@ -0,0 +1,126 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LOG_INL_H_
+#define V8_LOG_INL_H_
+
+#include "log.h"
+
+namespace v8 {
+namespace internal {
+
+//
+// VMState class implementation. A simple stack of VM states held by the
+// logger and partially threaded through the call stack. States are pushed by
+// VMState construction and popped by destruction.
+//
+#ifdef ENABLE_LOGGING_AND_PROFILING
+inline const char* StateToString(StateTag state) {
+ switch (state) {
+ case JS:
+ return "JS";
+ case GC:
+ return "GC";
+ case COMPILER:
+ return "COMPILER";
+ case OTHER:
+ return "OTHER";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+VMState::VMState(StateTag state) : disabled_(true) {
+ if (!Logger::is_logging()) {
+ return;
+ }
+
+ disabled_ = false;
+#if !defined(ENABLE_HEAP_PROTECTION)
+ // When not protecting the heap, there is no difference between
+ // EXTERNAL and OTHER. As an optimization in that case, we will not
+ // perform EXTERNAL->OTHER transitions through the API. We thus
+ // compress the two states into one.
+ if (state == EXTERNAL) state = OTHER;
+#endif
+ state_ = state;
+ previous_ = Logger::current_state_;
+ Logger::current_state_ = this;
+
+ if (FLAG_log_state_changes) {
+ LOG(UncheckedStringEvent("Entering", StateToString(state_)));
+ if (previous_ != NULL) {
+ LOG(UncheckedStringEvent("From", StateToString(previous_->state_)));
+ }
+ }
+
+#ifdef ENABLE_HEAP_PROTECTION
+ if (FLAG_protect_heap && previous_ != NULL) {
+ if (state_ == EXTERNAL) {
+ // We are leaving V8.
+ ASSERT(previous_->state_ != EXTERNAL);
+ Heap::Protect();
+ } else if (previous_->state_ == EXTERNAL) {
+ // We are entering V8.
+ Heap::Unprotect();
+ }
+ }
+#endif
+}
+
+
+VMState::~VMState() {
+ if (disabled_) return;
+ Logger::current_state_ = previous_;
+
+ if (FLAG_log_state_changes) {
+ LOG(UncheckedStringEvent("Leaving", StateToString(state_)));
+ if (previous_ != NULL) {
+ LOG(UncheckedStringEvent("To", StateToString(previous_->state_)));
+ }
+ }
+
+#ifdef ENABLE_HEAP_PROTECTION
+ if (FLAG_protect_heap && previous_ != NULL) {
+ if (state_ == EXTERNAL) {
+ // We are reentering V8.
+ ASSERT(previous_->state_ != EXTERNAL);
+ Heap::Unprotect();
+ } else if (previous_->state_ == EXTERNAL) {
+ // We are leaving V8.
+ Heap::Protect();
+ }
+ }
+#endif
+}
+#endif
+
+
+} } // namespace v8::internal
+
+#endif // V8_LOG_INL_H_
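
VMState is a classic RAII state stack: construction pushes a state onto Logger::current_state_, destruction pops it, and the new disabled_ flag makes the destructor a no-op when logging was off at construction time. The core pattern in isolation (illustrative types, not V8's):

#include <cstdio>

enum StateTag { JS, GC, COMPILER, OTHER, EXTERNAL };

// Minimal RAII state stack; V8 additionally skips all of this work when
// logging is disabled (the disabled_ flag above).
struct ScopedState {
  static ScopedState* current;  // top of the state stack
  explicit ScopedState(StateTag tag) : tag_(tag), previous_(current) {
    current = this;
  }
  ~ScopedState() { current = previous_; }
  StateTag tag_;
  ScopedState* previous_;
};
ScopedState* ScopedState::current = NULL;

int main() {
  ScopedState outer(OTHER);
  {
    ScopedState in_gc(GC);  // constructor pushes GC...
    printf("%d\n", ScopedState::current->tag_);  // 1 (GC)
  }  // ...destructor pops it again
  printf("%d\n", ScopedState::current->tag_);    // 3 (OTHER)
  return 0;
}
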
diff --git a/V8Binding/v8/src/log-utils.cc b/V8Binding/v8/src/log-utils.cc
index 4361049..b31864b 100644
--- a/V8Binding/v8/src/log-utils.cc
+++ b/V8Binding/v8/src/log-utils.cc
@@ -123,7 +123,7 @@ bool Log::is_stopped_ = false;
Log::WritePtr Log::Write = NULL;
FILE* Log::output_handle_ = NULL;
LogDynamicBuffer* Log::output_buffer_ = NULL;
-// Must be the same message as in Logger::PauseProfiler
+// Must be the same message as in Logger::PauseProfiler.
const char* Log::kDynamicBufferSeal = "profiler,\"pause\"\n";
Mutex* Log::mutex_ = NULL;
char* Log::message_buffer_ = NULL;
@@ -173,6 +173,9 @@ void Log::Close() {
}
Write = NULL;
+ DeleteArray(message_buffer_);
+ message_buffer_ = NULL;
+
delete mutex_;
mutex_ = NULL;
@@ -212,13 +215,13 @@ void LogMessageBuilder::Append(const char* format, ...) {
Log::kMessageBufferSize - pos_);
va_list args;
va_start(args, format);
- Append(format, args);
+ AppendVA(format, args);
va_end(args);
ASSERT(pos_ <= Log::kMessageBufferSize);
}
-void LogMessageBuilder::Append(const char* format, va_list args) {
+void LogMessageBuilder::AppendVA(const char* format, va_list args) {
Vector<char> buf(Log::message_buffer_ + pos_,
Log::kMessageBufferSize - pos_);
int result = v8::internal::OS::VSNPrintF(buf, format, args);
@@ -250,6 +253,33 @@ void LogMessageBuilder::Append(String* str) {
}
+void LogMessageBuilder::AppendAddress(Address addr) {
+ static Address last_address_ = NULL;
+ AppendAddress(addr, last_address_);
+ last_address_ = addr;
+}
+
+
+void LogMessageBuilder::AppendAddress(Address addr, Address bias) {
+ if (!FLAG_compress_log) {
+ Append("0x%" V8PRIxPTR, addr);
+ } else if (bias == NULL) {
+ Append("%" V8PRIxPTR, addr);
+ } else {
+ uintptr_t delta;
+ char sign;
+ if (addr >= bias) {
+ delta = addr - bias;
+ sign = '+';
+ } else {
+ delta = bias - addr;
+ sign = '-';
+ }
+ Append("%c%" V8PRIxPTR, sign, delta);
+ }
+}
+
+
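
Under --compress_log, AppendAddress shrinks records by writing each address as a signed hex delta from a bias — by default the previous address written this way. A sketch of the encoding only (the real method also keeps the plain "0x..." form when compression is off):

#include <cinttypes>
#include <cstdio>

// Prints 'addr' relative to 'bias': a signed hex delta, or the full value
// when there is no bias yet (0 stands in for V8's NULL sentinel here).
static void PrintAddress(uintptr_t addr, uintptr_t bias) {
  if (bias == 0) {
    printf("%" PRIxPTR "\n", addr);
  } else if (addr >= bias) {
    printf("+%" PRIxPTR "\n", addr - bias);
  } else {
    printf("-%" PRIxPTR "\n", bias - addr);
  }
}

int main() {
  uintptr_t last = 0;
  const uintptr_t samples[] = { 0x8049f30, 0x8049f48, 0x8049f20 };
  for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i) {
    PrintAddress(samples[i], last);  // prints 8049f30, +18, -28
    last = samples[i];               // next address is encoded against this
  }
}
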
void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
AssertNoAllocation no_heap_allocation; // Ensure the string stays valid.
int len = str->length();
@@ -280,6 +310,24 @@ void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
}
+bool LogMessageBuilder::StoreInCompressor(LogRecordCompressor* compressor) {
+ return compressor->Store(Vector<const char>(Log::message_buffer_, pos_));
+}
+
+
+bool LogMessageBuilder::RetrieveCompressedPrevious(
+ LogRecordCompressor* compressor, const char* prefix) {
+ pos_ = 0;
+ if (prefix[0] != '\0') Append(prefix);
+ Vector<char> prev_record(Log::message_buffer_ + pos_,
+ Log::kMessageBufferSize - pos_);
+ const bool has_prev = compressor->RetrievePreviousCompressed(&prev_record);
+ if (!has_prev) return false;
+ pos_ += prev_record.length();
+ return true;
+}
+
+
void LogMessageBuilder::WriteToLogFile() {
ASSERT(pos_ <= Log::kMessageBufferSize);
const int written = Log::Write(Log::message_buffer_, pos_);
@@ -297,6 +345,145 @@ void LogMessageBuilder::WriteCStringToLogFile(const char* str) {
}
}
+
+// Formatting string for back references to the whole line. E.g. "#2" means
+// "the second line above".
+const char* LogRecordCompressor::kLineBackwardReferenceFormat = "#%d";
+
+// Formatting string for back references. E.g. "#2:10" means
+// "the second line above, start from char 10 (0-based)".
+const char* LogRecordCompressor::kBackwardReferenceFormat = "#%d:%d";
+
+
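
A compressed record is therefore the differing prefix of the previous line followed by a back reference into an earlier line: "t,0x2#1:5" means "then copy the tail of the line one back, starting at character 5". A self-contained sketch of that suffix-sharing scheme (simplified; the real code also caps the reference size and scans a whole window of earlier lines):

#include <cstdio>
#include <cstring>
#include <string>

// Compress 'prev' against 'earlier', which appeared 'distance' lines back:
// keep the prefix of 'prev' that differs, then emit "#distance:pos", where
// pos is the offset in 'earlier' at which the shared tail starts
// ("#distance" alone when the whole earlier line is the tail).
static std::string CompressAgainst(const std::string& prev,
                                   const std::string& earlier,
                                   int distance) {
  size_t p = prev.size(), e = earlier.size();
  while (p > 0 && e > 0 && prev[p - 1] == earlier[e - 1]) { --p; --e; }
  char backref[16];
  if (e == 0) {
    snprintf(backref, sizeof(backref), "#%d", distance);
  } else {
    snprintf(backref, sizeof(backref), "#%d:%d", distance,
             static_cast<int>(e));
  }
  if (std::strlen(backref) >= prev.size() - p) return prev;  // not profitable
  return prev.substr(0, p) + backref;
}

int main() {
  std::string a = "t,0x1000,+8,0";
  std::string b = "t,0x2000,+8,0";
  printf("%s\n", CompressAgainst(b, a, 1).c_str());  // t,0x2#1:5
}
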
+LogRecordCompressor::~LogRecordCompressor() {
+ for (int i = 0; i < buffer_.length(); ++i) {
+ buffer_[i].Dispose();
+ }
+}
+
+
+static int GetNumberLength(int number) {
+ ASSERT(number >= 0);
+ ASSERT(number < 10000);
+ if (number < 10) return 1;
+ if (number < 100) return 2;
+ if (number < 1000) return 3;
+ return 4;
+}
+
+
+int LogRecordCompressor::GetBackwardReferenceSize(int distance, int pos) {
+ // See kLineBackwardReferenceFormat and kBackwardReferenceFormat.
+ return pos == 0 ? GetNumberLength(distance) + 1
+ : GetNumberLength(distance) + GetNumberLength(pos) + 2;
+}
+
+
+void LogRecordCompressor::PrintBackwardReference(Vector<char> dest,
+ int distance,
+ int pos) {
+ if (pos == 0) {
+ OS::SNPrintF(dest, kLineBackwardReferenceFormat, distance);
+ } else {
+ OS::SNPrintF(dest, kBackwardReferenceFormat, distance, pos);
+ }
+}
+
+
+bool LogRecordCompressor::Store(const Vector<const char>& record) {
+ // Check if the record is the same as the last stored one.
+ if (curr_ != -1) {
+ Vector<const char>& curr = buffer_[curr_];
+ if (record.length() == curr.length()
+ && strncmp(record.start(), curr.start(), record.length()) == 0) {
+ return false;
+ }
+ }
+ // buffer_ is circular.
+ prev_ = curr_++;
+ curr_ %= buffer_.length();
+ Vector<char> record_copy = Vector<char>::New(record.length());
+ memcpy(record_copy.start(), record.start(), record.length());
+ buffer_[curr_].Dispose();
+ buffer_[curr_] =
+ Vector<const char>(record_copy.start(), record_copy.length());
+ return true;
+}
+
+
+bool LogRecordCompressor::RetrievePreviousCompressed(
+ Vector<char>* prev_record) {
+ if (prev_ == -1) return false;
+
+ int index = prev_;
+ // Distance from prev_.
+ int distance = 0;
+ // Best compression result among records in the buffer.
+ struct {
+ intptr_t truncated_len;
+ int distance;
+ int copy_from_pos;
+ int backref_size;
+ } best = {-1, 0, 0, 0};
+ Vector<const char>& prev = buffer_[prev_];
+ const char* const prev_start = prev.start();
+ const char* const prev_end = prev.start() + prev.length();
+ do {
+ // We're moving backwards until we reach the current record.
+ // Remember that buffer_ is circular.
+ if (--index == -1) index = buffer_.length() - 1;
+ ++distance;
+ if (index == curr_) break;
+
+ Vector<const char>& data = buffer_[index];
+ if (data.start() == NULL) break;
+ const char* const data_end = data.start() + data.length();
+ const char* prev_ptr = prev_end;
+ const char* data_ptr = data_end;
+ // Compare strings backwards, stop on the last matching character.
+ while (prev_ptr != prev_start && data_ptr != data.start()
+ && *(prev_ptr - 1) == *(data_ptr - 1)) {
+ --prev_ptr;
+ --data_ptr;
+ }
+ const intptr_t truncated_len = prev_end - prev_ptr;
+ const int copy_from_pos = data_ptr - data.start();
+    // Skip if the matching tail is too short for a backward reference
+    // to pay off.
+ if (truncated_len <= kMaxBackwardReferenceSize
+ && truncated_len <= GetBackwardReferenceSize(distance, copy_from_pos)) {
+ continue;
+ }
+
+    // Remember the best compression candidate seen so far.
+ if (truncated_len > best.truncated_len) {
+ best.truncated_len = truncated_len;
+ best.distance = distance;
+ best.copy_from_pos = copy_from_pos;
+ best.backref_size = GetBackwardReferenceSize(distance, copy_from_pos);
+ }
+ } while (true);
+
+ if (best.distance == 0) {
+ // Can't compress the previous record. Return as is.
+ ASSERT(prev_record->length() >= prev.length());
+ memcpy(prev_record->start(), prev.start(), prev.length());
+ prev_record->Truncate(prev.length());
+ } else {
+    // Copy the incompressible part unchanged.
+ const intptr_t unchanged_len = prev.length() - best.truncated_len;
+ // + 1 for '\0'.
+ ASSERT(prev_record->length() >= unchanged_len + best.backref_size + 1);
+ memcpy(prev_record->start(), prev.start(), unchanged_len);
+ // Append the backward reference.
+ Vector<char> backref(
+ prev_record->start() + unchanged_len, best.backref_size + 1);
+ PrintBackwardReference(backref, best.distance, best.copy_from_pos);
+ ASSERT(strlen(backref.start()) - best.backref_size == 0);
+ prev_record->Truncate(unchanged_len + best.backref_size);
+ }
+ return true;
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/log-utils.h b/V8Binding/v8/src/log-utils.h
index 2e8b3a3..ad669d5 100644
--- a/V8Binding/v8/src/log-utils.h
+++ b/V8Binding/v8/src/log-utils.h
@@ -170,6 +170,50 @@ class Log : public AllStatic {
static char* message_buffer_;
friend class LogMessageBuilder;
+ friend class LogRecordCompressor;
+};
+
+
+// A utility class for performing backward reference compression
+// of string ends. It operates on a window of previous strings.
+class LogRecordCompressor {
+ public:
+  // 'window_size' is the size of the backward lookup window.
+ explicit LogRecordCompressor(int window_size)
+ : buffer_(window_size + kNoCompressionWindowSize),
+ kMaxBackwardReferenceSize(
+ GetBackwardReferenceSize(window_size, Log::kMessageBufferSize)),
+ curr_(-1), prev_(-1) {
+ }
+
+ ~LogRecordCompressor();
+
+  // Fills the vector with a compressed version of the previous record.
+ // Returns false if there is no previous record.
+ bool RetrievePreviousCompressed(Vector<char>* prev_record);
+
+  // Stores a record if it differs from the previous one (or there is no
+  // previous record). Returns true if the record has been stored.
+ bool Store(const Vector<const char>& record);
+
+ private:
+  // The minimum size of the buffer: room for the current and the previous
+  // record. Since there is no room for the predecessors of the previous
+  // record, it can't be compressed at all.
+ static const int kNoCompressionWindowSize = 2;
+
+ // Formatting strings for back references.
+ static const char* kLineBackwardReferenceFormat;
+ static const char* kBackwardReferenceFormat;
+
+ static int GetBackwardReferenceSize(int distance, int pos);
+
+ static void PrintBackwardReference(Vector<char> dest, int distance, int pos);
+
+ ScopedVector< Vector<const char> > buffer_;
+ const int kMaxBackwardReferenceSize;
+ int curr_;
+ int prev_;
};
@@ -186,7 +230,7 @@ class LogMessageBuilder BASE_EMBEDDED {
void Append(const char* format, ...);
// Append string data to the log message.
- void Append(const char* format, va_list args);
+ void AppendVA(const char* format, va_list args);
// Append a character to the log message.
void Append(const char c);
@@ -194,8 +238,29 @@ class LogMessageBuilder BASE_EMBEDDED {
// Append a heap string.
void Append(String* str);
+  // Appends an address, compressing it if needed by offsetting it
+  // from the last address appended by this method.
+ void AppendAddress(Address addr);
+
+ // Appends an address, compressing it if needed.
+ void AppendAddress(Address addr, Address bias);
+
void AppendDetailed(String* str, bool show_impl_info);
+  // Stores the log message in the compressor; returns true if the message
+  // was stored (i.e. it does not repeat the previous one).
+ bool StoreInCompressor(LogRecordCompressor* compressor);
+
+  // Sets the log message to the compressed version of the previous message.
+  // Returns false if there is no previous message.
+ bool RetrieveCompressedPrevious(LogRecordCompressor* compressor) {
+ return RetrieveCompressedPrevious(compressor, "");
+ }
+
+  // Does the same as the version without arguments, and also sets a prefix.
+ bool RetrieveCompressedPrevious(LogRecordCompressor* compressor,
+ const char* prefix);
+
// Write the log message to the log file currently opened.
void WriteToLogFile();
diff --git a/V8Binding/v8/src/log.cc b/V8Binding/v8/src/log.cc
index c1edf4d..0dba08d 100644
--- a/V8Binding/v8/src/log.cc
+++ b/V8Binding/v8/src/log.cc
@@ -31,9 +31,7 @@
#include "bootstrapper.h"
#include "log.h"
-#include "log-utils.h"
#include "macro-assembler.h"
-#include "platform.h"
#include "serialize.h"
#include "string-stream.h"
@@ -262,6 +260,7 @@ void Profiler::Engage() {
Logger::ticker_->SetProfiler(this);
Logger::ProfilerBeginEvent();
+ Logger::LogAliases();
}
@@ -301,17 +300,41 @@ Profiler* Logger::profiler_ = NULL;
VMState* Logger::current_state_ = NULL;
VMState Logger::bottom_state_(EXTERNAL);
SlidingStateWindow* Logger::sliding_state_window_ = NULL;
+const char** Logger::log_events_ = NULL;
+CompressionHelper* Logger::compression_helper_ = NULL;
+bool Logger::is_logging_ = false;
+#define DECLARE_LONG_EVENT(ignore1, long_name, ignore2) long_name,
+const char* kLongLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
+ LOG_EVENTS_AND_TAGS_LIST(DECLARE_LONG_EVENT)
+};
+#undef DECLARE_LONG_EVENT
-bool Logger::IsEnabled() {
- return Log::IsEnabled();
-}
+#define DECLARE_SHORT_EVENT(ignore1, ignore2, short_name) short_name,
+const char* kCompressedLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
+ LOG_EVENTS_AND_TAGS_LIST(DECLARE_SHORT_EVENT)
+};
+#undef DECLARE_SHORT_EVENT
void Logger::ProfilerBeginEvent() {
if (!Log::IsEnabled()) return;
LogMessageBuilder msg;
msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
+ if (FLAG_compress_log) {
+ msg.Append("profiler,\"compression\",%d\n", kCompressionWindowSize);
+ }
+ msg.WriteToLogFile();
+}
+
+
+void Logger::LogAliases() {
+ if (!Log::IsEnabled() || !FLAG_compress_log) return;
+ LogMessageBuilder msg;
+ for (int i = 0; i < NUMBER_OF_LOG_EVENTS; ++i) {
+ msg.Append("alias,%s,%s\n",
+ kCompressedLogEventsNames[i], kLongLogEventsNames[i]);
+ }
msg.WriteToLogFile();
}
@@ -373,7 +396,7 @@ void Logger::ApiEvent(const char* format, ...) {
LogMessageBuilder msg;
va_list ap;
va_start(ap, format);
- msg.Append(format, ap);
+ msg.AppendVA(format, ap);
va_end(ap);
msg.WriteToLogFile();
}
@@ -397,26 +420,30 @@ void Logger::ApiNamedSecurityCheck(Object* key) {
void Logger::SharedLibraryEvent(const char* library_path,
- unsigned start,
- unsigned end) {
+ uintptr_t start,
+ uintptr_t end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_prof) return;
LogMessageBuilder msg;
- msg.Append("shared-library,\"%s\",0x%08x,0x%08x\n", library_path,
- start, end);
+ msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
+ library_path,
+ start,
+ end);
msg.WriteToLogFile();
#endif
}
void Logger::SharedLibraryEvent(const wchar_t* library_path,
- unsigned start,
- unsigned end) {
+ uintptr_t start,
+ uintptr_t end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_prof) return;
LogMessageBuilder msg;
- msg.Append("shared-library,\"%ls\",0x%08x,0x%08x\n", library_path,
- start, end);
+ msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
+ library_path,
+ start,
+ end);
msg.WriteToLogFile();
#endif
}
@@ -594,12 +621,51 @@ void Logger::DeleteEvent(const char* name, void* object) {
}
-void Logger::CodeCreateEvent(const char* tag, Code* code, const char* comment) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+// A class that contains all common code dealing with record compression.
+class CompressionHelper {
+ public:
+ explicit CompressionHelper(int window_size)
+ : compressor_(window_size), repeat_count_(0) { }
+
+  // Handles storing the message in the compressor, retrieving the previous
+  // one, and prefixing it with a repeat count if needed.
+  // Returns true if the message needs to be written to the log.
+ bool HandleMessage(LogMessageBuilder* msg) {
+ if (!msg->StoreInCompressor(&compressor_)) {
+ // Current message repeats the previous one, don't write it.
+ ++repeat_count_;
+ return false;
+ }
+ if (repeat_count_ == 0) {
+ return msg->RetrieveCompressedPrevious(&compressor_);
+ }
+ OS::SNPrintF(prefix_, "%s,%d,",
+ Logger::log_events_[Logger::REPEAT_META_EVENT],
+ repeat_count_ + 1);
+ repeat_count_ = 0;
+ return msg->RetrieveCompressedPrevious(&compressor_, prefix_.start());
+ }
+
+ private:
+ LogRecordCompressor compressor_;
+ int repeat_count_;
+ EmbeddedVector<char, 20> prefix_;
+};
+
+#endif // ENABLE_LOGGING_AND_PROFILING
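
Note that emission is delayed by one record: a message is only written once the next distinct message arrives, so identical neighbours can first be counted and surfaced as a single "r,<count>," prefix (REPEAT_META_EVENT). A compact model of that behaviour, ignoring the suffix compression (illustrative, not V8's types):

#include <cstdio>
#include <string>

// Repeat handling only: as in CompressionHelper, a record is written only
// when the next distinct record arrives, so consecutive duplicates can be
// counted first and folded into one "r,<count>," prefix.
struct RepeatFolder {
  std::string pending;  // last distinct record, not yet written
  bool has_pending;
  int repeats;
  RepeatFolder() : has_pending(false), repeats(0) {}

  // Feed the next record; returns the line to write now ("" for none).
  std::string Handle(const std::string& rec) {
    if (has_pending && rec == pending) { ++repeats; return ""; }
    std::string out;
    if (has_pending) {
      char prefix[16];
      snprintf(prefix, sizeof(prefix), "r,%d,", repeats + 1);
      out = repeats > 0 ? prefix + pending : pending;
    }
    pending = rec;
    has_pending = true;
    repeats = 0;
    return out;
  }
};

int main() {
  RepeatFolder folder;
  const char* input[] = { "t,+0,0", "t,+0,0", "t,+0,0", "t,+8,0", "cd,+4" };
  for (size_t i = 0; i < sizeof(input) / sizeof(input[0]); ++i) {
    std::string out = folder.Handle(input[i]);
    if (!out.empty()) printf("%s\n", out.c_str());
  }
  // prints:  r,3,t,+0,0
  //          t,+8,0
}
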
+
+
+void Logger::CodeCreateEvent(LogEventsAndTags tag,
+ Code* code,
+ const char* comment) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"", tag, code->address(),
- code->ExecutableSize());
+ msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+ msg.AppendAddress(code->address());
+ msg.Append(",%d,\"", code->ExecutableSize());
for (const char* p = comment; *p != '\0'; p++) {
if (*p == '"') {
msg.Append('\\');
@@ -607,26 +673,37 @@ void Logger::CodeCreateEvent(const char* tag, Code* code, const char* comment) {
msg.Append(*p);
}
msg.Append('"');
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
msg.Append('\n');
msg.WriteToLogFile();
#endif
}
-void Logger::CodeCreateEvent(const char* tag, Code* code, String* name) {
+void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"%s\"\n",
- tag, code->address(), code->ExecutableSize(), *str);
+ msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+ msg.AppendAddress(code->address());
+ msg.Append(",%d,\"%s\"", code->ExecutableSize(), *str);
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
msg.WriteToLogFile();
#endif
}
-void Logger::CodeCreateEvent(const char* tag, Code* code, String* name,
+void Logger::CodeCreateEvent(LogEventsAndTags tag,
+ Code* code, String* name,
String* source, int line) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return;
@@ -635,23 +712,32 @@ void Logger::CodeCreateEvent(const char* tag, Code* code, String* name,
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartPointer<char> sourcestr =
source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"%s %s:%d\"\n",
- tag, code->address(),
- code->ExecutableSize(),
- *str, *sourcestr, line);
+ msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+ msg.AppendAddress(code->address());
+ msg.Append(",%d,\"%s %s:%d\"",
+ code->ExecutableSize(), *str, *sourcestr, line);
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
msg.WriteToLogFile();
#endif
}
-void Logger::CodeCreateEvent(const char* tag, Code* code, int args_count) {
+void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"args_count: %d\"\n", tag,
- code->address(),
- code->ExecutableSize(),
- args_count);
+ msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+ msg.AppendAddress(code->address());
+ msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
msg.WriteToLogFile();
#endif
}
@@ -661,23 +747,17 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"", "RegExp",
- code->address(),
- code->ExecutableSize());
+ msg.Append("%s,%s,",
+ log_events_[CODE_CREATION_EVENT], log_events_[REG_EXP_TAG]);
+ msg.AppendAddress(code->address());
+ msg.Append(",%d,\"", code->ExecutableSize());
msg.AppendDetailed(source, false);
- msg.Append("\"\n");
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::CodeAllocateEvent(Code* code, Assembler* assem) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
- msg.Append("code-allocate,0x%" V8PRIxPTR ",0x%" V8PRIxPTR "\n",
- code->address(),
- assem);
+ msg.Append('\"');
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
msg.WriteToLogFile();
#endif
}
@@ -685,9 +765,19 @@ void Logger::CodeAllocateEvent(Code* code, Assembler* assem) {
void Logger::CodeMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
+ static Address prev_to_ = NULL;
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("code-move,0x%" V8PRIxPTR ",0x%" V8PRIxPTR "\n", from, to);
+ msg.Append("%s,", log_events_[CODE_MOVE_EVENT]);
+ msg.AppendAddress(from);
+ msg.Append(',');
+ msg.AppendAddress(to, prev_to_);
+ prev_to_ = to;
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
msg.WriteToLogFile();
#endif
}
@@ -697,7 +787,13 @@ void Logger::CodeDeleteEvent(Address from) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("code-delete,0x%" V8PRIxPTR "\n", from);
+ msg.Append("%s,", log_events_[CODE_DELETE_EVENT]);
+ msg.AppendAddress(from);
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
msg.WriteToLogFile();
#endif
}
@@ -802,14 +898,26 @@ void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::TickEvent(TickSample* sample, bool overflow) {
if (!Log::IsEnabled() || !FLAG_prof) return;
+ static Address prev_sp = NULL;
LogMessageBuilder msg;
- msg.Append("tick,0x%" V8PRIxPTR ",0x%" V8PRIxPTR ",%d",
- sample->pc, sample->sp, static_cast<int>(sample->state));
+ msg.Append("%s,", log_events_[TICK_EVENT]);
+ Address prev_addr = reinterpret_cast<Address>(sample->pc);
+ msg.AppendAddress(prev_addr);
+ msg.Append(',');
+ msg.AppendAddress(reinterpret_cast<Address>(sample->sp), prev_sp);
+ prev_sp = reinterpret_cast<Address>(sample->sp);
+ msg.Append(",%d", static_cast<int>(sample->state));
if (overflow) {
msg.Append(",overflow");
}
for (int i = 0; i < sample->frames_count; ++i) {
- msg.Append(",0x%" V8PRIxPTR, sample->stack[i]);
+ msg.Append(',');
+ msg.AppendAddress(sample->stack[i], prev_addr);
+ prev_addr = sample->stack[i];
+ }
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile();
@@ -832,6 +940,7 @@ void Logger::PauseProfiler() {
// Must be the same message as Log::kDynamicBufferSeal.
LOG(UncheckedStringEvent("profiler", "pause"));
}
+ is_logging_ = false;
}
@@ -839,6 +948,7 @@ void Logger::ResumeProfiler() {
if (!profiler_->paused() || !Log::IsEnabled()) {
return;
}
+ is_logging_ = true;
if (FLAG_prof_lazy) {
LOG(UncheckedStringEvent("profiler", "resume"));
FLAG_log_code = true;
@@ -912,18 +1022,19 @@ void Logger::LogCompiledFunctions() {
Handle<String> script_name(String::cast(script->name()));
int line_num = GetScriptLineNumber(script, shared->start_position());
if (line_num > 0) {
- line_num += script->line_offset()->value() + 1;
- LOG(CodeCreateEvent("LazyCompile", shared->code(), *func_name,
- *script_name, line_num));
+ LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
+ shared->code(), *func_name,
+ *script_name, line_num + 1));
} else {
// Can't distinguish eval and script here, so always use Script.
- LOG(CodeCreateEvent("Script", shared->code(), *script_name));
+ LOG(CodeCreateEvent(Logger::SCRIPT_TAG,
+ shared->code(), *script_name));
}
continue;
}
}
// If there is no script, or the script has no name.
- LOG(CodeCreateEvent("LazyCompile", shared->code(), *func_name));
+ LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
}
DeleteArray(sfis);
@@ -954,9 +1065,11 @@ bool Logger::Setup() {
FLAG_prof_auto = false;
}
- bool open_log_file = FLAG_log || FLAG_log_runtime || FLAG_log_api
+ bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
|| FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_log_state_changes || FLAG_prof_lazy;
+ || FLAG_log_regexp || FLAG_log_state_changes;
+
+ bool open_log_file = start_logging || FLAG_prof_lazy;
// If we're logging anything, we need to open the log file.
if (open_log_file) {
@@ -1013,10 +1126,21 @@ bool Logger::Setup() {
sliding_state_window_ = new SlidingStateWindow();
}
+ log_events_ = FLAG_compress_log ?
+ kCompressedLogEventsNames : kLongLogEventsNames;
+ if (FLAG_compress_log) {
+ compression_helper_ = new CompressionHelper(kCompressionWindowSize);
+ }
+
+ is_logging_ = start_logging;
+
if (FLAG_prof) {
profiler_ = new Profiler();
- if (!FLAG_prof_auto)
+ if (!FLAG_prof_auto) {
profiler_->pause();
+ } else {
+ is_logging_ = true;
+ }
profiler_->Engage();
}
@@ -1041,6 +1165,9 @@ void Logger::TearDown() {
profiler_ = NULL;
}
+ delete compression_helper_;
+ compression_helper_ = NULL;
+
delete sliding_state_window_;
sliding_state_window_ = NULL;
@@ -1071,85 +1198,4 @@ void Logger::EnableSlidingStateWindow() {
}
-//
-// VMState class implementation. A simple stack of VM states held by the
-// logger and partially threaded through the call stack. States are pushed by
-// VMState construction and popped by destruction.
-//
-#ifdef ENABLE_LOGGING_AND_PROFILING
-static const char* StateToString(StateTag state) {
- switch (state) {
- case JS:
- return "JS";
- case GC:
- return "GC";
- case COMPILER:
- return "COMPILER";
- case OTHER:
- return "OTHER";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-VMState::VMState(StateTag state) {
-#if !defined(ENABLE_HEAP_PROTECTION)
- // When not protecting the heap, there is no difference between
- // EXTERNAL and OTHER. As an optimization in that case, we will not
- // perform EXTERNAL->OTHER transitions through the API. We thus
- // compress the two states into one.
- if (state == EXTERNAL) state = OTHER;
-#endif
- state_ = state;
- previous_ = Logger::current_state_;
- Logger::current_state_ = this;
-
- if (FLAG_log_state_changes) {
- LOG(UncheckedStringEvent("Entering", StateToString(state_)));
- if (previous_ != NULL) {
- LOG(UncheckedStringEvent("From", StateToString(previous_->state_)));
- }
- }
-
-#ifdef ENABLE_HEAP_PROTECTION
- if (FLAG_protect_heap && previous_ != NULL) {
- if (state_ == EXTERNAL) {
- // We are leaving V8.
- ASSERT(previous_->state_ != EXTERNAL);
- Heap::Protect();
- } else if (previous_->state_ == EXTERNAL) {
- // We are entering V8.
- Heap::Unprotect();
- }
- }
-#endif
-}
-
-
-VMState::~VMState() {
- Logger::current_state_ = previous_;
-
- if (FLAG_log_state_changes) {
- LOG(UncheckedStringEvent("Leaving", StateToString(state_)));
- if (previous_ != NULL) {
- LOG(UncheckedStringEvent("To", StateToString(previous_->state_)));
- }
- }
-
-#ifdef ENABLE_HEAP_PROTECTION
- if (FLAG_protect_heap && previous_ != NULL) {
- if (state_ == EXTERNAL) {
- // We are reentering V8.
- ASSERT(previous_->state_ != EXTERNAL);
- Heap::Unprotect();
- } else if (previous_->state_ == EXTERNAL) {
- // We are leaving V8.
- Heap::Protect();
- }
- }
-#endif
-}
-#endif
-
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/log.h b/V8Binding/v8/src/log.h
index 2f8f81c..f68234f 100644
--- a/V8Binding/v8/src/log.h
+++ b/V8Binding/v8/src/log.h
@@ -28,6 +28,9 @@
#ifndef V8_LOG_H_
#define V8_LOG_H_
+#include "platform.h"
+#include "log-utils.h"
+
namespace v8 {
namespace internal {
@@ -71,12 +74,13 @@ class Profiler;
class Semaphore;
class SlidingStateWindow;
class LogMessageBuilder;
+class CompressionHelper;
#undef LOG
#ifdef ENABLE_LOGGING_AND_PROFILING
#define LOG(Call) \
do { \
- if (v8::internal::Logger::IsEnabled()) \
+ if (v8::internal::Logger::is_logging()) \
v8::internal::Logger::Call; \
} while (false)
#else
@@ -87,12 +91,13 @@ class LogMessageBuilder;
class VMState BASE_EMBEDDED {
#ifdef ENABLE_LOGGING_AND_PROFILING
public:
- explicit VMState(StateTag state);
- ~VMState();
+ inline explicit VMState(StateTag state);
+ inline ~VMState();
StateTag state() { return state_; }
private:
+ bool disabled_;
StateTag state_;
VMState* previous_;
#else
@@ -102,8 +107,41 @@ class VMState BASE_EMBEDDED {
};
+#define LOG_EVENTS_AND_TAGS_LIST(V) \
+ V(CODE_CREATION_EVENT, "code-creation", "cc") \
+ V(CODE_MOVE_EVENT, "code-move", "cm") \
+ V(CODE_DELETE_EVENT, "code-delete", "cd") \
+ V(TICK_EVENT, "tick", "t") \
+ V(REPEAT_META_EVENT, "repeat", "r") \
+ V(BUILTIN_TAG, "Builtin", "bi") \
+ V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak", "cdb") \
+ V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn", "cdbsi") \
+ V(CALL_IC_TAG, "CallIC", "cic") \
+ V(CALL_INITIALIZE_TAG, "CallInitialize", "ci") \
+ V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic", "cmm") \
+ V(CALL_MISS_TAG, "CallMiss", "cm") \
+ V(CALL_NORMAL_TAG, "CallNormal", "cn") \
+ V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic", "cpm") \
+ V(EVAL_TAG, "Eval", "e") \
+ V(FUNCTION_TAG, "Function", "f") \
+ V(KEYED_LOAD_IC_TAG, "KeyedLoadIC", "klic") \
+ V(KEYED_STORE_IC_TAG, "KeyedStoreIC", "ksic") \
+ V(LAZY_COMPILE_TAG, "LazyCompile", "lc") \
+ V(LOAD_IC_TAG, "LoadIC", "lic") \
+ V(REG_EXP_TAG, "RegExp", "re") \
+ V(SCRIPT_TAG, "Script", "sc") \
+ V(STORE_IC_TAG, "StoreIC", "sic") \
+ V(STUB_TAG, "Stub", "s")
+
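
LOG_EVENTS_AND_TAGS_LIST is an X-macro: the single list is expanded three times — into the enum below and into the long- and short-name tables in log.cc — so the three can never drift apart. The technique in miniature (a standalone sketch, not V8 code):

#include <cstdio>

// One list macro, expanded once per artifact that must stay in sync.
#define COLOR_LIST(V)    \
  V(RED,   "red",   "r") \
  V(GREEN, "green", "g") \
  V(BLUE,  "blue",  "b")

#define DECLARE_ENUM(id, long_name, short_name) id,
enum Color { COLOR_LIST(DECLARE_ENUM) NUMBER_OF_COLORS };
#undef DECLARE_ENUM

#define DECLARE_NAME(id, long_name, short_name) long_name,
static const char* kColorNames[NUMBER_OF_COLORS] = {
  COLOR_LIST(DECLARE_NAME)
};
#undef DECLARE_NAME

int main() {
  printf("%s\n", kColorNames[GREEN]);  // "green": enum and table stay in sync
}
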
class Logger {
public:
+#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
+ enum LogEventsAndTags {
+ LOG_EVENTS_AND_TAGS_LIST(DECLARE_ENUM)
+ NUMBER_OF_LOG_EVENTS
+ };
+#undef DECLARE_ENUM
+
// Acquires resources for logging if the right flags are set.
static bool Setup();
@@ -163,14 +201,14 @@ class Logger {
// ==== Events logged by --log-code. ====
// Emits a code create event.
- static void CodeCreateEvent(const char* tag, Code* code, const char* source);
- static void CodeCreateEvent(const char* tag, Code* code, String* name);
- static void CodeCreateEvent(const char* tag, Code* code, String* name,
+ static void CodeCreateEvent(LogEventsAndTags tag,
+ Code* code, const char* source);
+ static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name);
+ static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name,
String* source, int line);
- static void CodeCreateEvent(const char* tag, Code* code, int args_count);
+ static void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
// Emits a code create event for a RegExp.
static void RegExpCodeCreateEvent(Code* code, String* source);
- static void CodeAllocateEvent(Code* code, Assembler* assem);
// Emits a code move event.
static void CodeMoveEvent(Address from, Address to);
// Emits a code delete event.
@@ -183,11 +221,11 @@ class Logger {
static void HeapSampleItemEvent(const char* type, int number, int bytes);
static void SharedLibraryEvent(const char* library_path,
- unsigned start,
- unsigned end);
+ uintptr_t start,
+ uintptr_t end);
static void SharedLibraryEvent(const wchar_t* library_path,
- unsigned start,
- unsigned end);
+ uintptr_t start,
+ uintptr_t end);
// ==== Events logged by --log-regexp ====
// Regexp compilation and execution events.
@@ -202,7 +240,9 @@ class Logger {
return current_state_ ? current_state_->state() : OTHER;
}
- static bool IsEnabled();
+ static bool is_logging() {
+ return is_logging_;
+ }
// Pause/Resume collection of profiling data.
// When data collection is paused, Tick events are discarded until
@@ -223,9 +263,15 @@ class Logger {
// Profiler's sampling interval (in milliseconds).
static const int kSamplingIntervalMs = 1;
+  // Size of the window used for log record compression.
+ static const int kCompressionWindowSize = 4;
+
// Emits the profiler's first message.
static void ProfilerBeginEvent();
+ // Emits aliases for compressed messages.
+ static void LogAliases();
+
// Emits the source code of a regexp. Used by regexp events.
static void LogRegExpSource(Handle<JSRegExp> regexp);
@@ -261,8 +307,15 @@ class Logger {
// recent VM states.
static SlidingStateWindow* sliding_state_window_;
+  // An array of log event names.
+ static const char** log_events_;
+
+  // A helper instance, created when log compression is enabled.
+ static CompressionHelper* compression_helper_;
+
// Internal implementation classes with access to
// private members.
+ friend class CompressionHelper;
friend class EventLog;
friend class TimeLog;
friend class Profiler;
@@ -270,8 +323,10 @@ class Logger {
friend class VMState;
friend class LoggerTestHelper;
+
+ static bool is_logging_;
#else
- static bool is_enabled() { return false; }
+ static bool is_logging() { return false; }
#endif
};
diff --git a/V8Binding/v8/src/macro-assembler.h b/V8Binding/v8/src/macro-assembler.h
index 116381b..983802e 100644
--- a/V8Binding/v8/src/macro-assembler.h
+++ b/V8Binding/v8/src/macro-assembler.h
@@ -47,6 +47,8 @@
#include "arm/assembler-arm-inl.h"
#include "code.h" // must be after assembler_*.h
#include "arm/macro-assembler-arm.h"
+#else
+#error Unsupported target architecture.
#endif
#endif // V8_MACRO_ASSEMBLER_H_
diff --git a/V8Binding/v8/src/macros.py b/V8Binding/v8/src/macros.py
index ebfd816..c75f0ea 100644
--- a/V8Binding/v8/src/macros.py
+++ b/V8Binding/v8/src/macros.py
@@ -60,6 +60,7 @@ const msPerSecond = 1000;
const msPerMinute = 60000;
const msPerHour = 3600000;
const msPerDay = 86400000;
+const msPerMonth = 2592000000;
# For apinatives.js
const kUninitialized = -1;
@@ -81,13 +82,16 @@ macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_OBJECT(arg) = (typeof(arg) === 'object');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
-macro IS_REGEXP(arg) = %HasRegExpClass(arg);
-macro IS_ARRAY(arg) = %HasArrayClass(arg);
-macro IS_DATE(arg) = %HasDateClass(arg);
-macro IS_NUMBER_WRAPPER(arg) = %HasNumberClass(arg);
-macro IS_STRING_WRAPPER(arg) = %HasStringClass(arg);
-macro IS_ERROR(arg) = (%ClassOf(arg) === 'Error');
-macro IS_SCRIPT(arg) = (%ClassOf(arg) === 'Script');
+macro IS_ARRAY(arg) = (%_IsArray(arg));
+macro IS_REGEXP(arg) = (%_ClassOf(arg) === 'RegExp');
+macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date');
+macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
+macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
+macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean');
+macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
+macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
+macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
+macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro FLOOR(arg) = %Math_floor(arg);
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
@@ -110,6 +114,10 @@ const REGEXP_FIRST_CAPTURE = 3;
# REGEXP_NUMBER_OF_CAPTURES
macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
+# Gets the value of a Date object. If arg is not a Date object,
+# a type error is thrown.
+macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
+
# Last input and last subject are after the captures so we can omit them on
# results returned from global searches. Beware - these evaluate their
# arguments twice.
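All of the rewritten IS_* macros now dispatch on %_ClassOf, an inlined runtime call that returns the object's class name. Outside the VM the same classification can be approximated as below; this is an approximation for illustration, not what the macros expand to:

    // "[object Date]" -> "Date", "[object RegExp]" -> "RegExp", ...
    function classOf(x) {
      return Object.prototype.toString.call(x).slice(8, -1);
    }
    classOf(new Date());  // "Date"
    classOf(/re/);        // "RegExp"
    classOf([1, 2, 3]);   // "Array"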
diff --git a/V8Binding/v8/src/mark-compact.cc b/V8Binding/v8/src/mark-compact.cc
index 56e4ea6..5e46f2a 100644
--- a/V8Binding/v8/src/mark-compact.cc
+++ b/V8Binding/v8/src/mark-compact.cc
@@ -947,13 +947,18 @@ void EncodeFreeRegion(Address free_start, int free_size) {
// Try to promote all objects in new space. Heap numbers and sequential
-// strings are promoted to the code space, all others to the old space.
+// strings are promoted to the old data space, large objects to the large
+// object space, and all others to the old pointer space.
inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
- OldSpace* target_space = Heap::TargetSpace(object);
- ASSERT(target_space == Heap::old_pointer_space() ||
- target_space == Heap::old_data_space());
- Object* forwarded = target_space->MCAllocateRaw(object_size);
-
+ Object* forwarded;
+ if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
+ forwarded = Failure::Exception();
+ } else {
+ OldSpace* target_space = Heap::TargetSpace(object);
+ ASSERT(target_space == Heap::old_pointer_space() ||
+ target_space == Heap::old_data_space());
+ forwarded = target_space->MCAllocateRaw(object_size);
+ }
if (forwarded->IsFailure()) {
forwarded = Heap::new_space()->MCAllocateRaw(object_size);
}
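The new allocation path refuses to promote objects that exceed the paged-space object size limit and instead falls through to the new-space retry that already handled promotion failures. A schematic JavaScript model of that decision, with space objects and sizes invented for illustration:

    function makeSpace(name, free) {
      return { tryAllocate: function (size) {
        if (free < size) return null;   // plays the role of Failure
        free -= size;
        return { space: name, size: size };
      } };
    }
    function mcAllocate(size, maxPagedSize, oldSpace, newSpace) {
      var forwarded = (size <= maxPagedSize) ? oldSpace.tryAllocate(size) : null;
      return forwarded !== null ? forwarded : newSpace.tryAllocate(size);
    }
    mcAllocate(4096, 8192, makeSpace("old", 1 << 20), makeSpace("new", 1 << 20));
    // -> { space: "old", size: 4096 }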
@@ -1136,7 +1141,7 @@ static void SweepSpace(NewSpace* space) {
// We give non-live objects a map that will correctly give their size,
// since their existing map might not be live after the collection.
int size = object->Size();
- if (size >= Array::kHeaderSize) {
+ if (size >= ByteArray::kHeaderSize) {
object->set_map(Heap::byte_array_map());
ByteArray::cast(object)->set_length(ByteArray::LengthFor(size));
} else {
diff --git a/V8Binding/v8/src/math.js b/V8Binding/v8/src/math.js
index 86d6dd1..1f5ce87 100644
--- a/V8Binding/v8/src/math.js
+++ b/V8Binding/v8/src/math.js
@@ -44,39 +44,73 @@ $Math.__proto__ = global.Object.prototype;
// ECMA 262 - 15.8.2.1
function MathAbs(x) {
- if (%_IsSmi(x)) {
- return x >= 0 ? x : -x;
- } else {
- return %Math_abs(ToNumber(x));
- }
+ if (%_IsSmi(x)) return x >= 0 ? x : -x;
+ if (!IS_NUMBER(x)) x = ToNumber(x);
+ return %Math_abs(x);
}
// ECMA 262 - 15.8.2.2
-function MathAcos(x) { return %Math_acos(ToNumber(x)); }
+function MathAcos(x) {
+ if (!IS_NUMBER(x)) x = ToNumber(x);
+ return %Math_acos(x);
+}
// ECMA 262 - 15.8.2.3
-function MathAsin(x) { return %Math_asin(ToNumber(x)); }
+function MathAsin(x) {
+ if (!IS_NUMBER(x)) x = ToNumber(x);
+ return %Math_asin(x);
+}
// ECMA 262 - 15.8.2.4
-function MathAtan(x) { return %Math_atan(ToNumber(x)); }
+function MathAtan(x) {
+ if (!IS_NUMBER(x)) x = ToNumber(x);
+ return %Math_atan(x);
+}
// ECMA 262 - 15.8.2.5
-function MathAtan2(x, y) { return %Math_atan2(ToNumber(x), ToNumber(y)); }
+function MathAtan2(x, y) {
+ if (!IS_NUMBER(x)) x = ToNumber(x);
+ if (!IS_NUMBER(y)) y = ToNumber(y);
+ return %Math_atan2(x, y);
+}
// ECMA 262 - 15.8.2.6
-function MathCeil(x) { return %Math_ceil(ToNumber(x)); }
+function MathCeil(x) {
+ if (!IS_NUMBER(x)) x = ToNumber(x);
+ return %Math_ceil(x);
+}
// ECMA 262 - 15.8.2.7
-function MathCos(x) { return %Math_cos(ToNumber(x)); }
+function MathCos(x) {
+ if (!IS_NUMBER(x)) x = ToNumber(x);
+ return %_Math_cos(x);
+}
// ECMA 262 - 15.8.2.8
-function MathExp(x) { return %Math_exp(ToNumber(x)); }
+function MathExp(x) {
+ if (!IS_NUMBER(x)) x = ToNumber(x);
+ return %Math_exp(x);
+}
// ECMA 262 - 15.8.2.9
-function MathFloor(x) { return %Math_floor(ToNumber(x)); }
+function MathFloor(x) {
+ if (!IS_NUMBER(x)) x = ToNumber(x);
+ if (0 < x && x <= 0x7FFFFFFF) {
+ // Numbers in the range [0, 2^31) can be floored by converting
+ // them to an unsigned 32-bit value using the shift operator.
+ // We avoid doing so for -0, because the result of Math.floor(-0)
+ // has to be -0, which wouldn't be the case with the shift.
+ return x << 0;
+ } else {
+ return %Math_floor(x);
+ }
+}
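The guard around the shift fast path is easy to probe: x << 0 forces a ToInt32 conversion, which truncates toward zero, and truncation only coincides with floor for positive values below 2^31. A standalone version using Math.floor as a stand-in for the runtime fallback:

    function fastFloor(x) {
      return (0 < x && x <= 0x7FFFFFFF) ? x << 0 : Math.floor(x);
    }
    fastFloor(3.7);   // 3   via the shift
    fastFloor(-3.7);  // -4  fallback; the shift would truncate to -3
    fastFloor(-0);    // -0  fallback; the shift would turn -0 into +0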
// ECMA 262 - 15.8.2.10
-function MathLog(x) { return %Math_log(ToNumber(x)); }
+function MathLog(x) {
+ if (!IS_NUMBER(x)) x = ToNumber(x);
+ return %Math_log(x);
+}
// ECMA 262 - 15.8.2.11
function MathMax(arg1, arg2) { // length == 2
@@ -103,22 +137,40 @@ function MathMin(arg1, arg2) { // length == 2
}
// ECMA 262 - 15.8.2.13
-function MathPow(x, y) { return %Math_pow(ToNumber(x), ToNumber(y)); }
+function MathPow(x, y) {
+ if (!IS_NUMBER(x)) x = ToNumber(x);
+ if (!IS_NUMBER(y)) y = ToNumber(y);
+ return %Math_pow(x, y);
+}
// ECMA 262 - 15.8.2.14
-function MathRandom() { return %Math_random(); }
+function MathRandom() {
+ return %_RandomPositiveSmi() / 0x40000000;
+}
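MathRandom now avoids a full runtime call: assuming %_RandomPositiveSmi yields an integer in [0, 2^30), dividing by 0x40000000 (which is 2^30) maps it onto [0, 1). With an injected source the arithmetic runs anywhere:

    function mathRandom(randomPositiveSmi) {
      return randomPositiveSmi() / 0x40000000;
    }
    mathRandom(function () { return 0; });           // 0
    mathRandom(function () { return 0x3FFFFFFF; });  // 0.9999999990686774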
// ECMA 262 - 15.8.2.15
-function MathRound(x) { return %Math_round(ToNumber(x)); }
+function MathRound(x) {
+ if (!IS_NUMBER(x)) x = ToNumber(x);
+ return %Math_round(x);
+}
// ECMA 262 - 15.8.2.16
-function MathSin(x) { return %Math_sin(ToNumber(x)); }
+function MathSin(x) {
+ if (!IS_NUMBER(x)) x = ToNumber(x);
+ return %_Math_sin(x);
+}
// ECMA 262 - 15.8.2.17
-function MathSqrt(x) { return %Math_sqrt(ToNumber(x)); }
+function MathSqrt(x) {
+ if (!IS_NUMBER(x)) x = ToNumber(x);
+ return %Math_sqrt(x);
+}
// ECMA 262 - 15.8.2.18
-function MathTan(x) { return %Math_tan(ToNumber(x)); }
+function MathTan(x) {
+ if (!IS_NUMBER(x)) x = ToNumber(x);
+ return %Math_tan(x);
+}
// -------------------------------------------------------------------
diff --git a/V8Binding/v8/src/messages.js b/V8Binding/v8/src/messages.js
index df8a2d1..882fed5 100644
--- a/V8Binding/v8/src/messages.js
+++ b/V8Binding/v8/src/messages.js
@@ -37,13 +37,13 @@ function GetInstanceName(cons) {
if (cons.length == 0) {
return "";
}
- var first = cons.charAt(0).toLowerCase();
+ var first = %StringToLowerCase(StringCharAt.call(cons, 0));
var mapping = kVowelSounds;
- if (cons.length > 1 && (cons.charAt(0) != first)) {
+ if (cons.length > 1 && (StringCharAt.call(cons, 0) != first)) {
// First char is upper case
- var second = cons.charAt(1).toLowerCase();
+ var second = %StringToLowerCase(StringCharAt.call(cons, 1));
// Second char is upper case
- if (cons.charAt(1) != second)
+ if (StringCharAt.call(cons, 1) != second)
mapping = kCapitalVowelSounds;
}
var s = mapping[first] ? "an " : "a ";
@@ -126,7 +126,7 @@ function FormatString(format, args) {
var str;
try { str = ToDetailString(args[i]); }
catch (e) { str = "#<error>"; }
- result = result.split("%" + i).join(str);
+ result = ArrayJoin.call(StringSplit.call(result, "%" + i), str);
}
return result;
}
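The systematic switch from method calls like cons.charAt(...) to StringCharAt.call(...), StringSplit.call(...) and ArrayJoin.call(...) makes message formatting immune to user scripts that replace the prototype methods. The hazard it defends against:

    var StringCharAt = String.prototype.charAt;             // saved reference
    String.prototype.charAt = function () { return "x"; };  // hostile override
    "Error".charAt(0);               // "x": the override wins
    StringCharAt.call("Error", 0);   // "E": original behavior preserved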
@@ -146,17 +146,9 @@ function ToDetailString(obj) {
function MakeGenericError(constructor, type, args) {
- if (args instanceof $Array) {
- for (var i = 0; i < args.length; i++) {
- var elem = args[i];
- if (elem instanceof $Array && elem.length > 100) { // arbitrary limit, grab a reasonable slice to report
- args[i] = elem.slice(0,20).concat("...");
- }
- }
- } else if (IS_UNDEFINED(args)) {
+ if (IS_UNDEFINED(args)) {
args = [];
}
-
var e = new constructor(kAddMessageAccessorsMarker);
e.type = type;
e.arguments = args;
@@ -230,6 +222,40 @@ function MakeError(type, args) {
return MakeGenericError($Error, type, args);
}
+/**
+ * Find a line number given a specific source position.
+ * @param {number} position The source position.
+ * @return {number} 0 if input too small, -1 if input too large,
+ *     else the line number.
+ */
+Script.prototype.lineFromPosition = function(position) {
+ var lower = 0;
+ var upper = this.lineCount() - 1;
+
+ // We'll never find invalid positions so bail right away.
+ if (position > this.line_ends[upper]) {
+ return -1;
+ }
+
+  // This means we don't have to safeguard indexing line_ends[i - 1].
+ if (position <= this.line_ends[0]) {
+ return 0;
+ }
+
+ // Binary search to find line # from position range.
+ while (upper >= 1) {
+ var i = (lower + upper) >> 1;
+
+ if (position > this.line_ends[i]) {
+ lower = i + 1;
+ } else if (position <= this.line_ends[i - 1]) {
+ upper = i - 1;
+ } else {
+ return i;
+ }
+ }
+ return -1;
+}
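The binary search maintains the invariant line_ends[i - 1] < position <= line_ends[i]; the two early returns are what make the i - 1 index safe. A standalone version with invented data:

    function lineFromPosition(lineEnds, position) {
      var lower = 0, upper = lineEnds.length - 1;
      if (position > lineEnds[upper]) return -1;   // past the last line end
      if (position <= lineEnds[0]) return 0;
      while (upper >= 1) {
        var i = (lower + upper) >> 1;
        if (position > lineEnds[i]) lower = i + 1;
        else if (position <= lineEnds[i - 1]) upper = i - 1;
        else return i;
      }
      return -1;
    }
    lineFromPosition([10, 25, 40], 5);   // 0
    lineFromPosition([10, 25, 40], 26);  // 2
    lineFromPosition([10, 25, 40], 41);  // -1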
/**
* Get information on a specific source position.
@@ -241,25 +267,13 @@ function MakeError(type, args) {
*/
Script.prototype.locationFromPosition = function (position,
include_resource_offset) {
- var lineCount = this.lineCount();
- var line = -1;
- if (position <= this.line_ends[0]) {
- line = 0;
- } else {
- for (var i = 1; i < lineCount; i++) {
- if (this.line_ends[i - 1] < position && position <= this.line_ends[i]) {
- line = i;
- break;
- }
- }
- }
-
+ var line = this.lineFromPosition(position);
if (line == -1) return null;
// Determine start, end and column.
var start = line == 0 ? 0 : this.line_ends[line - 1] + 1;
var end = this.line_ends[line];
- if (end > 0 && this.source.charAt(end - 1) == '\r') end--;
+ if (end > 0 && StringCharAt.call(this.source, end - 1) == '\r') end--;
var column = position - start;
// Adjust according to the offset within the resource.
@@ -308,16 +322,13 @@ Script.prototype.locationFromLine = function (opt_line, opt_column, opt_offset_p
if (line == 0) {
return this.locationFromPosition(offset_position + column, false);
} else {
- // Find the line where the offset position is located
- var lineCount = this.lineCount();
- var offset_line;
- for (var i = 0; i < lineCount; i++) {
- if (offset_position <= this.line_ends[i]) {
- offset_line = i;
- break;
- }
+ // Find the line where the offset position is located.
+ var offset_line = this.lineFromPosition(offset_position);
+
+ if (offset_line == -1 || offset_line + line >= this.lineCount()) {
+ return null;
}
- if (offset_line + line >= lineCount) return null;
+
return this.locationFromPosition(this.line_ends[offset_line + line - 1] + 1 + column); // line > 0 here.
}
}
@@ -375,7 +386,7 @@ Script.prototype.sourceLine = function (opt_line) {
// Return the source line.
var start = line == 0 ? 0 : this.line_ends[line - 1] + 1;
var end = this.line_ends[line];
- return this.source.substring(start, end);
+ return StringSubstring.call(this.source, start, end);
}
@@ -479,7 +490,7 @@ SourceLocation.prototype.restrict = function (opt_limit, opt_before) {
* Source text for this location.
*/
SourceLocation.prototype.sourceText = function () {
- return this.script.source.substring(this.start, this.end);
+ return StringSubstring.call(this.script.source, this.start, this.end);
};
@@ -516,7 +527,7 @@ function SourceSlice(script, from_line, to_line, from_position, to_position) {
* the line terminating characters (if any)
*/
SourceSlice.prototype.sourceText = function () {
- return this.script.source.substring(this.from_position, this.to_position);
+ return StringSubstring.call(this.script.source, this.from_position, this.to_position);
};
@@ -546,55 +557,9 @@ function MakeMessage(type, args, startPos, endPos, script, stackTrace) {
function GetStackTraceLine(recv, fun, pos, isGlobal) {
- try {
- return UnsafeGetStackTraceLine(recv, fun, pos, isGlobal);
- } catch (e) {
- return "<error: " + e + ">";
- }
+ return FormatSourcePosition(new CallSite(recv, fun, pos));
}
-
-function GetFunctionName(fun, recv) {
- var name = %FunctionGetName(fun);
- if (name) return name;
- for (var prop in recv) {
- if (recv[prop] === fun)
- return prop;
- }
- return "[anonymous]";
-}
-
-
-function UnsafeGetStackTraceLine(recv, fun, pos, isTopLevel) {
- var result = "";
- // The global frame has no meaningful function or receiver
- if (!isTopLevel) {
- // If the receiver is not the global object then prefix the
- // message send
- if (recv !== global)
- result += ToDetailString(recv) + ".";
- result += GetFunctionName(fun, recv);
- }
- if (pos != -1) {
- var script = %FunctionGetScript(fun);
- var file;
- if (script) {
- file = %FunctionGetScript(fun).data;
- }
- if (file) {
- var location = %FunctionGetScript(fun).locationFromPosition(pos, true);
- if (!isTopLevel) result += "(";
- result += file;
- if (location != null) {
- result += ":" + (location.line + 1) + ":" + (location.column + 1);
- }
- if (!isTopLevel) result += ")";
- }
- }
- return (result) ? " at " + result : result;
-}
-
-
// ----------------------------------------------------------------------------
// Error implementation
@@ -621,6 +586,197 @@ function DefineOneShotAccessor(obj, name, fun) {
});
}
+function CallSite(receiver, fun, pos) {
+ this.receiver = receiver;
+ this.fun = fun;
+ this.pos = pos;
+}
+
+CallSite.prototype.getThis = function () {
+ return this.receiver;
+};
+
+CallSite.prototype.getTypeName = function () {
+ var constructor = this.receiver.constructor;
+ if (!constructor)
+ return $Object.prototype.toString.call(this.receiver);
+ var constructorName = constructor.name;
+ if (!constructorName)
+ return $Object.prototype.toString.call(this.receiver);
+ return constructorName;
+};
+
+CallSite.prototype.isToplevel = function () {
+  if (this.receiver == null)
+    return true;
+  return IS_GLOBAL(this.receiver);
+};
+
+CallSite.prototype.isEval = function () {
+ var script = %FunctionGetScript(this.fun);
+ return script && script.compilation_type == 1;
+};
+
+CallSite.prototype.getEvalOrigin = function () {
+ var script = %FunctionGetScript(this.fun);
+ if (!script || script.compilation_type != 1)
+ return null;
+ return new CallSite(null, script.eval_from_function,
+ script.eval_from_position);
+};
+
+CallSite.prototype.getFunctionName = function () {
+ // See if the function knows its own name
+ var name = this.fun.name;
+ if (name)
+ return name;
+ // See if we can find a unique property on the receiver that holds
+ // this function.
+ for (var prop in this.receiver) {
+ if (this.receiver[prop] === this.fun) {
+ // If we find more than one match bail out to avoid confusion
+ if (name)
+ return null;
+ name = prop;
+ }
+ }
+ if (name)
+ return name;
+ // Maybe this is an evaluation?
+ var script = %FunctionGetScript(this.fun);
+ if (script && script.compilation_type == 1)
+ return "eval";
+ return null;
+};
+
+CallSite.prototype.getFileName = function () {
+ var script = %FunctionGetScript(this.fun);
+ return script ? script.name : null;
+};
+
+CallSite.prototype.getLineNumber = function () {
+ if (this.pos == -1)
+ return null;
+ var script = %FunctionGetScript(this.fun);
+ var location = null;
+ if (script) {
+ location = script.locationFromPosition(this.pos, true);
+ }
+ return location ? location.line + 1 : null;
+};
+
+CallSite.prototype.getColumnNumber = function () {
+ if (this.pos == -1)
+ return null;
+ var script = %FunctionGetScript(this.fun);
+ var location = null;
+ if (script) {
+ location = script.locationFromPosition(this.pos, true);
+ }
+ return location ? location.column : null;
+};
+
+CallSite.prototype.isNative = function () {
+ var script = %FunctionGetScript(this.fun);
+ return script ? (script.type == 0) : false;
+};
+
+CallSite.prototype.getPosition = function () {
+ return this.pos;
+};
+
+CallSite.prototype.isConstructor = function () {
+ var constructor = this.receiver ? this.receiver.constructor : null;
+ if (!constructor)
+ return false;
+ return this.fun === constructor;
+};
+
+function FormatSourcePosition(frame) {
+ var fileLocation = "";
+ if (frame.isNative()) {
+ fileLocation = "native";
+ } else if (frame.isEval()) {
+ fileLocation = "eval at " + FormatSourcePosition(frame.getEvalOrigin());
+ } else {
+ var fileName = frame.getFileName();
+ if (fileName) {
+ fileLocation += fileName;
+ var lineNumber = frame.getLineNumber();
+ if (lineNumber != null) {
+ fileLocation += ":" + lineNumber;
+ var columnNumber = frame.getColumnNumber();
+ if (columnNumber) {
+ fileLocation += ":" + columnNumber;
+ }
+ }
+ }
+ }
+ if (!fileLocation) {
+ fileLocation = "unknown source";
+ }
+ var line = "";
+ var functionName = frame.getFunctionName();
+ if (functionName) {
+ if (frame.isToplevel()) {
+ line += functionName;
+ } else if (frame.isConstructor()) {
+ line += "new " + functionName;
+ } else {
+ line += frame.getTypeName() + "." + functionName;
+ }
+ line += " (" + fileLocation + ")";
+ } else {
+ line += fileLocation;
+ }
+ return line;
+}
+
+function FormatStackTrace(error, frames) {
+ var lines = [];
+ try {
+ lines.push(error.toString());
+ } catch (e) {
+ try {
+ lines.push("<error: " + e + ">");
+ } catch (ee) {
+ lines.push("<error>");
+ }
+ }
+ for (var i = 0; i < frames.length; i++) {
+ var frame = frames[i];
+ try {
+ var line = FormatSourcePosition(frame);
+ } catch (e) {
+ try {
+ var line = "<error: " + e + ">";
+ } catch (ee) {
+ // Any code that reaches this point is seriously nasty!
+ var line = "<error>";
+ }
+ }
+ lines.push(" at " + line);
+ }
+ return lines.join("\n");
+}
+
+function FormatRawStackTrace(error, raw_stack) {
+ var frames = [ ];
+ for (var i = 0; i < raw_stack.length; i += 3) {
+ var recv = raw_stack[i];
+ var fun = raw_stack[i+1];
+ var pc = raw_stack[i+2];
+ var pos = %FunctionGetPositionForOffset(fun, pc);
+ frames.push(new CallSite(recv, fun, pos));
+ }
+ if (IS_FUNCTION($Error.prepareStackTrace)) {
+ return $Error.prepareStackTrace(error, frames);
+ } else {
+ return FormatStackTrace(error, frames);
+ }
+}
+
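Together with the DefineError change below, this gives errors a lazily formatted stack property and a user-overridable hook. A usage sketch, assuming the captureStackTraces flag and prepareStackTrace hook behave as wired up in this patch (in script code, plain Error is the same function the natives call $Error):

    Error.captureStackTraces = true;   // collection is off by default
    Error.prepareStackTrace = function (error, frames) {
      var out = [];
      for (var i = 0; i < frames.length; i++) {
        var f = frames[i];   // a CallSite object
        out.push(f.getFunctionName() + " @ " +
                 f.getFileName() + ":" + f.getLineNumber());
      }
      return out;
    };
    // Reading new Error("boom").stack would now yield the array built
    // above instead of the default FormatStackTrace text.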
function DefineError(f) {
// Store the error function in both the global object
// and the runtime object. The function is fetched
@@ -648,7 +804,7 @@ function DefineError(f) {
%SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
f.prototype.name = name;
%SetCode(f, function(m) {
- if (%IsConstructCall()) {
+ if (%_IsConstructCall()) {
if (m === kAddMessageAccessorsMarker) {
DefineOneShotAccessor(this, 'message', function (obj) {
return FormatMessage({type: obj.type, args: obj.arguments});
@@ -656,6 +812,12 @@ function DefineError(f) {
} else if (!IS_UNDEFINED(m)) {
this.message = ToString(m);
}
+ if ($Error.captureStackTraces) {
+ var raw_stack = %CollectStackTrace(f);
+ DefineOneShotAccessor(this, 'stack', function (obj) {
+ return FormatRawStackTrace(obj, raw_stack);
+ });
+ }
} else {
return new f(m);
}
diff --git a/V8Binding/v8/src/mirror-delay.js b/V8Binding/v8/src/mirror-delay.js
index f5a12c7..76ae75b 100644
--- a/V8Binding/v8/src/mirror-delay.js
+++ b/V8Binding/v8/src/mirror-delay.js
@@ -34,9 +34,14 @@ RegExp;
Date;
+// Handle id counters.
var next_handle_ = 0;
+var next_transient_handle_ = -1;
+
+// Mirror cache.
var mirror_cache_ = [];
+
/**
* Clear the mirror handle cache.
*/
@@ -50,19 +55,25 @@ function ClearMirrorCache() {
* Returns the mirror for a specified value or object.
*
 * @param {value or Object} value the value or object to retrieve the mirror for
+ * @param {boolean} transient indicates whether this object is transient and
+ *     should not be added to the mirror cache. The default is not transient.
 * @returns {Mirror} the mirror that reflects the passed value or object
*/
-function MakeMirror(value) {
+function MakeMirror(value, opt_transient) {
var mirror;
- for (id in mirror_cache_) {
- mirror = mirror_cache_[id];
- if (mirror.value() === value) {
- return mirror;
- }
- // Special check for NaN as NaN == NaN is false.
- if (mirror.isNumber() && isNaN(mirror.value()) &&
- typeof value == 'number' && isNaN(value)) {
- return mirror;
+
+  // Look for non-transient mirrors in the mirror cache.
+ if (!opt_transient) {
+ for (id in mirror_cache_) {
+ mirror = mirror_cache_[id];
+ if (mirror.value() === value) {
+ return mirror;
+ }
+ // Special check for NaN as NaN == NaN is false.
+ if (mirror.isNumber() && isNaN(mirror.value()) &&
+ typeof value == 'number' && isNaN(value)) {
+ return mirror;
+ }
}
}
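The cache lookup needs the explicit NaN branch because an identity comparison can never match a cached NaN mirror:

    var x = NaN;
    x === x;                            // false, so value() === value fails
    typeof x == 'number' && isNaN(x);   // true, the check used above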
@@ -89,7 +100,7 @@ function MakeMirror(value) {
} else if (IS_SCRIPT(value)) {
mirror = new ScriptMirror(value);
} else {
- mirror = new ObjectMirror(value);
+ mirror = new ObjectMirror(value, OBJECT_TYPE, opt_transient);
}
mirror_cache_[mirror.handle()] = mirror;
@@ -155,6 +166,7 @@ const PROPERTY_TYPE = 'property';
const FRAME_TYPE = 'frame';
const SCRIPT_TYPE = 'script';
const CONTEXT_TYPE = 'context';
+const SCOPE_TYPE = 'scope';
// Maximum length when sending strings through the JSON protocol.
const kMaxProtocolStringLength = 80;
@@ -185,6 +197,13 @@ PropertyAttribute.DontEnum = DONT_ENUM;
PropertyAttribute.DontDelete = DONT_DELETE;
+// A copy of the scope types from runtime.cc.
+ScopeType = { Global: 0,
+ Local: 1,
+ With: 2,
+ Closure: 3 };
+
+
// Mirror hierarchy:
// - Mirror
// - ValueMirror
@@ -373,6 +392,15 @@ Mirror.prototype.isContext = function() {
/**
+ * Check whether the mirror reflects a scope.
+ * @returns {boolean} True if the mirror reflects a scope
+ */
+Mirror.prototype.isScope = function() {
+ return this instanceof ScopeMirror;
+}
+
+
+/**
* Allocate a handle id for this object.
*/
Mirror.prototype.allocateHandle_ = function() {
@@ -380,6 +408,15 @@ Mirror.prototype.allocateHandle_ = function() {
}
+/**
+ * Allocate a transient handle id for this object. Transient handles are
+ * negative.
+ */
+Mirror.prototype.allocateTransientHandle_ = function() {
+ this.handle_ = next_transient_handle_--;
+}
+
+
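Regular and transient handles grow in opposite directions, zero and up versus -1 and down, so the two id spaces can never collide, and the sign of a handle presumably tells a protocol client whether it may still be resolvable later:

    var next_handle = 0;
    var next_transient_handle = -1;
    function allocateHandle(transient) {
      return transient ? next_transient_handle-- : next_handle++;
    }
    allocateHandle(false);  // 0
    allocateHandle(false);  // 1
    allocateHandle(true);   // -1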
Mirror.prototype.toText = function() {
  // Simple default toText, used when a subclass provides no specialization.
return "#<" + builtins.GetInstanceName(this.constructor.name) + ">";
@@ -390,13 +427,19 @@ Mirror.prototype.toText = function() {
* Base class for all value mirror objects.
* @param {string} type The type of the mirror
* @param {value} value The value reflected by this mirror
+ * @param {boolean} transient indicates whether this object is transient and
+ *     should use a transient handle
* @constructor
* @extends Mirror
*/
-function ValueMirror(type, value) {
+function ValueMirror(type, value, transient) {
Mirror.call(this, type);
this.value_ = value;
- this.allocateHandle_();
+ if (!transient) {
+ this.allocateHandle_();
+ } else {
+ this.allocateTransientHandle_();
+ }
}
inherits(ValueMirror, Mirror);
@@ -525,17 +568,19 @@ StringMirror.prototype.toText = function() {
/**
* Mirror object for objects.
* @param {object} value The object reflected by this mirror
+ * @param {boolean} transient indicates whether this object is transient and
+ *     should use a transient handle
* @constructor
* @extends ValueMirror
*/
-function ObjectMirror(value, type) {
- ValueMirror.call(this, type || OBJECT_TYPE, value);
+function ObjectMirror(value, type, transient) {
+ ValueMirror.call(this, type || OBJECT_TYPE, value, transient);
}
inherits(ObjectMirror, ValueMirror);
ObjectMirror.prototype.className = function() {
- return %ClassOf(this.value_);
+ return %_ClassOf(this.value_);
};
@@ -1080,7 +1125,7 @@ PropertyMirror.prototype.isIndexed = function() {
PropertyMirror.prototype.value = function() {
- return MakeMirror(this.value_);
+ return MakeMirror(this.value_, false);
}
@@ -1135,7 +1180,7 @@ PropertyMirror.prototype.getter = function() {
if (this.hasGetter()) {
return MakeMirror(this.getter_);
} else {
- return new UndefinedMirror();
+ return GetUndefinedMirror();
}
}
@@ -1149,7 +1194,7 @@ PropertyMirror.prototype.setter = function() {
if (this.hasSetter()) {
return MakeMirror(this.setter_);
} else {
- return new UndefinedMirror();
+ return GetUndefinedMirror();
}
}
@@ -1294,6 +1339,11 @@ FrameDetails.prototype.localValue = function(index) {
}
+FrameDetails.prototype.scopeCount = function() {
+ return %GetScopeCount(this.break_id_, this.frameId());
+}
+
+
/**
* Mirror object for stack frames.
* @param {number} break_id The break id in the VM for which this frame is
@@ -1419,6 +1469,16 @@ FrameMirror.prototype.sourceLineText = function() {
};
+FrameMirror.prototype.scopeCount = function() {
+ return this.details_.scopeCount();
+};
+
+
+FrameMirror.prototype.scope = function(index) {
+ return new ScopeMirror(this, index);
+};
+
+
FrameMirror.prototype.evaluate = function(source, disable_break) {
var result = %DebugEvaluate(this.break_id_, this.details_.frameId(),
source, Boolean(disable_break));
@@ -1562,6 +1622,70 @@ FrameMirror.prototype.toText = function(opt_locals) {
}
+const kScopeDetailsTypeIndex = 0;
+const kScopeDetailsObjectIndex = 1;
+
+function ScopeDetails(frame, index) {
+ this.break_id_ = frame.break_id_;
+ this.details_ = %GetScopeDetails(frame.break_id_,
+ frame.details_.frameId(),
+ index);
+}
+
+
+ScopeDetails.prototype.type = function() {
+ %CheckExecutionState(this.break_id_);
+ return this.details_[kScopeDetailsTypeIndex];
+}
+
+
+ScopeDetails.prototype.object = function() {
+ %CheckExecutionState(this.break_id_);
+ return this.details_[kScopeDetailsObjectIndex];
+}
+
+
+/**
+ * Mirror object for scope.
+ * @param {FrameMirror} frame The frame this scope is a part of
+ * @param {number} index The scope index in the frame
+ * @constructor
+ * @extends Mirror
+ */
+function ScopeMirror(frame, index) {
+ Mirror.call(this, SCOPE_TYPE);
+ this.frame_index_ = frame.index_;
+ this.scope_index_ = index;
+ this.details_ = new ScopeDetails(frame, index);
+}
+inherits(ScopeMirror, Mirror);
+
+
+ScopeMirror.prototype.frameIndex = function() {
+ return this.frame_index_;
+};
+
+
+ScopeMirror.prototype.scopeIndex = function() {
+ return this.scope_index_;
+};
+
+
+ScopeMirror.prototype.scopeType = function() {
+ return this.details_.type();
+};
+
+
+ScopeMirror.prototype.scopeObject = function() {
+  // For local and closure scopes create a transient mirror, as these objects
+  // are created on the fly by materializing the local or closure scope and
+  // therefore do not preserve identity.
+ var transient = this.scopeType() == ScopeType.Local ||
+ this.scopeType() == ScopeType.Closure;
+ return MakeMirror(this.details_.object(), transient);
+};
+
+
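A debugger client would enumerate a frame's scopes through the new mirror methods roughly like this (a sketch against the API introduced above):

    function listScopes(frameMirror) {
      var scopes = [];
      for (var i = 0; i < frameMirror.scopeCount(); i++) {
        // scopeType(): 0 = Global, 1 = Local, 2 = With, 3 = Closure.
        scopes.push({ index: i, type: frameMirror.scope(i).scopeType() });
      }
      return scopes;
    }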
/**
* Mirror object for script source.
* @param {Script} script The script object
@@ -1771,8 +1895,8 @@ JSONProtocolSerializer.prototype.includeSource_ = function() {
}
-JSONProtocolSerializer.prototype.compactFormat_ = function() {
- return this.options_ && this.options_.compactFormat;
+JSONProtocolSerializer.prototype.inlineRefs_ = function() {
+ return this.options_ && this.options_.inlineRefs;
}
@@ -1829,13 +1953,14 @@ JSONProtocolSerializer.prototype.serializeReferenceWithDisplayData_ =
return o;
};
+
JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
details) {
// If serializing a reference to a mirror just return the reference and add
// the mirror to the referenced mirrors.
if (reference &&
(mirror.isValue() || mirror.isScript() || mirror.isContext())) {
- if (this.compactFormat_() && mirror.isValue()) {
+ if (this.inlineRefs_() && mirror.isValue()) {
return this.serializeReferenceWithDisplayData_(mirror);
} else {
this.add_(mirror);
@@ -1900,6 +2025,11 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
this.serializeFrame_(mirror, content);
break;
+ case SCOPE_TYPE:
+ // Add object representation.
+ this.serializeScope_(mirror, content);
+ break;
+
case SCRIPT_TYPE:
// Script is represented by id, name and source attributes.
if (mirror.name()) {
@@ -1921,7 +2051,10 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
content.sourceLength = mirror.source().length;
content.scriptType = mirror.scriptType();
content.compilationType = mirror.compilationType();
- if (mirror.compilationType() == 1) { // Compilation type eval.
+ // For compilation type eval, emit information on the script from which
+ // eval was called, if a script is present.
+ if (mirror.compilationType() == 1 &&
+ mirror.evalFromFunction().script()) {
content.evalFromScript =
this.serializeReference(mirror.evalFromFunction().script());
var evalFromLocation = mirror.evalFromLocation()
@@ -2042,7 +2175,7 @@ JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
result.name = propertyMirror.name();
var propertyValue = propertyMirror.value();
- if (this.compactFormat_() && propertyValue.isValue()) {
+ if (this.inlineRefs_() && propertyValue.isValue()) {
result.value = this.serializeReferenceWithDisplayData_(propertyValue);
} else {
if (propertyMirror.attributes() != PropertyAttribute.None) {
@@ -2099,6 +2232,25 @@ JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
if (!IS_UNDEFINED(source_line_text)) {
content.sourceLineText = source_line_text;
}
+
+ content.scopes = [];
+ for (var i = 0; i < mirror.scopeCount(); i++) {
+ var scope = mirror.scope(i);
+ content.scopes.push({
+ type: scope.scopeType(),
+ index: i
+ });
+ }
+}
+
+
+JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
+ content.index = mirror.scopeIndex();
+ content.frameIndex = mirror.frameIndex();
+ content.type = mirror.scopeType();
+ content.object = this.inlineRefs_() ?
+ this.serializeValue(mirror.scopeObject()) :
+ this.serializeReference(mirror.scopeObject());
}
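For one scope, serializeScope_ therefore produces a payload shaped roughly like the following (the values are invented):

    var scopeContent = {
      index: 0,            // position of the scope within the frame
      frameIndex: 0,
      type: 1,             // ScopeType.Local
      object: { ref: 7 }   // or an inline value when inlineRefs is set
    };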
diff --git a/V8Binding/v8/src/objects-debug.cc b/V8Binding/v8/src/objects-debug.cc
index f42adf9..eec0be7 100644
--- a/V8Binding/v8/src/objects-debug.cc
+++ b/V8Binding/v8/src/objects-debug.cc
@@ -152,7 +152,9 @@ void HeapObject::HeapObjectPrint() {
case SHARED_FUNCTION_INFO_TYPE:
SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint();
break;
-
+ case JS_GLOBAL_PROPERTY_CELL_TYPE:
+ JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellPrint();
+ break;
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE: \
Name::cast(this)->Name##Print(); \
@@ -214,6 +216,9 @@ void HeapObject::HeapObjectVerify() {
case JS_BUILTINS_OBJECT_TYPE:
JSBuiltinsObject::cast(this)->JSBuiltinsObjectVerify();
break;
+ case JS_GLOBAL_PROPERTY_CELL_TYPE:
+ JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellVerify();
+ break;
case JS_ARRAY_TYPE:
JSArray::cast(this)->JSArrayVerify();
break;
@@ -392,6 +397,7 @@ static const char* TypeToString(InstanceType type) {
case JS_OBJECT_TYPE: return "JS_OBJECT";
case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";
case ODDBALL_TYPE: return "ODDBALL";
+ case JS_GLOBAL_PROPERTY_CELL_TYPE: return "JS_GLOBAL_PROPERTY_CELL";
case SHARED_FUNCTION_INFO_TYPE: return "SHARED_FUNCTION_INFO";
case JS_FUNCTION_TYPE: return "JS_FUNCTION";
case CODE_TYPE: return "CODE";
@@ -428,6 +434,9 @@ void Map::MapPrint() {
if (is_undetectable()) {
PrintF(" - undetectable\n");
}
+ if (needs_loading()) {
+ PrintF(" - needs_loading\n");
+ }
if (has_instance_call_handler()) {
PrintF(" - instance_call_handler\n");
}
@@ -653,6 +662,17 @@ void Oddball::OddballVerify() {
}
+void JSGlobalPropertyCell::JSGlobalPropertyCellVerify() {
+ CHECK(IsJSGlobalPropertyCell());
+ VerifyObjectField(kValueOffset);
+}
+
+
+void JSGlobalPropertyCell::JSGlobalPropertyCellPrint() {
+ HeapObject::PrintHeader("JSGlobalPropertyCell");
+}
+
+
void Code::CodePrint() {
HeapObject::PrintHeader("Code");
#ifdef ENABLE_DISASSEMBLER
diff --git a/V8Binding/v8/src/objects-inl.h b/V8Binding/v8/src/objects-inl.h
index d34e465..c360fd7 100644
--- a/V8Binding/v8/src/objects-inl.h
+++ b/V8Binding/v8/src/objects-inl.h
@@ -53,6 +53,13 @@ Smi* PropertyDetails::AsSmi() {
}
+PropertyDetails PropertyDetails::AsDeleted() {
+ PropertyDetails d(DONT_ENUM, NORMAL);
+ Smi* smi = Smi::FromInt(AsSmi()->value() | DeletedField::encode(1));
+ return PropertyDetails(smi);
+}
+
+
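AsDeleted re-encodes the details smi with the deleted bit set, so a global property's cell can stay in the dictionary while lookups treat the entry as gone. The pattern, sketched with an invented bit position (the real layout is defined by DeletedField and differs):

    var kDeletedBit = 1 << 6;   // illustrative position only
    function asDeleted(details) { return details | kDeletedBit; }
    function isDeleted(details) { return (details & kDeletedBit) !== 0; }
    isDeleted(asDeleted(0x12));  // true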
#define CAST_ACCESSOR(type) \
type* type::cast(Object* object) { \
ASSERT(object->Is##type()); \
@@ -409,6 +416,13 @@ bool Object::IsOddball() {
}
+bool Object::IsJSGlobalPropertyCell() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type()
+ == JS_GLOBAL_PROPERTY_CELL_TYPE;
+}
+
+
bool Object::IsSharedFunctionInfo() {
return Object::IsHeapObject() &&
(HeapObject::cast(this)->map()->instance_type() ==
@@ -481,11 +495,6 @@ bool Object::IsMapCache() {
}
-bool Object::IsLookupCache() {
- return IsHashTable();
-}
-
-
bool Object::IsPrimitive() {
return IsOddball() || IsNumber() || IsString();
}
@@ -659,6 +668,12 @@ Object* Object::GetProperty(String* key, PropertyAttributes* attributes) {
#define WRITE_INT_FIELD(p, offset, value) \
(*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
+#define READ_INTPTR_FIELD(p, offset) \
+ (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_INTPTR_FIELD(p, offset, value) \
+ (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
+
#define READ_UINT32_FIELD(p, offset) \
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)))
@@ -1045,6 +1060,8 @@ ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
+ACCESSORS(JSGlobalPropertyCell, value, Object, kValueOffset)
+
int JSObject::GetHeaderSize() {
switch (map()->instance_type()) {
case JS_GLOBAL_PROXY_TYPE:
@@ -1304,7 +1321,6 @@ int DescriptorArray::Search(String* name) {
}
-
String* DescriptorArray::GetKey(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
return String::cast(get(ToKeyIndex(descriptor_number)));
@@ -1388,7 +1404,6 @@ CAST_ACCESSOR(Dictionary)
CAST_ACCESSOR(SymbolTable)
CAST_ACCESSOR(CompilationCacheTable)
CAST_ACCESSOR(MapCache)
-CAST_ACCESSOR(LookupCache)
CAST_ACCESSOR(String)
CAST_ACCESSOR(SeqString)
CAST_ACCESSOR(SeqAsciiString)
@@ -1404,6 +1419,7 @@ CAST_ACCESSOR(Failure)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(HeapNumber)
CAST_ACCESSOR(Oddball)
+CAST_ACCESSOR(JSGlobalPropertyCell)
CAST_ACCESSOR(SharedFunctionInfo)
CAST_ACCESSOR(Map)
CAST_ACCESSOR(JSFunction)
@@ -1786,11 +1802,17 @@ int Map::inobject_properties() {
int HeapObject::SizeFromMap(Map* map) {
InstanceType instance_type = map->instance_type();
- // Only inline the two most frequent cases.
- if (instance_type == JS_OBJECT_TYPE) return map->instance_size();
+ // Only inline the most frequent cases.
+ if (instance_type == JS_OBJECT_TYPE ||
+ (instance_type & (kIsNotStringMask | kStringRepresentationMask)) ==
+ (kStringTag | kConsStringTag) ||
+ instance_type == JS_ARRAY_TYPE) return map->instance_size();
if (instance_type == FIXED_ARRAY_TYPE) {
return reinterpret_cast<FixedArray*>(this)->FixedArraySize();
}
+ if (instance_type == BYTE_ARRAY_TYPE) {
+ return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
+ }
// Otherwise do the general size computation.
return SlowSizeFromMap(map);
}
@@ -2130,6 +2152,7 @@ ACCESSORS(BreakPointInfo, statement_position, Smi, kStatementPositionIndex)
ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
#endif
+ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
kInstanceClassNameOffset)
@@ -2303,12 +2326,12 @@ void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
Address Proxy::proxy() {
- return AddressFrom<Address>(READ_INT_FIELD(this, kProxyOffset));
+ return AddressFrom<Address>(READ_INTPTR_FIELD(this, kProxyOffset));
}
void Proxy::set_proxy(Address value) {
- WRITE_INT_FIELD(this, kProxyOffset, OffsetFrom(value));
+ WRITE_INTPTR_FIELD(this, kProxyOffset, OffsetFrom(value));
}
@@ -2639,6 +2662,13 @@ void Map::ClearCodeCache() {
}
+void JSArray::EnsureSize(int required_size) {
+ ASSERT(HasFastElements());
+ if (elements()->length() >= required_size) return;
+ Expand(required_size);
+}
+
+
void JSArray::SetContent(FixedArray* storage) {
set_length(Smi::FromInt(storage->length()), SKIP_WRITE_BARRIER);
set_elements(storage);
diff --git a/V8Binding/v8/src/objects.cc b/V8Binding/v8/src/objects.cc
index 0546578..2ba7d36 100644
--- a/V8Binding/v8/src/objects.cc
+++ b/V8Binding/v8/src/objects.cc
@@ -138,7 +138,7 @@ void Object::Lookup(String* name, LookupResult* result) {
} else if (IsBoolean()) {
holder = global_context->boolean_function()->instance_prototype();
}
- ASSERT(holder != NULL); // cannot handle null or undefined.
+ ASSERT(holder != NULL); // Cannot handle null or undefined.
JSObject::cast(holder)->Lookup(name, result);
}
@@ -399,6 +399,88 @@ Object* JSObject::DeleteLazyProperty(LookupResult* result,
}
+Object* JSObject::GetNormalizedProperty(LookupResult* result) {
+ ASSERT(!HasFastProperties());
+ Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
+ if (IsJSGlobalObject()) {
+ value = JSGlobalPropertyCell::cast(value)->value();
+ }
+ ASSERT(!value->IsJSGlobalPropertyCell());
+ return value;
+}
+
+
+Object* JSObject::SetNormalizedProperty(LookupResult* result, Object* value) {
+ ASSERT(!HasFastProperties());
+ if (IsJSGlobalObject()) {
+ JSGlobalPropertyCell* cell =
+ JSGlobalPropertyCell::cast(
+ property_dictionary()->ValueAt(result->GetDictionaryEntry()));
+ cell->set_value(value);
+ } else {
+ property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value);
+ }
+ return value;
+}
+
+
+Object* JSObject::SetNormalizedProperty(String* name,
+ Object* value,
+ PropertyDetails details) {
+ ASSERT(!HasFastProperties());
+ int entry = property_dictionary()->FindStringEntry(name);
+ if (entry == Dictionary::kNotFound) {
+ Object* store_value = value;
+ if (IsJSGlobalObject()) {
+ store_value = Heap::AllocateJSGlobalPropertyCell(value);
+ if (store_value->IsFailure()) return store_value;
+ }
+ Object* dict =
+ property_dictionary()->AddStringEntry(name, store_value, details);
+ if (dict->IsFailure()) return dict;
+ set_properties(Dictionary::cast(dict));
+ return value;
+ }
+ // Preserve enumeration index.
+ details = PropertyDetails(details.attributes(),
+ details.type(),
+ property_dictionary()->DetailsAt(entry).index());
+ if (IsJSGlobalObject()) {
+ JSGlobalPropertyCell* cell =
+ JSGlobalPropertyCell::cast(property_dictionary()->ValueAt(entry));
+ cell->set_value(value);
+ // Please note we have to update the property details.
+ property_dictionary()->DetailsAtPut(entry, details);
+ } else {
+ property_dictionary()->SetStringEntry(entry, name, value, details);
+ }
+ return value;
+}
+
+
+Object* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) {
+ ASSERT(!HasFastProperties());
+ Dictionary* dictionary = property_dictionary();
+ int entry = dictionary->FindStringEntry(name);
+ if (entry != Dictionary::kNotFound) {
+    // If we have a global object, set the cell to the hole.
+ if (IsJSGlobalObject()) {
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ if (details.IsDontDelete() && mode != FORCE_DELETION) {
+ return Heap::false_value();
+ }
+ JSGlobalPropertyCell* cell =
+ JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
+ cell->set_value(Heap::the_hole_value());
+ dictionary->DetailsAtPut(entry, details.AsDeleted());
+ } else {
+ return dictionary->DeleteProperty(entry, mode);
+ }
+ }
+ return Heap::true_value();
+}
+
+
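These functions establish the cell protocol for global objects: the property dictionary maps a name to a JSGlobalPropertyCell, reads and writes go through the cell, and deletion stores the hole into the cell instead of removing the entry, so code holding a cached cell pointer stays valid. A toy model in JavaScript:

    var THE_HOLE = {};   // stand-in for Heap::the_hole_value()
    function GlobalObject() { this.dict = {}; }
    GlobalObject.prototype.set = function (name, value) {
      var cell = this.dict[name] || (this.dict[name] = { value: THE_HOLE });
      cell.value = value;            // cached cell references see the update
    };
    GlobalObject.prototype.get = function (name) {
      var cell = this.dict[name];
      return (cell && cell.value !== THE_HOLE) ? cell.value : undefined;
    };
    GlobalObject.prototype.remove = function (name) {
      var cell = this.dict[name];
      if (cell) cell.value = THE_HOLE;  // empty the cell, keep the entry
    };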
Object* Object::GetProperty(Object* receiver,
LookupResult* result,
String* name,
@@ -449,8 +531,7 @@ Object* Object::GetProperty(Object* receiver,
JSObject* holder = result->holder();
switch (result->type()) {
case NORMAL:
- value =
- holder->property_dictionary()->ValueAt(result->GetDictionaryEntry());
+ value = holder->GetNormalizedProperty(result);
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? Heap::undefined_value() : value;
case FIELD:
@@ -949,6 +1030,10 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
case PROXY_TYPE:
accumulator->Add("<Proxy>");
break;
+ case JS_GLOBAL_PROPERTY_CELL_TYPE:
+ accumulator->Add("Cell for ");
+ JSGlobalPropertyCell::cast(this)->value()->ShortPrint(accumulator);
+ break;
default:
accumulator->Add("<Other heap object (%d)>", map()->instance_type());
break;
@@ -1042,6 +1127,10 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case CODE_TYPE:
reinterpret_cast<Code*>(this)->CodeIterateBody(v);
break;
+ case JS_GLOBAL_PROPERTY_CELL_TYPE:
+ reinterpret_cast<JSGlobalPropertyCell*>(this)
+ ->JSGlobalPropertyCellIterateBody(v);
+ break;
case HEAP_NUMBER_TYPE:
case FILLER_TYPE:
case BYTE_ARRAY_TYPE:
@@ -1250,12 +1339,27 @@ Object* JSObject::AddConstantFunctionProperty(String* name,
Object* JSObject::AddSlowProperty(String* name,
Object* value,
PropertyAttributes attributes) {
+ ASSERT(!HasFastProperties());
+ Dictionary* dict = property_dictionary();
+ Object* store_value = value;
+ if (IsJSGlobalObject()) {
+    // In case name is an orphaned property, reuse the cell.
+ int entry = dict->FindStringEntry(name);
+ if (entry != Dictionary::kNotFound) {
+ store_value = dict->ValueAt(entry);
+ JSGlobalPropertyCell::cast(store_value)->set_value(value);
+ PropertyDetails details = PropertyDetails(attributes, NORMAL);
+ dict->SetStringEntry(entry, name, store_value, details);
+ return value;
+ }
+ store_value = Heap::AllocateJSGlobalPropertyCell(value);
+ if (store_value->IsFailure()) return store_value;
+ JSGlobalPropertyCell::cast(store_value)->set_value(value);
+ }
PropertyDetails details = PropertyDetails(attributes, NORMAL);
- Object* result = property_dictionary()->AddStringEntry(name, value, details);
+ Object* result = dict->AddStringEntry(name, store_value, details);
if (result->IsFailure()) return result;
- if (property_dictionary() != result) {
- set_properties(Dictionary::cast(result));
- }
+ if (dict != result) set_properties(Dictionary::cast(result));
return value;
}
@@ -1302,19 +1406,16 @@ Object* JSObject::ReplaceSlowProperty(String* name,
Object* value,
PropertyAttributes attributes) {
Dictionary* dictionary = property_dictionary();
- PropertyDetails old_details =
- dictionary->DetailsAt(dictionary->FindStringEntry(name));
- int new_index = old_details.index();
- if (old_details.IsTransition()) new_index = 0;
-
- PropertyDetails new_details(attributes, NORMAL, old_details.index());
- Object* result =
- property_dictionary()->SetOrAddStringEntry(name, value, new_details);
- if (result->IsFailure()) return result;
- if (property_dictionary() != result) {
- set_properties(Dictionary::cast(result));
+ int old_index = dictionary->FindStringEntry(name);
+ int new_enumeration_index = 0; // 0 means "Use the next available index."
+  if (old_index != Dictionary::kNotFound) {
+ // All calls to ReplaceSlowProperty have had all transitions removed.
+ ASSERT(!dictionary->DetailsAt(old_index).IsTransition());
+ new_enumeration_index = dictionary->DetailsAt(old_index).index();
}
- return value;
+
+ PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
+ return SetNormalizedProperty(name, value, new_details);
}
Object* JSObject::ConvertDescriptorToFieldAndMapTransition(
@@ -1547,7 +1648,7 @@ Object* JSObject::LookupCallbackSetterInPrototypes(uint32_t index) {
if (JSObject::cast(pt)->HasFastElements()) continue;
Dictionary* dictionary = JSObject::cast(pt)->element_dictionary();
int entry = dictionary->FindNumberEntry(index);
- if (entry != -1) {
+ if (entry != Dictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
@@ -1562,7 +1663,11 @@ Object* JSObject::LookupCallbackSetterInPrototypes(uint32_t index) {
void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
DescriptorArray* descriptors = map()->instance_descriptors();
- int number = descriptors->Search(name);
+ int number = DescriptorLookupCache::Lookup(descriptors, name);
+ if (number == DescriptorLookupCache::kAbsent) {
+ number = descriptors->Search(name);
+ DescriptorLookupCache::Update(descriptors, name, number);
+ }
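The lookup now consults DescriptorLookupCache before the linear Search over the descriptor array; the shape of that memoization, with keying and eviction simplified away and the method names invented:

    var cache = {};   // (descriptor-array id, name) -> index
    function cachedSearch(descriptors, name) {
      var key = descriptors.id + "|" + name;   // assumes a stable per-array id
      if (!(key in cache)) cache[key] = descriptors.search(name);
      return cache[key];
    }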
if (number != DescriptorArray::kNotFound) {
result->DescriptorResult(this, descriptors->GetDetails(number), number);
} else {
@@ -1594,10 +1699,20 @@ void JSObject::LocalLookupRealNamedProperty(String* name,
}
} else {
int entry = property_dictionary()->FindStringEntry(name);
- if (entry != DescriptorArray::kNotFound) {
+ if (entry != Dictionary::kNotFound) {
// Make sure to disallow caching for uninitialized constants
// found in the dictionary-mode objects.
- if (property_dictionary()->ValueAt(entry)->IsTheHole()) {
+ Object* value = property_dictionary()->ValueAt(entry);
+ if (IsJSGlobalObject()) {
+ PropertyDetails d = property_dictionary()->DetailsAt(entry);
+ if (d.IsDeleted()) {
+ result->NotFound();
+ return;
+ }
+ value = JSGlobalPropertyCell::cast(value)->value();
+ ASSERT(result->IsLoaded());
+ }
+ if (value->IsTheHole()) {
result->DisallowCaching();
}
result->DictionaryResult(this, entry);
@@ -1695,7 +1810,7 @@ Object* JSObject::SetProperty(LookupResult* result,
// Check access rights if needed.
if (IsAccessCheckNeeded()
- && !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ && !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
return SetPropertyWithFailedAccessCheck(result, name, value);
}
@@ -1729,8 +1844,7 @@ Object* JSObject::SetProperty(LookupResult* result,
// transition or null descriptor and there are no setters in the prototypes.
switch (result->type()) {
case NORMAL:
- property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value);
- return value;
+ return SetNormalizedProperty(result, value);
case FIELD:
return FastPropertyAtPut(result->GetFieldIndex(), value);
case MAP_TRANSITION:
@@ -1812,8 +1926,7 @@ Object* JSObject::IgnoreAttributesAndSetLocalProperty(
// Check of IsReadOnly removed from here in clone.
switch (result->type()) {
case NORMAL:
- property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value);
- return value;
+ return SetNormalizedProperty(result, value);
case FIELD:
return FastPropertyAtPut(result->GetFieldIndex(), value);
case MAP_TRANSITION:
@@ -2001,6 +2114,10 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) {
PropertyDetails d =
PropertyDetails(details.attributes(), NORMAL, details.index());
Object* value = r.GetConstantFunction();
+ if (IsJSGlobalObject()) {
+ value = Heap::AllocateJSGlobalPropertyCell(value);
+ if (value->IsFailure()) return value;
+ }
Object* result = dictionary->AddStringEntry(r.GetKey(), value, d);
if (result->IsFailure()) return result;
dictionary = Dictionary::cast(result);
@@ -2010,6 +2127,10 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) {
PropertyDetails d =
PropertyDetails(details.attributes(), NORMAL, details.index());
Object* value = FastPropertyAt(r.GetFieldIndex());
+ if (IsJSGlobalObject()) {
+ value = Heap::AllocateJSGlobalPropertyCell(value);
+ if (value->IsFailure()) return value;
+ }
Object* result = dictionary->AddStringEntry(r.GetKey(), value, d);
if (result->IsFailure()) return result;
dictionary = Dictionary::cast(result);
@@ -2019,6 +2140,10 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) {
PropertyDetails d =
PropertyDetails(details.attributes(), CALLBACKS, details.index());
Object* value = r.GetCallbacksObject();
+ if (IsJSGlobalObject()) {
+ value = Heap::AllocateJSGlobalPropertyCell(value);
+ if (value->IsFailure()) return value;
+ }
Object* result = dictionary->AddStringEntry(r.GetKey(), value, d);
if (result->IsFailure()) return result;
dictionary = Dictionary::cast(result);
@@ -2078,6 +2203,7 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) {
Object* JSObject::TransformToFastProperties(int unused_property_fields) {
if (HasFastProperties()) return this;
+ ASSERT(!IsJSGlobalObject());
return property_dictionary()->
TransformPropertiesToFastFor(this, unused_property_fields);
}
@@ -2132,12 +2258,7 @@ Object* JSObject::DeletePropertyPostInterceptor(String* name, DeleteMode mode) {
Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
if (obj->IsFailure()) return obj;
- ASSERT(!HasFastProperties());
- // Attempt to remove the property from the property dictionary.
- Dictionary* dictionary = property_dictionary();
- int entry = dictionary->FindStringEntry(name);
- if (entry != -1) return dictionary->DeleteProperty(entry, mode);
- return Heap::true_value();
+ return DeleteNormalizedProperty(name, mode);
}
@@ -2187,7 +2308,9 @@ Object* JSObject::DeleteElementPostInterceptor(uint32_t index,
ASSERT(!HasFastElements());
Dictionary* dictionary = element_dictionary();
int entry = dictionary->FindNumberEntry(index);
- if (entry != -1) return dictionary->DeleteProperty(entry, mode);
+ if (entry != Dictionary::kNotFound) {
+ return dictionary->DeleteProperty(entry, mode);
+ }
return Heap::true_value();
}
@@ -2259,7 +2382,9 @@ Object* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
} else {
Dictionary* dictionary = element_dictionary();
int entry = dictionary->FindNumberEntry(index);
- if (entry != -1) return dictionary->DeleteProperty(entry, mode);
+ if (entry != Dictionary::kNotFound) {
+ return dictionary->DeleteProperty(entry, mode);
+ }
}
return Heap::true_value();
}
@@ -2311,10 +2436,7 @@ Object* JSObject::DeleteProperty(String* name, DeleteMode mode) {
Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
if (obj->IsFailure()) return obj;
// Make sure the properties are normalized before removing the entry.
- Dictionary* dictionary = property_dictionary();
- int entry = dictionary->FindStringEntry(name);
- if (entry != -1) return dictionary->DeleteProperty(entry, mode);
- return Heap::true_value();
+ return DeleteNormalizedProperty(name, mode);
}
}
@@ -2567,7 +2689,7 @@ Object* JSObject::DefineGetterSetter(String* name,
if (!HasFastElements()) {
Dictionary* dictionary = element_dictionary();
int entry = dictionary->FindNumberEntry(index);
- if (entry != -1) {
+ if (entry != Dictionary::kNotFound) {
Object* result = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.IsReadOnly()) return Heap::undefined_value();
@@ -2617,12 +2739,7 @@ Object* JSObject::DefineGetterSetter(String* name,
if (ok->IsFailure()) return ok;
// Update the dictionary with the new CALLBACKS property.
- Object* dict =
- property_dictionary()->SetOrAddStringEntry(name, structure, details);
- if (dict->IsFailure()) return dict;
-
- // Set the potential new dictionary on the object.
- set_properties(Dictionary::cast(dict));
+ return SetNormalizedProperty(name, structure, details);
}
return structure;
@@ -2676,7 +2793,7 @@ Object* JSObject::LookupAccessor(String* name, bool is_getter) {
if (!jsObject->HasFastElements()) {
Dictionary* dictionary = jsObject->element_dictionary();
int entry = dictionary->FindNumberEntry(index);
- if (entry != -1) {
+ if (entry != Dictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
@@ -3967,6 +4084,11 @@ void ConsString::ConsStringIterateBody(ObjectVisitor* v) {
}
+void JSGlobalPropertyCell::JSGlobalPropertyCellIterateBody(ObjectVisitor* v) {
+ IteratePointers(v, kValueOffset, kValueOffset + kPointerSize);
+}
+
+
uint16_t ConsString::ConsStringGet(int index) {
ASSERT(index >= 0 && index < this->length());
@@ -4632,7 +4754,7 @@ void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) {
- IteratePointers(v, kNameOffset, kCodeOffset + kPointerSize);
+ IteratePointers(v, kNameOffset, kConstructStubOffset + kPointerSize);
IteratePointers(v, kInstanceClassNameOffset, kScriptOffset + kPointerSize);
IteratePointers(v, kDebugInfoOffset, kInferredNameOffset + kPointerSize);
}
@@ -4890,8 +5012,30 @@ const char* Code::ICState2String(InlineCacheState state) {
}
+const char* Code::PropertyType2String(PropertyType type) {
+ switch (type) {
+ case NORMAL: return "NORMAL";
+ case FIELD: return "FIELD";
+ case CONSTANT_FUNCTION: return "CONSTANT_FUNCTION";
+ case CALLBACKS: return "CALLBACKS";
+ case INTERCEPTOR: return "INTERCEPTOR";
+ case MAP_TRANSITION: return "MAP_TRANSITION";
+ case CONSTANT_TRANSITION: return "CONSTANT_TRANSITION";
+ case NULL_DESCRIPTOR: return "NULL_DESCRIPTOR";
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
void Code::Disassemble(const char* name) {
PrintF("kind = %s\n", Kind2String(kind()));
+ if (is_inline_cache_stub()) {
+ PrintF("ic_state = %s\n", ICState2String(ic_state()));
+ PrintF("ic_in_loop = %d\n", ic_in_loop() == IN_LOOP);
+ if (ic_state() == MONOMORPHIC) {
+ PrintF("type = %s\n", PropertyType2String(type()));
+ }
+ }
if ((name != NULL) && (name[0] != '\0')) {
PrintF("name = %s\n", name);
}
@@ -4977,10 +5121,8 @@ Object* JSArray::Initialize(int capacity) {
}
-void JSArray::EnsureSize(int required_size) {
+void JSArray::Expand(int required_size) {
Handle<JSArray> self(this);
- ASSERT(HasFastElements());
- if (elements()->length() >= required_size) return;
Handle<FixedArray> old_backing(elements());
int old_size = old_backing->length();
// Doubling in size would be overkill, but leave some slack to avoid
@@ -5090,7 +5232,9 @@ bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
return true;
}
} else {
- if (element_dictionary()->FindNumberEntry(index) != -1) return true;
+ if (element_dictionary()->FindNumberEntry(index) != Dictionary::kNotFound) {
+ return true;
+ }
}
// Handle [] on String objects.
@@ -5165,7 +5309,8 @@ bool JSObject::HasLocalElement(uint32_t index) {
return (index < length) &&
!FixedArray::cast(elements())->get(index)->IsTheHole();
} else {
- return element_dictionary()->FindNumberEntry(index) != -1;
+ return element_dictionary()->FindNumberEntry(index)
+ != Dictionary::kNotFound;
}
}
@@ -5191,7 +5336,9 @@ bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
if ((index < length) &&
!FixedArray::cast(elements())->get(index)->IsTheHole()) return true;
} else {
- if (element_dictionary()->FindNumberEntry(index) != -1) return true;
+ if (element_dictionary()->FindNumberEntry(index) != Dictionary::kNotFound) {
+ return true;
+ }
}
// Handle [] on String objects.
@@ -5203,27 +5350,6 @@ bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
}
-Object* JSObject::SetElementPostInterceptor(uint32_t index, Object* value) {
- if (HasFastElements()) return SetFastElement(index, value);
-
- // Dictionary case.
- ASSERT(!HasFastElements());
-
- FixedArray* elms = FixedArray::cast(elements());
- Object* result = Dictionary::cast(elms)->AtNumberPut(index, value);
- if (result->IsFailure()) return result;
- if (elms != FixedArray::cast(result)) {
- set_elements(FixedArray::cast(result));
- }
-
- if (IsJSArray()) {
- return JSArray::cast(this)->JSArrayUpdateLengthFromIndex(index, value);
- }
-
- return value;
-}
-
-
Object* JSObject::SetElementWithInterceptor(uint32_t index, Object* value) {
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
@@ -5250,7 +5376,7 @@ Object* JSObject::SetElementWithInterceptor(uint32_t index, Object* value) {
if (!result.IsEmpty()) return *value_handle;
}
Object* raw_result =
- this_handle->SetElementPostInterceptor(index, *value_handle);
+ this_handle->SetElementWithoutInterceptor(index, *value_handle);
RETURN_IF_SCHEDULED_EXCEPTION();
return raw_result;
}
@@ -5332,6 +5458,11 @@ Object* JSObject::SetElement(uint32_t index, Object* value) {
return SetElementWithInterceptor(index, value);
}
+ return SetElementWithoutInterceptor(index, value);
+}
+
+
+Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
// Fast case.
if (HasFastElements()) return SetFastElement(index, value);
@@ -5343,7 +5474,7 @@ Object* JSObject::SetElement(uint32_t index, Object* value) {
Dictionary* dictionary = Dictionary::cast(elms);
int entry = dictionary->FindNumberEntry(index);
- if (entry != -1) {
+ if (entry != Dictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
@@ -5437,8 +5568,22 @@ Object* JSObject::GetElementPostInterceptor(JSObject* receiver,
} else {
Dictionary* dictionary = element_dictionary();
int entry = dictionary->FindNumberEntry(index);
- if (entry != -1) {
- return dictionary->ValueAt(entry);
+ if (entry != Dictionary::kNotFound) {
+ Object* element = dictionary->ValueAt(entry);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ if (details.type() == CALLBACKS) {
+ // Only accessors allowed as elements.
+ FixedArray* structure = FixedArray::cast(element);
+ Object* getter = structure->get(kGetterIndex);
+ if (getter->IsJSFunction()) {
+ return GetPropertyWithDefinedGetter(receiver,
+ JSFunction::cast(getter));
+ } else {
+ // Getter is not a function.
+ return Heap::undefined_value();
+ }
+ }
+ return element;
}
}
@@ -5507,7 +5652,7 @@ Object* JSObject::GetElementWithReceiver(JSObject* receiver, uint32_t index) {
} else {
Dictionary* dictionary = element_dictionary();
int entry = dictionary->FindNumberEntry(index);
- if (entry != -1) {
+ if (entry != Dictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
@@ -5800,7 +5945,8 @@ bool JSObject::HasRealElementProperty(uint32_t index) {
return (index < length) &&
!FixedArray::cast(elements())->get(index)->IsTheHole();
}
- return element_dictionary()->FindNumberEntry(index) != -1;
+ return element_dictionary()->FindNumberEntry(index)
+ != Dictionary::kNotFound;
}
@@ -6325,7 +6471,7 @@ Object* HashTable<prefix_size, element_size>::Allocate(int at_least_space_for) {
template <int prefix_size, int element_size>
int HashTable<prefix_size, element_size>::FindEntry(HashTableKey* key) {
uint32_t nof = NumberOfElements();
- if (nof == 0) return -1; // Bail out if empty.
+ if (nof == 0) return kNotFound; // Bail out if empty.
uint32_t capacity = Capacity();
uint32_t hash = key->Hash();
@@ -6335,17 +6481,17 @@ int HashTable<prefix_size, element_size>::FindEntry(HashTableKey* key) {
uint32_t passed_elements = 0;
if (!element->IsNull()) {
if (!element->IsUndefined() && key->IsMatch(element)) return entry;
- if (++passed_elements == nof) return -1;
+ if (++passed_elements == nof) return kNotFound;
}
for (uint32_t i = 1; !element->IsUndefined(); i++) {
entry = GetProbe(hash, i, capacity);
element = KeyAt(entry);
if (!element->IsNull()) {
if (!element->IsUndefined() && key->IsMatch(element)) return entry;
- if (++passed_elements == nof) return -1;
+ if (++passed_elements == nof) return kNotFound;
}
}
- return -1;
+ return kNotFound;
}
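
The hunk above only renames the sentinel: probing still walks GetProbe(hash, i, capacity) until it hits the key or an empty slot, and returns the named kNotFound constant instead of a bare -1. A minimal standalone sketch of the same open-addressing lookup; the probe sequence and empty-slot convention here are illustrative, not V8's exact scheme:

    #include <cstdint>
    #include <vector>

    static const int kNotFound = -1;  // Named sentinel, as in the patch.

    // Toy probe sequence over a power-of-two capacity (triangular probing).
    static uint32_t GetProbe(uint32_t hash, uint32_t i, uint32_t capacity) {
      return (hash + (i + i * i) / 2) & (capacity - 1);
    }

    // Returns the entry index for key, or kNotFound. The key doubles as its
    // own hash here, and key 0 marks empty slots, so 0 is not a valid key.
    int FindEntry(const std::vector<uint32_t>& keys, uint32_t key) {
      uint32_t capacity = static_cast<uint32_t>(keys.size());
      for (uint32_t i = 0; i < capacity; i++) {
        uint32_t entry = GetProbe(key, i, capacity);
        if (keys[entry] == 0) return kNotFound;  // Hit an empty slot.
        if (keys[entry] == key) return static_cast<int>(entry);
      }
      return kNotFound;  // Table full and key absent.
    }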
@@ -6354,8 +6500,8 @@ Object* HashTable<prefix_size, element_size>::EnsureCapacity(
int n, HashTableKey* key) {
int capacity = Capacity();
int nof = NumberOfElements() + n;
- // Make sure 25% is free
- if (nof + (nof >> 2) <= capacity) return this;
+ // Make sure 50% is free
+ if (nof + (nof >> 1) <= capacity) return this;
Object* obj = Allocate(nof * 2);
if (obj->IsFailure()) return obj;
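
The changed check keeps headroom proportional to the element count: nof + (nof >> 2) <= capacity demands slack of 25% of nof (occupancy up to 4/5), while nof + (nof >> 1) <= capacity demands 50% of nof (occupancy up to 2/3), shortening probe chains at the cost of memory. A small sketch of the arithmetic, with illustrative names:

    #include <cstdio>

    // Sketch of the patched occupancy check; names are illustrative.
    bool HasEnoughSlack(int nof, int capacity) {
      // Headroom of 50% of the element count: nof * 1.5 <= capacity,
      // i.e. the table stays at most ~2/3 full.
      return nof + (nof >> 1) <= capacity;
    }

    int main() {
      printf("%d\n", HasEnoughSlack(64, 128));  // 1: 96 <= 128, no resize.
      printf("%d\n", HasEnoughSlack(90, 128));  // 0: 135 > 128, grow to nof*2.
      return 0;
    }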
@@ -6436,10 +6582,6 @@ Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
AssertNoAllocation no_alloc;
- // Loose all details on properties when moving them around.
- // Elements do not have special details like properties.
- PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
-
uint32_t pos = 0;
uint32_t undefs = 0;
for (int i = 0; i < capacity; i++) {
@@ -6450,21 +6592,27 @@ Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() >= 0);
ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32);
Object* value = dict->ValueAt(i);
+ PropertyDetails details = dict->DetailsAt(i);
+ if (details.type() == CALLBACKS) {
+ // Bail out and do the sorting of undefineds and array holes in JS.
+ return Smi::FromInt(-1);
+ }
uint32_t key = NumberToUint32(k);
if (key < limit) {
if (value->IsUndefined()) {
undefs++;
} else {
- new_dict->AddNumberEntry(pos, value, no_details);
+ new_dict->AddNumberEntry(pos, value, details);
pos++;
}
} else {
- new_dict->AddNumberEntry(key, value, no_details);
+ new_dict->AddNumberEntry(key, value, details);
}
}
}
uint32_t result = pos;
+ PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
while (undefs > 0) {
new_dict->AddNumberEntry(pos, Heap::undefined_value(), no_details);
pos++;
@@ -6583,6 +6731,14 @@ Object* JSObject::PrepareElementsForSort(uint32_t limit) {
}
+Object* JSGlobalObject::GetPropertyCell(LookupResult* result) {
+ ASSERT(!HasFastProperties());
+ Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
+ ASSERT(value->IsJSGlobalPropertyCell());
+ return value;
+}
+
+
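+ GetPropertyCell above depends on the new invariant that a normalized JS global object stores a JSGlobalPropertyCell per property rather than the raw value, so callers (e.g. inline caches) can hold on to the cell across later stores. A simplified stand-in for that indirection, with hypothetical types rather than V8's:

    #include <cassert>
    #include <map>
    #include <string>

    // Hypothetical stand-ins: each global property owns a cell, and readers
    // go through the cell, so a cached cell pointer survives later stores.
    struct PropertyCell { int value; };

    class GlobalObjectSketch {
     public:
      PropertyCell* GetPropertyCell(const std::string& name) {
        return &dictionary_[name];        // Cell identity is stable.
      }
      void Set(const std::string& name, int value) {
        dictionary_[name].value = value;  // Update in place; same cell.
      }
     private:
      std::map<std::string, PropertyCell> dictionary_;
    };

    int main() {
      GlobalObjectSketch global;
      global.Set("x", 1);
      PropertyCell* cached = global.GetPropertyCell("x");  // e.g. held by an IC.
      global.Set("x", 2);
      assert(cached->value == 2);  // The cached cell sees the new value.
      return 0;
    }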
Object* SymbolTable::LookupString(String* string, Object** s) {
SymbolKey key(string);
return LookupKey(&key, s);
@@ -6592,7 +6748,7 @@ Object* SymbolTable::LookupString(String* string, Object** s) {
bool SymbolTable::LookupSymbolIfExists(String* string, String** symbol) {
SymbolKey key(string);
int entry = FindEntry(&key);
- if (entry == -1) {
+ if (entry == kNotFound) {
return false;
} else {
String* result = String::cast(KeyAt(entry));
@@ -6613,7 +6769,7 @@ Object* SymbolTable::LookupKey(HashTableKey* key, Object** s) {
int entry = FindEntry(key);
// Symbol already in table.
- if (entry != -1) {
+ if (entry != kNotFound) {
*s = KeyAt(entry);
return this;
}
@@ -6643,7 +6799,7 @@ Object* SymbolTable::LookupKey(HashTableKey* key, Object** s) {
Object* CompilationCacheTable::Lookup(String* src) {
StringKey key(src);
int entry = FindEntry(&key);
- if (entry == -1) return Heap::undefined_value();
+ if (entry == kNotFound) return Heap::undefined_value();
return get(EntryToIndex(entry) + 1);
}
@@ -6651,7 +6807,7 @@ Object* CompilationCacheTable::Lookup(String* src) {
Object* CompilationCacheTable::LookupEval(String* src, Context* context) {
StringSharedKey key(src, context->closure()->shared());
int entry = FindEntry(&key);
- if (entry == -1) return Heap::undefined_value();
+ if (entry == kNotFound) return Heap::undefined_value();
return get(EntryToIndex(entry) + 1);
}
@@ -6660,7 +6816,7 @@ Object* CompilationCacheTable::LookupRegExp(String* src,
JSRegExp::Flags flags) {
RegExpKey key(src, flags);
int entry = FindEntry(&key);
- if (entry == -1) return Heap::undefined_value();
+ if (entry == kNotFound) return Heap::undefined_value();
return get(EntryToIndex(entry) + 1);
}
@@ -6756,64 +6912,10 @@ class SymbolsKey : public HashTableKey {
};
-// MapNameKeys are used as keys in lookup caches.
-class MapNameKey : public HashTableKey {
- public:
- MapNameKey(Map* map, String* name)
- : map_(map), name_(name) { }
-
- bool IsMatch(Object* other) {
- if (!other->IsFixedArray()) return false;
- FixedArray* pair = FixedArray::cast(other);
- Map* map = Map::cast(pair->get(0));
- if (map != map_) return false;
- String* name = String::cast(pair->get(1));
- return name->Equals(name_);
- }
-
- typedef uint32_t (*HashFunction)(Object* obj);
-
- virtual HashFunction GetHashFunction() { return MapNameHash; }
-
- static uint32_t MapNameHashHelper(Map* map, String* name) {
- // Uses only lower 32 bits if pointers are larger.
- uintptr_t addr_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
- return addr_hash ^ name->Hash();
- }
-
- static uint32_t MapNameHash(Object* obj) {
- FixedArray* pair = FixedArray::cast(obj);
- Map* map = Map::cast(pair->get(0));
- String* name = String::cast(pair->get(1));
- return MapNameHashHelper(map, name);
- }
-
- virtual uint32_t Hash() {
- return MapNameHashHelper(map_, name_);
- }
-
- virtual Object* GetObject() {
- Object* obj = Heap::AllocateFixedArray(2);
- if (obj->IsFailure()) return obj;
- FixedArray* pair = FixedArray::cast(obj);
- pair->set(0, map_);
- pair->set(1, name_);
- return pair;
- }
-
- virtual bool IsStringKey() { return false; }
-
- private:
- Map* map_;
- String* name_;
-};
-
-
Object* MapCache::Lookup(FixedArray* array) {
SymbolsKey key(array);
int entry = FindEntry(&key);
- if (entry == -1) return Heap::undefined_value();
+ if (entry == kNotFound) return Heap::undefined_value();
return get(EntryToIndex(entry) + 1);
}
@@ -6832,31 +6934,6 @@ Object* MapCache::Put(FixedArray* array, Map* value) {
}
-int LookupCache::Lookup(Map* map, String* name) {
- MapNameKey key(map, name);
- int entry = FindEntry(&key);
- if (entry == -1) return kNotFound;
- return Smi::cast(get(EntryToIndex(entry) + 1))->value();
-}
-
-
-Object* LookupCache::Put(Map* map, String* name, int value) {
- MapNameKey key(map, name);
- Object* obj = EnsureCapacity(1, &key);
- if (obj->IsFailure()) return obj;
- Object* k = key.GetObject();
- if (k->IsFailure()) return k;
-
- LookupCache* cache = reinterpret_cast<LookupCache*>(obj);
- int entry = cache->FindInsertionEntry(k, key.Hash());
- int index = EntryToIndex(entry);
- cache->set(index, k);
- cache->set(index + 1, Smi::FromInt(value), SKIP_WRITE_BARRIER);
- cache->ElementAdded();
- return cache;
-}
-
-
Object* Dictionary::Allocate(int at_least_space_for) {
Object* obj = DictionaryBase::Allocate(at_least_space_for);
// Initialize the next enumeration index.
@@ -6989,7 +7066,7 @@ Object* Dictionary::AtPut(HashTableKey* key, Object* value) {
int entry = FindEntry(key);
// If the entry is present set the value;
- if (entry != -1) {
+ if (entry != kNotFound) {
ValueAtPut(entry, value);
return this;
}
@@ -7062,7 +7139,7 @@ Object* Dictionary::AddStringEntry(String* key,
Object* value,
PropertyDetails details) {
StringKey k(key);
- SLOW_ASSERT(FindEntry(&k) == -1);
+ SLOW_ASSERT(FindEntry(&k) == kNotFound);
return Add(&k, value, details);
}
@@ -7072,17 +7149,11 @@ Object* Dictionary::AddNumberEntry(uint32_t key,
PropertyDetails details) {
NumberKey k(key);
UpdateMaxNumberKey(key);
- SLOW_ASSERT(FindEntry(&k) == -1);
+ SLOW_ASSERT(FindEntry(&k) == kNotFound);
return Add(&k, value, details);
}
-Object* Dictionary::AtStringPut(String* key, Object* value) {
- StringKey k(key);
- return AtPut(&k, value);
-}
-
-
Object* Dictionary::AtNumberPut(uint32_t key, Object* value) {
NumberKey k(key);
UpdateMaxNumberKey(key);
@@ -7090,12 +7161,10 @@ Object* Dictionary::AtNumberPut(uint32_t key, Object* value) {
}
-Object* Dictionary::SetOrAddStringEntry(String* key,
- Object* value,
- PropertyDetails details) {
- StringKey k(key);
- int entry = FindEntry(&k);
- if (entry == -1) return AddStringEntry(key, value, details);
+Object* Dictionary::SetStringEntry(int entry,
+ String* key,
+ Object* value,
+ PropertyDetails details) {
// Preserve enumeration index.
details = PropertyDetails(details.attributes(),
details.type(),
@@ -7198,8 +7267,12 @@ Object* Dictionary::SlowReverseLookup(Object* value) {
int capacity = Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = KeyAt(i);
- if (IsKey(k) && ValueAt(i) == value) {
- return k;
+ if (IsKey(k)) {
+ Object* e = ValueAt(i);
+ if (e->IsJSGlobalPropertyCell()) {
+ e = JSGlobalPropertyCell::cast(e)->value();
+ }
+ if (e == value) return k;
}
}
return Heap::undefined_value();
diff --git a/V8Binding/v8/src/objects.h b/V8Binding/v8/src/objects.h
index 493d22b..775b6c7 100644
--- a/V8Binding/v8/src/objects.h
+++ b/V8Binding/v8/src/objects.h
@@ -59,7 +59,6 @@
// - SymbolTable
// - CompilationCacheTable
// - MapCache
-// - LookupCache
// - Context
// - GlobalContext
// - String
@@ -154,20 +153,23 @@ class PropertyDetails BASE_EMBEDDED {
int index() { return IndexField::decode(value_); }
+ inline PropertyDetails AsDeleted();
+
static bool IsValidIndex(int index) { return IndexField::is_valid(index); }
bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; }
bool IsDontEnum() { return (attributes() & DONT_ENUM) != 0; }
+ bool IsDeleted() { return DeletedField::decode(value_) != 0;}
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
class TypeField: public BitField<PropertyType, 0, 3> {};
class AttributesField: public BitField<PropertyAttributes, 3, 3> {};
- class IndexField: public BitField<uint32_t, 6, 32-6> {};
+ class DeletedField: public BitField<uint32_t, 6, 1> {};
+ class IndexField: public BitField<uint32_t, 7, 31-7> {};
static const int kInitialIndex = 1;
-
private:
uint32_t value_;
};
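
With DeletedField squeezed in, value_ packs the type in bits 0-2, attributes in bits 3-5, the deleted flag in bit 6, and the enumeration index in bits 7-30. A minimal re-implementation of the BitField encode/decode arithmetic (own template for illustration, mirroring the layout above):

    #include <cassert>
    #include <cstdint>

    // Minimal BitField re-implementation for illustration.
    template <class T, int shift, int size>
    struct BitField {
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value >> shift) & ((1u << size) - 1));
      }
    };

    // Same bit positions as the patched PropertyDetails layout.
    typedef BitField<uint32_t, 0, 3>      TypeField;        // bits 0-2
    typedef BitField<uint32_t, 3, 3>      AttributesField;  // bits 3-5
    typedef BitField<uint32_t, 6, 1>      DeletedField;     // bit 6
    typedef BitField<uint32_t, 7, 31 - 7> IndexField;       // bits 7-30

    int main() {
      uint32_t v = TypeField::encode(2) | AttributesField::encode(5) |
                   DeletedField::encode(1) | IndexField::encode(42);
      assert(TypeField::decode(v) == 2);
      assert(DeletedField::decode(v) == 1);
      assert(IndexField::decode(v) == 42);
      return 0;
    }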
@@ -264,6 +266,7 @@ enum PropertyNormalizationMode {
V(HEAP_NUMBER_TYPE) \
V(FIXED_ARRAY_TYPE) \
V(CODE_TYPE) \
+ V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
V(ODDBALL_TYPE) \
V(PROXY_TYPE) \
V(BYTE_ARRAY_TYPE) \
@@ -548,6 +551,7 @@ enum InstanceType {
FIXED_ARRAY_TYPE,
CODE_TYPE,
ODDBALL_TYPE,
+ JS_GLOBAL_PROPERTY_CELL_TYPE,
PROXY_TYPE,
BYTE_ARRAY_TYPE,
FILLER_TYPE,
@@ -678,7 +682,6 @@ class Object BASE_EMBEDDED {
inline bool IsSymbolTable();
inline bool IsCompilationCacheTable();
inline bool IsMapCache();
- inline bool IsLookupCache();
inline bool IsPrimitive();
inline bool IsGlobalObject();
inline bool IsJSGlobalObject();
@@ -686,6 +689,7 @@ class Object BASE_EMBEDDED {
inline bool IsJSGlobalProxy();
inline bool IsUndetectableObject();
inline bool IsAccessCheckNeeded();
+ inline bool IsJSGlobalPropertyCell();
// Returns true if this object is an instance of the specified
// function template.
@@ -1162,8 +1166,28 @@ class HeapNumber: public HeapObject {
// Layout description.
static const int kValueOffset = HeapObject::kHeaderSize;
+ // IEEE doubles are two 32 bit words. The first is just mantissa, the second
+ // is a mixture of sign, exponent and mantissa. Our current platforms are all
+ // little endian apart from non-EABI arm which is little endian with big
+ // endian floating point word ordering!
+#if !defined(V8_HOST_ARCH_ARM) || __ARM_EABI__
+ static const int kMantissaOffset = kValueOffset;
+ static const int kExponentOffset = kValueOffset + 4;
+#else
+ static const int kMantissaOffset = kValueOffset + 4;
+ static const int kExponentOffset = kValueOffset;
+# define BIG_ENDIAN_FLOATING_POINT 1
+#endif
static const int kSize = kValueOffset + kDoubleSize;
+ static const uint32_t kSignMask = 0x80000000u;
+ static const uint32_t kExponentMask = 0x7ff00000u;
+ static const uint32_t kMantissaMask = 0xfffffu;
+ static const int kExponentBias = 1023;
+ static const int kExponentShift = 20;
+ static const int kMantissaBitsInTopWord = 20;
+ static const int kNonMantissaBitsInTopWord = 12;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumber);
};
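
These constants let code pick a double apart as two 32-bit words instead of calling out to the C library. A hedged host-side sketch of the same decomposition, assuming the little-endian word order of the non-ARM branch above:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Mirrors the patched HeapNumber constants.
    static const uint32_t kSignMask     = 0x80000000u;
    static const uint32_t kExponentMask = 0x7ff00000u;
    static const int kExponentBias  = 1023;
    static const int kExponentShift = 20;

    int main() {
      double d = -2.0;
      uint32_t words[2];
      memcpy(words, &d, sizeof(d));    // words[1] is the sign/exponent word on
      uint32_t top = words[1];         // little-endian hosts (kExponentOffset).
      assert((top & kSignMask) != 0);  // Sign bit set: negative.
      int exponent =
          static_cast<int>((top & kExponentMask) >> kExponentShift) -
          kExponentBias;
      assert(exponent == 1);           // -2.0 == -1.0 * 2^1.
      return 0;
    }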
@@ -1175,6 +1199,8 @@ class HeapNumber: public HeapObject {
// caching.
class JSObject: public HeapObject {
public:
+ enum DeleteMode { NORMAL_DELETION, FORCE_DELETION };
+
// [properties]: Backing storage for properties.
// properties is a FixedArray in the fast case, and a Dictionary in the
// slow case.
@@ -1225,6 +1251,23 @@ class JSObject: public HeapObject {
Object* value,
PropertyAttributes attributes);
+ // Retrieve a value in a normalized object given a lookup result.
+ // Handles the special representation of JS global objects.
+ Object* GetNormalizedProperty(LookupResult* result);
+
+ // Sets the property value in a normalized object given a lookup result.
+ // Handles the special representation of JS global objects.
+ Object* SetNormalizedProperty(LookupResult* result, Object* value);
+
+ // Sets the property value in a normalized object given (key, value, details).
+ // Handles the special representation of JS global objects.
+ Object* SetNormalizedProperty(String* name,
+ Object* value,
+ PropertyDetails details);
+
+ // Deletes the named property in a normalized object.
+ Object* DeleteNormalizedProperty(String* name, DeleteMode mode);
+
// Sets a property that currently has lazy loading.
Object* SetLazyProperty(LookupResult* result,
String* name,
@@ -1275,7 +1318,6 @@ class JSObject: public HeapObject {
return GetLocalPropertyAttribute(name) != ABSENT;
}
- enum DeleteMode { NORMAL_DELETION, FORCE_DELETION };
Object* DeleteProperty(String* name, DeleteMode mode);
Object* DeleteElement(uint32_t index, DeleteMode mode);
Object* DeleteLazyProperty(LookupResult* result,
@@ -1518,7 +1560,7 @@ class JSObject: public HeapObject {
private:
Object* SetElementWithInterceptor(uint32_t index, Object* value);
- Object* SetElementPostInterceptor(uint32_t index, Object* value);
+ Object* SetElementWithoutInterceptor(uint32_t index, Object* value);
Object* GetElementPostInterceptor(JSObject* receiver, uint32_t index);
@@ -1621,6 +1663,9 @@ class FixedArray: public Array {
// Garbage collection support.
static int SizeFor(int length) { return kHeaderSize + length * kPointerSize; }
+ // Code Generation support.
+ static int OffsetOfElementAt(int index) { return SizeFor(index); }
+
// Casting.
static inline FixedArray* cast(Object* obj);
@@ -1909,6 +1954,9 @@ class HashTable: public FixedArray {
static const int kElementsStartOffset =
kHeaderSize + kElementsStartIndex * kPointerSize;
+ // Constant used for denoting an absent entry.
+ static const int kNotFound = -1;
+
protected:
// Find entry for key otherwise return -1.
int FindEntry(HashTableKey* key);
@@ -1992,27 +2040,6 @@ class MapCache: public HashTable<0, 2> {
};
-// LookupCache.
-//
-// Maps a key consisting of a map and a name to an index within a
-// fast-case properties array.
-//
-// LookupCaches are used to avoid repeatedly searching instance
-// descriptors.
-class LookupCache: public HashTable<0, 2> {
- public:
- int Lookup(Map* map, String* name);
- Object* Put(Map* map, String* name, int offset);
- static inline LookupCache* cast(Object* obj);
-
- // Constant returned by Lookup when the key was not found.
- static const int kNotFound = -1;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(LookupCache);
-};
-
-
// Dictionary for keeping properties and elements in slow case.
//
// One element in the prefix is used for storing non-element
@@ -2027,7 +2054,9 @@ class DictionaryBase: public HashTable<2, 3> {};
class Dictionary: public DictionaryBase {
public:
// Returns the value at entry.
- Object* ValueAt(int entry) { return get(EntryToIndex(entry)+1); }
+ Object* ValueAt(int entry) {
+ return get(EntryToIndex(entry)+1);
+ }
// Set the value for entry.
void ValueAtPut(int entry, Object* value) {
@@ -2036,6 +2065,7 @@ class Dictionary: public DictionaryBase {
// Returns the property details for the property at entry.
PropertyDetails DetailsAt(int entry) {
+ ASSERT(entry >= 0); // Not found is -1, which is not caught by get().
return PropertyDetails(Smi::cast(get(EntryToIndex(entry) + 2)));
}
@@ -2063,16 +2093,16 @@ class Dictionary: public DictionaryBase {
Object* DeleteProperty(int entry, JSObject::DeleteMode mode);
// Type specific at put (default NONE attributes is used when adding).
- Object* AtStringPut(String* key, Object* value);
Object* AtNumberPut(uint32_t key, Object* value);
Object* AddStringEntry(String* key, Object* value, PropertyDetails details);
Object* AddNumberEntry(uint32_t key, Object* value, PropertyDetails details);
// Set an existing entry or add a new one if needed.
- Object* SetOrAddStringEntry(String* key,
- Object* value,
- PropertyDetails details);
+ Object* SetStringEntry(int entry,
+ String* key,
+ Object* value,
+ PropertyDetails details);
Object* SetOrAddNumberEntry(uint32_t key,
Object* value,
@@ -2251,6 +2281,7 @@ class Code: public HeapObject {
// Printing
static const char* Kind2String(Kind kind);
static const char* ICState2String(InlineCacheState state);
+ static const char* PropertyType2String(PropertyType type);
void Disassemble(const char* name);
#endif // ENABLE_DISASSEMBLER
@@ -2273,7 +2304,7 @@ class Code: public HeapObject {
// [flags]: Access to specific code flags.
inline Kind kind();
inline InlineCacheState ic_state(); // Only valid for IC stubs.
- inline InLoopFlag ic_in_loop(); // Only valid for IC stubs..
+ inline InLoopFlag ic_in_loop(); // Only valid for IC stubs.
inline PropertyType type(); // Only valid for monomorphic IC stubs.
inline int arguments_count(); // Only valid for call IC stubs.
@@ -2470,7 +2501,7 @@ class Map: public HeapObject {
return ((1 << kIsHiddenPrototype) & bit_field()) != 0;
}
- // Tells whether the instance has a named interceptor.
+ // Records and queries whether the instance has a named interceptor.
inline void set_has_named_interceptor() {
set_bit_field(bit_field() | (1 << kHasNamedInterceptor));
}
@@ -2479,7 +2510,7 @@ class Map: public HeapObject {
return ((1 << kHasNamedInterceptor) & bit_field()) != 0;
}
- // Tells whether the instance has a named interceptor.
+ // Records and queries whether the instance has an indexed interceptor.
inline void set_has_indexed_interceptor() {
set_bit_field(bit_field() | (1 << kHasIndexedInterceptor));
}
@@ -2654,16 +2685,16 @@ class Script: public Struct {
public:
// Script types.
enum Type {
- TYPE_NATIVE,
- TYPE_EXTENSION,
- TYPE_NORMAL
+ TYPE_NATIVE = 0,
+ TYPE_EXTENSION = 1,
+ TYPE_NORMAL = 2
};
// Script compilation types.
enum CompilationType {
- COMPILATION_TYPE_HOST,
- COMPILATION_TYPE_EVAL,
- COMPILATION_TYPE_JSON
+ COMPILATION_TYPE_HOST = 0,
+ COMPILATION_TYPE_EVAL = 1,
+ COMPILATION_TYPE_JSON = 2
};
// [source]: the script source.
@@ -2746,6 +2777,9 @@ class SharedFunctionInfo: public HeapObject {
// [code]: Function code.
DECL_ACCESSORS(code, Code)
+ // [construct stub]: Code stub for constructing instances of this function.
+ DECL_ACCESSORS(construct_stub, Code)
+
// Returns if this function has been compiled to native code yet.
inline bool is_compiled();
@@ -2841,7 +2875,8 @@ class SharedFunctionInfo: public HeapObject {
// (An even number of integers has a size that is a multiple of a pointer.)
static const int kNameOffset = HeapObject::kHeaderSize;
static const int kCodeOffset = kNameOffset + kPointerSize;
- static const int kLengthOffset = kCodeOffset + kPointerSize;
+ static const int kConstructStubOffset = kCodeOffset + kPointerSize;
+ static const int kLengthOffset = kConstructStubOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kIntSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kIntSize;
@@ -3043,6 +3078,10 @@ class GlobalObject: public JSObject {
// JavaScript global object.
class JSGlobalObject: public GlobalObject {
public:
+
+ // Retrieve the property cell used to store a property.
+ Object* GetPropertyCell(LookupResult* result);
+
// Casting.
static inline JSGlobalObject* cast(Object* obj);
@@ -3931,6 +3970,31 @@ class Oddball: public HeapObject {
};
+class JSGlobalPropertyCell: public HeapObject {
+ public:
+ // [value]: value of the global property.
+ DECL_ACCESSORS(value, Object)
+
+ // Casting.
+ static inline JSGlobalPropertyCell* cast(Object* obj);
+
+ // Dispatched behavior.
+ void JSGlobalPropertyCellIterateBody(ObjectVisitor* v);
+#ifdef DEBUG
+ void JSGlobalPropertyCellVerify();
+ void JSGlobalPropertyCellPrint();
+#endif
+
+ // Layout description.
+ static const int kValueOffset = HeapObject::kHeaderSize;
+ static const int kSize = kValueOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
+};
+
+
+
// Proxy describes objects pointing from JavaScript to C structures.
// Since they cannot contain references to JS HeapObjects they can be
// placed in old_data_space.
@@ -3985,7 +4049,7 @@ class JSArray: public JSObject {
// Uses handles. Ensures that the fixed array backing the JSArray has at
// least the stated size.
- void EnsureSize(int minimum_size_of_backing_fixed_array);
+ inline void EnsureSize(int minimum_size_of_backing_fixed_array);
// Dispatched behavior.
#ifdef DEBUG
@@ -3998,6 +4062,10 @@ class JSArray: public JSObject {
static const int kSize = kLengthOffset + kPointerSize;
private:
+ // Expand the fixed array backing of a fast-case JSArray to at least
+ // the requested size.
+ void Expand(int minimum_size_of_backing_fixed_array);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
};
@@ -4008,10 +4076,9 @@ class JSArray: public JSObject {
// If an accessor was found and it does not have a setter,
// the request is ignored.
//
-// To allow shadow an accessor property, the accessor can
-// have READ_ONLY property attribute so that a new value
-// is added to the local object to shadow the accessor
-// in prototypes.
+// If the accessor in the prototype has the READ_ONLY property attribute, then
+// a new value is added to the local object when the property is set.
+// This shadows the accessor in the prototype.
class AccessorInfo: public Struct {
public:
DECL_ACCESSORS(getter, Object)
diff --git a/V8Binding/v8/src/oprofile-agent.cc b/V8Binding/v8/src/oprofile-agent.cc
index c4595b4..8aa3937 100644
--- a/V8Binding/v8/src/oprofile-agent.cc
+++ b/V8Binding/v8/src/oprofile-agent.cc
@@ -52,6 +52,10 @@ bool OProfileAgent::Initialize() {
return true;
}
#else
+ if (FLAG_oprofile) {
+ OS::Print("Warning: --oprofile specified but binary compiled without "
+ "oprofile support.\n");
+ }
return true;
#endif
}
diff --git a/V8Binding/v8/src/parser.cc b/V8Binding/v8/src/parser.cc
index 271c3fd..2b4be79 100644
--- a/V8Binding/v8/src/parser.cc
+++ b/V8Binding/v8/src/parser.cc
@@ -1582,7 +1582,8 @@ VariableProxy* AstBuildingParser::Declare(Handle<String> name,
// For global const variables we bind the proxy to a variable.
if (mode == Variable::CONST && top_scope_->is_global_scope()) {
ASSERT(resolve); // should be set by all callers
- var = NEW(Variable(top_scope_, name, Variable::CONST, true, false));
+ Variable::Kind kind = Variable::NORMAL;
+ var = NEW(Variable(top_scope_, name, Variable::CONST, true, kind));
}
// If requested and we have a local variable, bind the proxy to the variable
@@ -2647,6 +2648,26 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
}
}
+ // Convert constant divisions to multiplications for speed.
+ if (op == Token::DIV &&
+ y && y->AsLiteral() && y->AsLiteral()->handle()->IsNumber()) {
+ double y_val = y->AsLiteral()->handle()->Number();
+ int64_t y_int = static_cast<int64_t>(y_val);
+ // There are rounding issues with this optimization, but they don't
+ // apply if the divisor has a reciprocal that can be
+ // precisely represented as a floating point number. This is the case
+ // if the number is an integer power of 2. Negative integer powers of
+ // 2 work too, but for -2, -1, 1 and 2 we don't do the strength
+ // reduction because the inlined optimistic idiv has a reasonable
+ // chance of succeeding by producing a Smi answer with no remainder.
+ if (static_cast<double>(y_int) == y_val &&
+ (IsPowerOf2(y_int) || IsPowerOf2(-y_int)) &&
+ (y_int > 2 || y_int < -2)) {
+ y = NewNumberLiteral(1 / y_val);
+ op = Token::MUL;
+ }
+ }
+
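+ The rewrite is sound because the reciprocal of an integer power of two is itself exactly representable, so the multiply reproduces the division bit for bit; +/-1 and +/-2 are excluded to leave the inlined optimistic idiv a chance to produce a Smi. A standalone check of the exactness claim:

    #include <cassert>
    #include <cmath>

    int main() {
      // For divisors y = +/-2^k with k > 1, 1/y is exact, so x * (1/y)
      // matches x / y bit for bit (no rounding issue).
      double x = 12345.6789;
      for (int k = 2; k <= 30; k++) {
        double y = std::ldexp(1.0, k);  // y = 2^k.
        assert(x / y == x * (1.0 / y));
        assert(x / -y == x * (1.0 / -y));
      }
      return 0;
    }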
// For now we distinguish between comparisons and other binary
// operations. (We could combine the two and get rid of this
// code and AST node eventually.)
diff --git a/V8Binding/v8/src/platform-linux.cc b/V8Binding/v8/src/platform-linux.cc
index 79ffe81..39495ab 100644
--- a/V8Binding/v8/src/platform-linux.cc
+++ b/V8Binding/v8/src/platform-linux.cc
@@ -224,8 +224,8 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
#ifdef ENABLE_LOGGING_AND_PROFILING
-static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
+static uintptr_t StringToULong(char* buffer) {
+ return strtoul(buffer, NULL, 16); // NOLINT
}
#endif
@@ -242,13 +242,13 @@ void OS::LogSharedLibraryAddresses() {
addr_buffer[10] = 0;
int result = read(fd, addr_buffer + 2, 8);
if (result < 8) break;
- unsigned start = StringToLong(addr_buffer);
+ uintptr_t start = StringToULong(addr_buffer);
result = read(fd, addr_buffer + 2, 1);
if (result < 1) break;
if (addr_buffer[2] != '-') break;
result = read(fd, addr_buffer + 2, 8);
if (result < 8) break;
- unsigned end = StringToLong(addr_buffer);
+ uintptr_t end = StringToULong(addr_buffer);
char buffer[MAP_LENGTH];
int bytes_read = -1;
do {
@@ -262,10 +262,21 @@ void OS::LogSharedLibraryAddresses() {
// Ignore mappings that are not executable.
if (buffer[3] != 'x') continue;
char* start_of_path = index(buffer, '/');
- // There may be no filename in this line. Skip to next.
- if (start_of_path == NULL) continue;
- buffer[bytes_read] = 0;
- LOG(SharedLibraryEvent(start_of_path, start, end));
+ // If there is no filename for this line then log it as an anonymous
+ // mapping and use the address as its name.
+ if (start_of_path == NULL) {
+ // 40 is enough to print a 64 bit address range.
+ ASSERT(sizeof(buffer) > 40);
+ snprintf(buffer,
+ sizeof(buffer),
+ "%08" V8PRIxPTR "-%08" V8PRIxPTR,
+ start,
+ end);
+ LOG(SharedLibraryEvent(buffer, start, end));
+ } else {
+ buffer[bytes_read] = 0;
+ LOG(SharedLibraryEvent(start_of_path, start, end));
+ }
}
close(fd);
#endif
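
The anonymous-mapping branch above synthesizes a name from the address range; V8PRIxPTR is V8's portable printf format for uintptr_t. An equivalent sketch using the standard PRIxPTR macro:

    #include <cinttypes>
    #include <cstdio>

    int main() {
      uintptr_t start = 0xb7400000u;
      uintptr_t end   = 0xb7500000u;
      char buffer[40];  // Enough for a 64-bit "start-end" range.
      snprintf(buffer, sizeof(buffer),
               "%08" PRIxPTR "-%08" PRIxPTR, start, end);
      printf("%s\n", buffer);  // Prints "b7400000-b7500000".
      return 0;
    }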
diff --git a/V8Binding/v8/src/platform-macos.cc b/V8Binding/v8/src/platform-macos.cc
index 3e0e284..f5b6458 100644
--- a/V8Binding/v8/src/platform-macos.cc
+++ b/V8Binding/v8/src/platform-macos.cc
@@ -35,10 +35,6 @@
#include <AvailabilityMacros.h>
-#ifdef MAC_OS_X_VERSION_10_5
-# include <execinfo.h> // backtrace, backtrace_symbols
-#endif
-
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
@@ -58,6 +54,17 @@
#include "platform.h"
+// Manually define these here as weak imports, rather than including execinfo.h.
+// This lets us launch on 10.4 which does not have these calls.
+extern "C" {
+ extern int backtrace(void**, int) __attribute__((weak_import));
+ extern char** backtrace_symbols(void* const*, int)
+ __attribute__((weak_import));
+ extern void backtrace_symbols_fd(void* const*, int, int)
+ __attribute__((weak_import));
+}
+
+
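+ Declaring the symbols weak means the dynamic linker resolves them to NULL when the library lacks them, which is what the StackWalk guard below tests. A reduced sketch of the pattern (Darwin/GCC specific; it will not build unchanged elsewhere):

    #include <cstddef>

    // Same weak-import pattern as above: if the symbol is absent at load
    // time, its address evaluates to NULL.
    extern "C" int backtrace(void**, int) __attribute__((weak_import));

    int SafeBacktraceDepth() {
      if (backtrace == NULL) return 0;  // e.g. running on Mac OS X 10.4.
      void* frames[64];
      return backtrace(frames, 64);
    }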
namespace v8 {
namespace internal {
@@ -214,9 +221,10 @@ int OS::ActivationFrameAlignment() {
int OS::StackWalk(Vector<StackFrame> frames) {
-#ifndef MAC_OS_X_VERSION_10_5
- return 0;
-#else
+ // If the weak link to the execinfo lib has failed, i.e. we are on 10.4, bail out.
+ if (backtrace == NULL)
+ return 0;
+
int frames_size = frames.length();
void** addresses = NewArray<void*>(frames_size);
int frames_count = backtrace(addresses, frames_size);
@@ -244,7 +252,6 @@ int OS::StackWalk(Vector<StackFrame> frames) {
free(symbols);
return frames_count;
-#endif
}
@@ -500,7 +507,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
#endif // __DARWIN_UNIX03
#else
#error Unsupported Mac OS X host architecture.
-#endif // V8_TARGET_ARCH_IA32
+#endif // V8_HOST_ARCH_IA32
}
// We always sample the VM state.
diff --git a/V8Binding/v8/src/platform.h b/V8Binding/v8/src/platform.h
index 4522c74..b5123c5 100644
--- a/V8Binding/v8/src/platform.h
+++ b/V8Binding/v8/src/platform.h
@@ -44,6 +44,8 @@
#ifndef V8_PLATFORM_H_
#define V8_PLATFORM_H_
+#define V8_INFINITY INFINITY
+
// Windows specific stuff.
#ifdef WIN32
@@ -58,7 +60,8 @@ enum {
FP_NORMAL
};
-#define INFINITY HUGE_VAL
+#undef V8_INFINITY
+#define V8_INFINITY HUGE_VAL
namespace v8 {
namespace internal {
@@ -75,14 +78,6 @@ int strncasecmp(const char* s1, const char* s2, int n);
#endif // _MSC_VER
-// MinGW specific stuff.
-#ifdef __MINGW32__
-
-// Needed for va_list.
-#include <stdarg.h>
-
-#endif // __MINGW32__
-
// Random is missing on both Visual Studio and MinGW.
int random();
@@ -90,6 +85,10 @@ int random();
// GCC specific stuff
#ifdef __GNUC__
+
+// Needed for va_list on at least MinGW and Android.
+#include <stdarg.h>
+
#define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
@@ -100,8 +99,8 @@ int random();
// __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro
#if __GNUC_VERSION__ >= 29600 && __GNUC_VERSION__ < 40100
#include <limits>
-#undef INFINITY
-#define INFINITY std::numeric_limits<double>::infinity()
+#undef V8_INFINITY
+#define V8_INFINITY std::numeric_limits<double>::infinity()
#endif
#endif // __GNUC__
@@ -109,6 +108,8 @@ int random();
namespace v8 {
namespace internal {
+class Semaphore;
+
double ceiling(double x);
// Forward declarations.
diff --git a/V8Binding/v8/src/property.h b/V8Binding/v8/src/property.h
index edab97a..851bae2 100644
--- a/V8Binding/v8/src/property.h
+++ b/V8Binding/v8/src/property.h
@@ -230,6 +230,7 @@ class LookupResult BASE_EMBEDDED {
bool IsReadOnly() { return details_.IsReadOnly(); }
bool IsDontDelete() { return details_.IsDontDelete(); }
bool IsDontEnum() { return details_.IsDontEnum(); }
+ bool IsDeleted() { return details_.IsDeleted(); }
bool IsValid() { return lookup_type_ != NOT_FOUND; }
bool IsNotFound() { return lookup_type_ == NOT_FOUND; }
@@ -256,8 +257,14 @@ class LookupResult BASE_EMBEDDED {
switch (type()) {
case FIELD:
return holder()->FastPropertyAt(GetFieldIndex());
- case NORMAL:
- return holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
+ case NORMAL: {
+ Object* value;
+ value = holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
+ if (holder()->IsJSGlobalObject()) {
+ value = JSGlobalPropertyCell::cast(value)->value();
+ }
+ return value;
+ }
case CONSTANT_FUNCTION:
return GetConstantFunction();
default:
@@ -306,7 +313,7 @@ class LookupResult BASE_EMBEDDED {
}
// In the dictionary case, the data is held in the value field.
ASSERT(lookup_type_ == DICTIONARY_TYPE);
- return holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
+ return holder()->GetNormalizedProperty(this);
}
private:
diff --git a/V8Binding/v8/src/regexp-delay.js b/V8Binding/v8/src/regexp-delay.js
index 8491863..14c3644 100644
--- a/V8Binding/v8/src/regexp-delay.js
+++ b/V8Binding/v8/src/regexp-delay.js
@@ -103,7 +103,7 @@ function DoConstructRegExp(object, pattern, flags, isConstructorCall) {
function RegExpConstructor(pattern, flags) {
- if (%IsConstructCall()) {
+ if (%_IsConstructCall()) {
DoConstructRegExp(this, pattern, flags, true);
} else {
// RegExp : Called as function; see ECMA-262, section 15.10.3.1.
diff --git a/V8Binding/v8/src/regexp-macro-assembler-irregexp.cc b/V8Binding/v8/src/regexp-macro-assembler-irregexp.cc
index b87c51f..eea3c23 100644
--- a/V8Binding/v8/src/regexp-macro-assembler-irregexp.cc
+++ b/V8Binding/v8/src/regexp-macro-assembler-irregexp.cc
@@ -47,6 +47,7 @@ RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer)
RegExpMacroAssemblerIrregexp::~RegExpMacroAssemblerIrregexp() {
if (backtrack_.is_linked()) backtrack_.Unuse();
+ if (own_buffer_) buffer_.Dispose();
}
diff --git a/V8Binding/v8/src/register-allocator.cc b/V8Binding/v8/src/register-allocator.cc
index 2599232..d1b08bb 100644
--- a/V8Binding/v8/src/register-allocator.cc
+++ b/V8Binding/v8/src/register-allocator.cc
@@ -40,18 +40,7 @@ namespace internal {
Result::Result(Register reg) {
ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
CodeGeneratorScope::Current()->allocator()->Use(reg);
- value_ = StaticTypeField::encode(StaticType::UNKNOWN_TYPE)
- | TypeField::encode(REGISTER)
- | DataField::encode(reg.code_);
-}
-
-
-Result::Result(Register reg, StaticType type) {
- ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
- CodeGeneratorScope::Current()->allocator()->Use(reg);
- value_ = StaticTypeField::encode(type.static_type_)
- | TypeField::encode(REGISTER)
- | DataField::encode(reg.code_);
+ value_ = TypeField::encode(REGISTER) | DataField::encode(reg.code_);
}
diff --git a/V8Binding/v8/src/register-allocator.h b/V8Binding/v8/src/register-allocator.h
index c539191..f7167d9 100644
--- a/V8Binding/v8/src/register-allocator.h
+++ b/V8Binding/v8/src/register-allocator.h
@@ -45,80 +45,6 @@ namespace internal {
// -------------------------------------------------------------------------
-// StaticType
-//
-// StaticType represent the type of an expression or a word at runtime.
-// The types are ordered by knowledge, so that if a value can come about
-// in more than one way, and there are different static types inferred
-// for the different ways, the types can be combined to a type that we
-// are still certain of (possibly just "unknown").
-
-class StaticType BASE_EMBEDDED {
- public:
- StaticType() : static_type_(UNKNOWN_TYPE) {}
-
- static StaticType unknown() { return StaticType(); }
- static StaticType smi() { return StaticType(SMI_TYPE); }
- static StaticType jsstring() { return StaticType(STRING_TYPE); }
- static StaticType heap_object() { return StaticType(HEAP_OBJECT_TYPE); }
-
- // Accessors
- bool is_unknown() { return static_type_ == UNKNOWN_TYPE; }
- bool is_smi() { return static_type_ == SMI_TYPE; }
- bool is_heap_object() { return (static_type_ & HEAP_OBJECT_TYPE) != 0; }
- bool is_jsstring() { return static_type_ == STRING_TYPE; }
-
- bool operator==(StaticType other) const {
- return static_type_ == other.static_type_;
- }
-
- // Find the best approximating type for a value.
- // The argument must not be NULL.
- static StaticType TypeOf(Object* object) {
- // Remember to make the most specific tests first. A string is also a heap
- // object, so test for string-ness first.
- if (object->IsSmi()) return smi();
- if (object->IsString()) return jsstring();
- if (object->IsHeapObject()) return heap_object();
- return unknown();
- }
-
- // Merges two static types to a type that combines the knowledge
- // of both. If there is no way to combine (e.g., being a string *and*
- // being a smi), the resulting type is unknown.
- StaticType merge(StaticType other) {
- StaticType x(
- static_cast<StaticTypeEnum>(static_type_ & other.static_type_));
- return x;
- }
-
- private:
- enum StaticTypeEnum {
- // Numbers are chosen so that least upper bound of the following
- // partial order is implemented by bitwise "and":
- //
- // string
- // |
- // heap-object smi
- // \ /
- // unknown
- //
- UNKNOWN_TYPE = 0x00,
- SMI_TYPE = 0x01,
- HEAP_OBJECT_TYPE = 0x02,
- STRING_TYPE = 0x04 | HEAP_OBJECT_TYPE
- };
- explicit StaticType(StaticTypeEnum static_type) : static_type_(static_type) {}
-
- // StaticTypeEnum static_type_;
- StaticTypeEnum static_type_;
-
- friend class FrameElement;
- friend class Result;
-};
-
-
-// -------------------------------------------------------------------------
// Results
//
// Results encapsulate the compile-time values manipulated by the code
@@ -138,13 +64,9 @@ class Result BASE_EMBEDDED {
// Construct a register Result.
explicit Result(Register reg);
- // Construct a register Result with a known static type.
- Result(Register reg, StaticType static_type);
-
// Construct a Result whose value is a compile-time constant.
explicit Result(Handle<Object> value) {
- value_ = StaticTypeField::encode(StaticType::TypeOf(*value).static_type_)
- | TypeField::encode(CONSTANT)
+ value_ = TypeField::encode(CONSTANT)
| DataField::encode(ConstantList()->length());
ConstantList()->Add(value);
}
@@ -182,15 +104,6 @@ class Result BASE_EMBEDDED {
inline void Unuse();
- StaticType static_type() const {
- return StaticType(StaticTypeField::decode(value_));
- }
-
- void set_static_type(StaticType type) {
- value_ = value_ & ~StaticTypeField::mask();
- value_ = value_ | StaticTypeField::encode(type.static_type_);
- }
-
Type type() const { return TypeField::decode(value_); }
void invalidate() { value_ = TypeField::encode(INVALID); }
@@ -225,9 +138,8 @@ class Result BASE_EMBEDDED {
private:
uint32_t value_;
- class StaticTypeField: public BitField<StaticType::StaticTypeEnum, 0, 3> {};
- class TypeField: public BitField<Type, 3, 2> {};
- class DataField: public BitField<uint32_t, 5, 32 - 6> {};
+ class TypeField: public BitField<Type, 0, 2> {};
+ class DataField: public BitField<uint32_t, 2, 32 - 3> {};
inline void CopyTo(Result* destination) const;
diff --git a/V8Binding/v8/src/rewriter.cc b/V8Binding/v8/src/rewriter.cc
index e0a0226..4d1fbd9 100644
--- a/V8Binding/v8/src/rewriter.cc
+++ b/V8Binding/v8/src/rewriter.cc
@@ -283,7 +283,10 @@ void AstOptimizer::VisitAssignment(Assignment* node) {
case Token::ASSIGN:
// No type can be inferred from the general assignment.
- scoped_fni.Enter();
+ // Don't infer if it is "a = function(){...}();"-like expression.
+ if (node->value()->AsCall() == NULL) {
+ scoped_fni.Enter();
+ }
break;
case Token::ASSIGN_BIT_OR:
case Token::ASSIGN_BIT_XOR:
diff --git a/V8Binding/v8/src/runtime.cc b/V8Binding/v8/src/runtime.cc
index 78be512..28254f7 100644
--- a/V8Binding/v8/src/runtime.cc
+++ b/V8Binding/v8/src/runtime.cc
@@ -50,9 +50,8 @@ namespace v8 {
namespace internal {
-#define RUNTIME_ASSERT(value) do { \
- if (!(value)) return IllegalOperation(); \
-} while (false)
+#define RUNTIME_ASSERT(value) \
+ if (!(value)) return Top::ThrowIllegalOperation();
// Cast the given object to a value of the specified type and store
// it in a variable with the given name. If the object is not of the
@@ -97,11 +96,6 @@ namespace internal {
static StaticResource<StringInputBuffer> runtime_string_input_buffer;
-static Object* IllegalOperation() {
- return Top::Throw(Heap::illegal_access_symbol());
-}
-
-
static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
StackLimitCheck check;
if (check.HasOverflowed()) return Top::StackOverflow();
@@ -124,7 +118,8 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
}
}
mode = copy->GetWriteBarrierMode();
- for (int i = 0; i < copy->map()->inobject_properties(); i++) {
+ int nof = copy->map()->inobject_properties();
+ for (int i = 0; i < nof; i++) {
Object* value = copy->InObjectPropertyAt(i);
if (value->IsJSObject()) {
JSObject* jsObject = JSObject::cast(value);
@@ -418,48 +413,6 @@ static Object* Runtime_ClassOf(Arguments args) {
}
-static Object* Runtime_HasStringClass(Arguments args) {
- return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::String_symbol()));
-}
-
-
-static Object* Runtime_HasDateClass(Arguments args) {
- return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::Date_symbol()));
-}
-
-
-static Object* Runtime_HasArrayClass(Arguments args) {
- return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::Array_symbol()));
-}
-
-
-static Object* Runtime_HasFunctionClass(Arguments args) {
- return Heap::ToBoolean(
- args[0]->HasSpecificClassOf(Heap::function_class_symbol()));
-}
-
-
-static Object* Runtime_HasNumberClass(Arguments args) {
- return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::Number_symbol()));
-}
-
-
-static Object* Runtime_HasBooleanClass(Arguments args) {
- return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::Boolean_symbol()));
-}
-
-
-static Object* Runtime_HasArgumentsClass(Arguments args) {
- return Heap::ToBoolean(
- args[0]->HasSpecificClassOf(Heap::Arguments_symbol()));
-}
-
-
-static Object* Runtime_HasRegExpClass(Arguments args) {
- return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::RegExp_symbol()));
-}
-
-
static Object* Runtime_IsInPrototypeChain(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -522,12 +475,9 @@ static Object* Runtime_IsConstructCall(Arguments args) {
static Object* Runtime_RegExpCompile(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
- CONVERT_CHECKED(JSRegExp, raw_re, args[0]);
- Handle<JSRegExp> re(raw_re);
- CONVERT_CHECKED(String, raw_pattern, args[1]);
- Handle<String> pattern(raw_pattern);
- CONVERT_CHECKED(String, raw_flags, args[2]);
- Handle<String> flags(raw_flags);
+ CONVERT_ARG_CHECKED(JSRegExp, re, 0);
+ CONVERT_ARG_CHECKED(String, pattern, 1);
+ CONVERT_ARG_CHECKED(String, flags, 2);
Handle<Object> result = RegExpImpl::Compile(re, pattern, flags);
if (result.is_null()) return Failure::Exception();
return *result;
@@ -537,8 +487,7 @@ static Object* Runtime_RegExpCompile(Arguments args) {
static Object* Runtime_CreateApiFunction(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 1);
- CONVERT_CHECKED(FunctionTemplateInfo, raw_data, args[0]);
- Handle<FunctionTemplateInfo> data(raw_data);
+ CONVERT_ARG_CHECKED(FunctionTemplateInfo, data, 0);
return *Factory::CreateApiFunction(data);
}
@@ -627,9 +576,6 @@ static Object* Runtime_DeclareGlobals(Arguments args) {
// property as read-only, so we don't either.
PropertyAttributes base = is_eval ? NONE : DONT_DELETE;
- // Only optimize the object if we intend to add more than 5 properties.
- OptimizedObjectForAddingMultipleProperties ba(global, pairs->length()/2 > 5);
-
// Traverse the name/value pairs and set the properties.
int length = pairs->length();
for (int i = 0; i < length; i += 2) {
@@ -941,10 +887,8 @@ static Object* Runtime_InitializeConstGlobal(Arguments args) {
properties->set(index, *value);
}
} else if (type == NORMAL) {
- Dictionary* dictionary = global->property_dictionary();
- int entry = lookup.GetDictionaryEntry();
- if (dictionary->ValueAt(entry)->IsTheHole()) {
- dictionary->ValueAtPut(entry, *value);
+ if (global->GetNormalizedProperty(&lookup)->IsTheHole()) {
+ global->SetNormalizedProperty(&lookup, *value);
}
} else {
// Ignore re-initialization of constants that have already been
@@ -1034,10 +978,8 @@ static Object* Runtime_InitializeConstContextSlot(Arguments args) {
properties->set(index, *value);
}
} else if (type == NORMAL) {
- Dictionary* dictionary = context_ext->property_dictionary();
- int entry = lookup.GetDictionaryEntry();
- if (dictionary->ValueAt(entry)->IsTheHole()) {
- dictionary->ValueAtPut(entry, *value);
+ if (context_ext->GetNormalizedProperty(&lookup)->IsTheHole()) {
+ context_ext->SetNormalizedProperty(&lookup, *value);
}
} else {
// We should not reach here. Any real, named property should be
@@ -1066,15 +1008,12 @@ static Object* Runtime_InitializeConstContextSlot(Arguments args) {
static Object* Runtime_RegExpExec(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 4);
- CONVERT_CHECKED(JSRegExp, raw_regexp, args[0]);
- Handle<JSRegExp> regexp(raw_regexp);
- CONVERT_CHECKED(String, raw_subject, args[1]);
- Handle<String> subject(raw_subject);
+ CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_CHECKED(String, subject, 1);
// Due to the way the JS files are constructed this must be less than the
// length of a string, i.e. it is always a Smi. We check anyway for security.
CONVERT_CHECKED(Smi, index, args[2]);
- CONVERT_CHECKED(JSArray, raw_last_match_info, args[3]);
- Handle<JSArray> last_match_info(raw_last_match_info);
+ CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
RUNTIME_ASSERT(last_match_info->HasFastElements());
RUNTIME_ASSERT(index->value() >= 0);
RUNTIME_ASSERT(index->value() <= subject->length());
@@ -1168,6 +1107,21 @@ static Object* Runtime_FunctionGetScriptSourcePosition(Arguments args) {
}
+static Object* Runtime_FunctionGetPositionForOffset(Arguments args) {
+ ASSERT(args.length() == 2);
+
+ CONVERT_CHECKED(JSFunction, fun, args[0]);
+ CONVERT_NUMBER_CHECKED(int, offset, Int32, args[1]);
+
+ Code* code = fun->code();
+ RUNTIME_ASSERT(0 <= offset && offset < code->Size());
+
+ Address pc = code->address() + offset;
+ return Smi::FromInt(fun->code()->SourcePosition(pc));
+}
+
+
+
static Object* Runtime_FunctionSetInstanceClassName(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -1217,8 +1171,7 @@ static Object* Runtime_SetCode(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 2);
- CONVERT_CHECKED(JSFunction, raw_target, args[0]);
- Handle<JSFunction> target(raw_target);
+ CONVERT_ARG_CHECKED(JSFunction, target, 0);
Handle<Object> code = args.at<Object>(1);
Handle<Context> context(target->context());
@@ -2416,6 +2369,19 @@ static Object* Runtime_NumberToRadixString(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
+ // Fast case where the result is a one character string.
+ if (args[0]->IsSmi() && args[1]->IsSmi()) {
+ int value = Smi::cast(args[0])->value();
+ int radix = Smi::cast(args[1])->value();
+ if (value >= 0 && value < radix) {
+ RUNTIME_ASSERT(radix <= 36);
+ // Character array used for conversion.
+ static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+ return Heap::LookupSingleCharacterStringFromCode(kCharTable[value]);
+ }
+ }
+
+ // Slow case.
CONVERT_DOUBLE_CHECKED(value, args[0]);
if (isnan(value)) {
return Heap::AllocateStringFromAscii(CStrVector("NaN"));
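
The fast case above turns any single-digit result into a one-character string straight from the digit table. A tiny standalone sketch of that mapping:

    #include <cassert>

    // Single-digit fast case: values 0 <= value < radix <= 36 map straight
    // into the digit table.
    static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";

    char SingleDigit(int value, int radix) {
      assert(2 <= radix && radix <= 36);
      assert(0 <= value && value < radix);
      return kCharTable[value];
    }

    int main() {
      assert(SingleDigit(10, 16) == 'a');  // (10).toString(16) == "a"
      assert(SingleDigit(35, 36) == 'z');  // (35).toString(36) == "z"
      return 0;
    }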
@@ -2620,12 +2586,9 @@ static Object* Runtime_KeyedGetProperty(Arguments args) {
String* key = String::cast(args[1]);
if (receiver->HasFastProperties()) {
// Attempt to use lookup cache.
- Object* obj = Heap::GetKeyedLookupCache();
- if (obj->IsFailure()) return obj;
- LookupCache* cache = LookupCache::cast(obj);
Map* receiver_map = receiver->map();
- int offset = cache->Lookup(receiver_map, key);
- if (offset != LookupCache::kNotFound) {
+ int offset = KeyedLookupCache::Lookup(receiver_map, key);
+ if (offset != -1) {
Object* value = receiver->FastPropertyAt(offset);
return value->IsTheHole() ? Heap::undefined_value() : value;
}
@@ -2635,9 +2598,7 @@ static Object* Runtime_KeyedGetProperty(Arguments args) {
receiver->LocalLookup(key, &result);
if (result.IsProperty() && result.IsLoaded() && result.type() == FIELD) {
int offset = result.GetFieldIndex();
- Object* obj = cache->Put(receiver_map, key, offset);
- if (obj->IsFailure()) return obj;
- Heap::SetKeyedLookupCache(LookupCache::cast(obj));
+ KeyedLookupCache::Update(receiver_map, key, offset);
Object* value = receiver->FastPropertyAt(offset);
return value->IsTheHole() ? Heap::undefined_value() : value;
}
@@ -2645,9 +2606,13 @@ static Object* Runtime_KeyedGetProperty(Arguments args) {
// Attempt dictionary lookup.
Dictionary* dictionary = receiver->property_dictionary();
int entry = dictionary->FindStringEntry(key);
- if ((entry != DescriptorArray::kNotFound) &&
+ if ((entry != Dictionary::kNotFound) &&
(dictionary->DetailsAt(entry).type() == NORMAL)) {
- return dictionary->ValueAt(entry);
+ Object* value = dictionary->ValueAt(entry);
+ if (receiver->IsJSGlobalObject()) {
+ value = JSGlobalPropertyCell::cast(value)->value();
+ }
+ return value;
}
}
}
@@ -2964,9 +2929,7 @@ static Object* Runtime_IsPropertyEnumerable(Arguments args) {
static Object* Runtime_GetPropertyNames(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 1);
-
- CONVERT_CHECKED(JSObject, raw_object, args[0]);
- Handle<JSObject> object(raw_object);
+ CONVERT_ARG_CHECKED(JSObject, object, 0);
return *GetKeysFor(object);
}
@@ -3705,20 +3668,8 @@ static Object* Runtime_NumberMod(Arguments args) {
static Object* Runtime_StringAdd(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
-
CONVERT_CHECKED(String, str1, args[0]);
CONVERT_CHECKED(String, str2, args[1]);
- int len1 = str1->length();
- int len2 = str2->length();
- if (len1 == 0) return str2;
- if (len2 == 0) return str1;
- int length_sum = len1 + len2;
- // Make sure that an out of memory exception is thrown if the length
- // of the new cons string is too large to fit in a Smi.
- if (length_sum > Smi::kMaxValue || length_sum < 0) {
- Top::context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
return Heap::AllocateConsString(str1, str2);
}
@@ -4153,39 +4104,69 @@ static Object* Runtime_Math_log(Arguments args) {
}
+// Helper function to compute x^y, where y is known to be an
+// integer. Uses binary decomposition to limit the number of
+// multiplications; see the discussion in "Hacker's Delight" by Henry
+// S. Warren, Jr., figure 11-6, page 213.
+static double powi(double x, int y) {
+ ASSERT(y != kMinInt);
+ unsigned n = (y < 0) ? -y : y;
+ double m = x;
+ double p = 1;
+ while (true) {
+ if ((n & 1) != 0) p *= m;
+ n >>= 1;
+ if (n == 0) {
+ if (y < 0) {
+ // Unfortunately, we have to be careful when p has reached
+ // infinity in the computation, because sometimes the higher
+ // internal precision in the pow() implementation would have
+ // given us a finite p. This happens very rarely.
+ double result = 1.0 / p;
+ return (result == 0 && isinf(p))
+ ? pow(x, static_cast<double>(y)) // Avoid pow(double, int).
+ : result;
+ } else {
+ return p;
+ }
+ }
+ m *= m;
+ }
+}
+
+
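+ powi() walks the exponent's bits from least significant upward, squaring m each step and folding it into p for every set bit: 13 = 0b1101 costs three squarings plus three multiplies rather than twelve multiplies. A stripped-down copy of the loop for experimenting (the negative-exponent and infinity handling above is omitted):

    #include <cassert>
    #include <cmath>

    // Stripped-down binary decomposition: fold the current square of x
    // into p for each set bit of the exponent.
    static double PowiSketch(double x, unsigned n) {
      double m = x, p = 1;
      while (n != 0) {
        if ((n & 1) != 0) p *= m;  // Bit set: multiply in x^(2^i).
        m *= m;                    // Next square.
        n >>= 1;
      }
      return p;
    }

    int main() {
      // 13 = 0b1101: three squarings plus three multiplies, not twelve.
      assert(PowiSketch(2.0, 13) == 8192.0);
      assert(PowiSketch(3.0, 5) == std::pow(3.0, 5.0));
      return 0;
    }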
static Object* Runtime_Math_pow(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_DOUBLE_CHECKED(x, args[0]);
+
+ // If the second argument is a smi, it is much faster to call the
+ // custom powi() function than the generic pow().
+ if (args[1]->IsSmi()) {
+ int y = Smi::cast(args[1])->value();
+ return Heap::AllocateHeapNumber(powi(x, y));
+ }
+
CONVERT_DOUBLE_CHECKED(y, args[1]);
- if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
- return Heap::nan_value();
+ if (y == 0.5) {
+ // It's not uncommon to use Math.pow(x, 0.5) to compute the square
+ // root of a number. To speed up such computations, we explicitly
+ // check for this case and use the sqrt() function which is faster
+ // than pow().
+ return Heap::AllocateHeapNumber(sqrt(x));
+ } else if (y == -0.5) {
+ // Optimized using Math.pow(x, -0.5) == 1 / Math.pow(x, 0.5).
+ return Heap::AllocateHeapNumber(1.0 / sqrt(x));
} else if (y == 0) {
return Smi::FromInt(1);
+ } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
+ return Heap::nan_value();
} else {
return Heap::AllocateHeapNumber(pow(x, y));
}
}
-// Returns a number value with positive sign, greater than or equal to
-// 0 but less than 1, chosen randomly.
-static Object* Runtime_Math_random(Arguments args) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 0);
-
- // To get much better precision, we combine the results of two
- // invocations of random(). The result is computed by normalizing a
- // double in the range [0, RAND_MAX + 1) obtained by adding the
- // high-order bits in the range [0, RAND_MAX] with the low-order
- // bits in the range [0, 1).
- double lo = static_cast<double>(random()) * (1.0 / (RAND_MAX + 1.0));
- double hi = static_cast<double>(random());
- double result = (hi + lo) * (1.0 / (RAND_MAX + 1.0));
- ASSERT(result >= 0 && result < 1);
- return Heap::AllocateHeapNumber(result);
-}
-
static Object* Runtime_Math_round(Arguments args) {
NoHandleAllocation ha;
@@ -4300,45 +4281,61 @@ static Object* Runtime_NewClosure(Arguments args) {
}
+static Handle<Code> ComputeConstructStub(Handle<Map> map) {
+ // TODO(385): Change this to create a construct stub specialized for
+ // the given map to make allocation of simple objects - and maybe
+ // arrays - much faster.
+ return Handle<Code>(Builtins::builtin(Builtins::JSConstructStubGeneric));
+}
+
+
static Object* Runtime_NewObject(Arguments args) {
- NoHandleAllocation ha;
+ HandleScope scope;
ASSERT(args.length() == 1);
- Object* constructor = args[0];
- if (constructor->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(constructor);
+ Handle<Object> constructor = args.at<Object>(0);
+
+ // If the constructor isn't a proper function we throw a type error.
+ if (!constructor->IsJSFunction()) {
+ Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
+ Handle<Object> type_error =
+ Factory::NewTypeError("not_constructor", arguments);
+ return Top::Throw(*type_error);
+ }
- // Handle stepping into constructors if step into is active.
+ Handle<JSFunction> function = Handle<JSFunction>::cast(constructor);
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (Debug::StepInActive()) {
- HandleScope scope;
- Debug::HandleStepIn(Handle<JSFunction>(function), 0, true);
- }
+ // Handle stepping into constructors if step into is active.
+ if (Debug::StepInActive()) {
+ Debug::HandleStepIn(function, 0, true);
+ }
#endif
- if (function->has_initial_map() &&
- function->initial_map()->instance_type() == JS_FUNCTION_TYPE) {
+ if (function->has_initial_map()) {
+ if (function->initial_map()->instance_type() == JS_FUNCTION_TYPE) {
// The 'Function' function ignores the receiver object when
// called using 'new' and creates a new JSFunction object that
// is returned. The receiver object is only used for error
// reporting if an error occurs when constructing the new
- // JSFunction. AllocateJSObject should not be used to allocate
- // JSFunctions since it does not properly initialize the shared
- // part of the function. Since the receiver is ignored anyway,
- // we use the global object as the receiver instead of a new
- // JSFunction object. This way, errors are reported the same
- // way whether or not 'Function' is called using 'new'.
+ // JSFunction. Factory::NewJSObject() should not be used to
+ // allocate JSFunctions since it does not properly initialize
+ // the shared part of the function. Since the receiver is
+ // ignored anyway, we use the global object as the receiver
+ // instead of a new JSFunction object. This way, errors are
+ // reported the same way whether or not 'Function' is called
+ // using 'new'.
return Top::context()->global();
}
- return Heap::AllocateJSObject(function);
}
- HandleScope scope;
- Handle<Object> cons(constructor);
- // The constructor is not a function; throw a type error.
- Handle<Object> type_error =
- Factory::NewTypeError("not_constructor", HandleVector(&cons, 1));
- return Top::Throw(*type_error);
+ bool first_allocation = !function->has_initial_map();
+ Handle<JSObject> result = Factory::NewJSObject(function);
+ if (first_allocation) {
+ Handle<Map> map = Handle<Map>(function->initial_map());
+ Handle<Code> stub = ComputeConstructStub(map);
+ function->shared()->set_construct_stub(*stub);
+ }
+ return *result;
}
@@ -4495,17 +4492,25 @@ static Object* Runtime_LookupContext(Arguments args) {
// compiler to do the right thing.
//
// TODO(1236026): This is a non-portable hack that should be removed.
-// TODO(x64): Definitely!
-typedef uint64_t ObjectPair;
+#ifdef V8_HOST_ARCH_64_BIT
+// Tested with GCC, not with MSVC.
+struct ObjectPair {
+ Object* x;
+ Object* y;
+};
static inline ObjectPair MakePair(Object* x, Object* y) {
-#if V8_HOST_ARCH_64_BIT
- UNIMPLEMENTED();
- return 0;
+ ObjectPair result = {x, y};
+ return result; // Pointers x and y returned in rax and rdx, in the AMD x64 ABI.
+}
#else
+typedef uint64_t ObjectPair;
+static inline ObjectPair MakePair(Object* x, Object* y) {
return reinterpret_cast<uint32_t>(x) |
(reinterpret_cast<ObjectPair>(y) << 32);
-#endif
}
+#endif
+
+
static inline Object* Unhole(Object* x, PropertyAttributes attributes) {
@@ -4539,7 +4544,7 @@ static ObjectPair LoadContextSlotHelper(Arguments args, bool throw_error) {
ASSERT(args.length() == 2);
if (!args[0]->IsContext() || !args[1]->IsString()) {
- return MakePair(IllegalOperation(), NULL);
+ return MakePair(Top::ThrowIllegalOperation(), NULL);
}
Handle<Context> context = args.at<Context>(0);
Handle<String> name = args.at<String>(1);
@@ -4821,8 +4826,8 @@ static Object* Runtime_DebugPrint(Arguments args) {
// and print some interesting cpu debugging info.
JavaScriptFrameIterator it;
JavaScriptFrame* frame = it.frame();
- PrintF("fp = %p, sp = %p, pp = %p: ",
- frame->fp(), frame->sp(), frame->pp());
+ PrintF("fp = %p, sp = %p, caller_sp = %p: ",
+ frame->fp(), frame->sp(), frame->caller_sp());
} else {
PrintF("DebugPrint: ");
}
@@ -5533,15 +5538,12 @@ static Object* DebugLookupResultValue(Object* receiver, String* name,
bool* caught_exception) {
Object* value;
switch (result->type()) {
- case NORMAL: {
- Dictionary* dict =
- JSObject::cast(result->holder())->property_dictionary();
- value = dict->ValueAt(result->GetDictionaryEntry());
+ case NORMAL:
+ value = result->holder()->GetNormalizedProperty(result);
if (value->IsTheHole()) {
return Heap::undefined_value();
}
return value;
- }
case FIELD:
value =
JSObject::cast(
@@ -6106,6 +6108,405 @@ static Object* Runtime_GetFrameDetails(Arguments args) {
}
+// Copy all the context locals into an object used to materialize a scope.
+static void CopyContextLocalsToScopeObject(Handle<Code> code,
+ ScopeInfo<>& scope_info,
+ Handle<Context> context,
+ Handle<JSObject> scope_object) {
+  // Copy all context locals into the scope object.
+ for (int i = Context::MIN_CONTEXT_SLOTS;
+ i < scope_info.number_of_context_slots();
+ i++) {
+ int context_index =
+ ScopeInfo<>::ContextSlotIndex(*code,
+ *scope_info.context_slot_name(i),
+ NULL);
+
+ // Don't include the arguments shadow (.arguments) context variable.
+ if (*scope_info.context_slot_name(i) != Heap::arguments_shadow_symbol()) {
+ SetProperty(scope_object,
+ scope_info.context_slot_name(i),
+ Handle<Object>(context->get(context_index)), NONE);
+ }
+ }
+}
+
+
+// Create a plain JSObject which materializes the local scope for the specified
+// frame.
+static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) {
+ Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ Handle<Code> code(function->code());
+ ScopeInfo<> scope_info(*code);
+
+  // Allocate and initialize a JSObject with all the arguments, stack
+  // locals, heap locals, and extension properties of the debugged function.
+ Handle<JSObject> local_scope = Factory::NewJSObject(Top::object_function());
+
+ // First fill all parameters.
+ for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
+ SetProperty(local_scope,
+ scope_info.parameter_name(i),
+ Handle<Object>(frame->GetParameter(i)), NONE);
+ }
+
+ // Second fill all stack locals.
+ for (int i = 0; i < scope_info.number_of_stack_slots(); i++) {
+ SetProperty(local_scope,
+ scope_info.stack_slot_name(i),
+ Handle<Object>(frame->GetExpression(i)), NONE);
+ }
+
+ // Third fill all context locals.
+ Handle<Context> frame_context(Context::cast(frame->context()));
+ Handle<Context> function_context(frame_context->fcontext());
+ CopyContextLocalsToScopeObject(code, scope_info,
+ function_context, local_scope);
+
+  // Finally, copy any properties from the function context extension. These
+  // will be the variables introduced by eval.
+ if (function_context->closure() == *function) {
+ if (function_context->has_extension() &&
+ !function_context->IsGlobalContext()) {
+ Handle<JSObject> ext(JSObject::cast(function_context->extension()));
+ Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext);
+ for (int i = 0; i < keys->length(); i++) {
+ // Names of variables introduced by eval are strings.
+ ASSERT(keys->get(i)->IsString());
+ Handle<String> key(String::cast(keys->get(i)));
+ SetProperty(local_scope, key, GetProperty(ext, key), NONE);
+ }
+ }
+ }
+ return local_scope;
+}
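
To make the materialization concrete, here is a hypothetical debuggee showing which bindings the code above collects; the function name and values are purely illustrative:

  function debuggee(a) {     // parameter
    var b = 2;               // stack local
    eval("var c = 3;");      // extension property introduced by eval
    debugger;                // materialized local scope is roughly
  }                          // { a: <argument>, b: 2, c: 3 }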
+
+
+// Create a plain JSObject which materializes the closure content for the
+// context.
+static Handle<JSObject> MaterializeClosure(Handle<Context> context) {
+ ASSERT(context->is_function_context());
+
+ Handle<Code> code(context->closure()->code());
+ ScopeInfo<> scope_info(*code);
+
+  // Allocate and initialize a JSObject with all the content of this
+  // function closure.
+ Handle<JSObject> closure_scope = Factory::NewJSObject(Top::object_function());
+
+ // Check whether the arguments shadow object exists.
+ int arguments_shadow_index =
+ ScopeInfo<>::ContextSlotIndex(*code,
+ Heap::arguments_shadow_symbol(),
+ NULL);
+ if (arguments_shadow_index >= 0) {
+ // In this case all the arguments are available in the arguments shadow
+ // object.
+ Handle<JSObject> arguments_shadow(
+ JSObject::cast(context->get(arguments_shadow_index)));
+ for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
+ SetProperty(closure_scope,
+ scope_info.parameter_name(i),
+ Handle<Object>(arguments_shadow->GetElement(i)), NONE);
+ }
+ }
+
+  // Copy all context locals into the closure scope object.
+ CopyContextLocalsToScopeObject(code, scope_info, context, closure_scope);
+
+  // Finally, copy any properties from the function context extension. These
+  // will be the variables introduced by eval.
+ if (context->has_extension()) {
+ Handle<JSObject> ext(JSObject::cast(context->extension()));
+ Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext);
+ for (int i = 0; i < keys->length(); i++) {
+ // Names of variables introduced by eval are strings.
+ ASSERT(keys->get(i)->IsString());
+ Handle<String> key(String::cast(keys->get(i)));
+ SetProperty(closure_scope, key, GetProperty(ext, key), NONE);
+ }
+ }
+
+ return closure_scope;
+}
+
+
+// Iterate over the actual scopes visible from a stack frame. All scopes are
+// backed by an actual context except the local scope, which is inserted
+// "artificially" into the context chain.
+class ScopeIterator {
+ public:
+ enum ScopeType {
+ ScopeTypeGlobal = 0,
+ ScopeTypeLocal,
+ ScopeTypeWith,
+ ScopeTypeClosure
+ };
+
+ explicit ScopeIterator(JavaScriptFrame* frame)
+ : frame_(frame),
+ function_(JSFunction::cast(frame->function())),
+ context_(Context::cast(frame->context())),
+ local_done_(false),
+ at_local_(false) {
+
+ // Check whether the first scope is actually a local scope.
+ if (context_->IsGlobalContext()) {
+ // If there is a stack slot for .result then this local scope has been
+ // created for evaluating top level code and it is not a real local scope.
+    // created for evaluating top-level code and is not a real local scope.
+ // saved with the code object does not otherwise have that information.
+ Handle<Code> code(function_->code());
+ int index = ScopeInfo<>::StackSlotIndex(*code, Heap::result_symbol());
+ at_local_ = index < 0;
+ } else if (context_->is_function_context()) {
+ at_local_ = true;
+ }
+ }
+
+ // More scopes?
+ bool Done() { return context_.is_null(); }
+
+ // Move to the next scope.
+ void Next() {
+ // If at a local scope mark the local scope as passed.
+ if (at_local_) {
+ at_local_ = false;
+ local_done_ = true;
+
+      // If the current context is not associated with the local scope, the
+ // current context is the next real scope, so don't move to the next
+ // context in this case.
+ if (context_->closure() != *function_) {
+ return;
+ }
+ }
+
+ // The global scope is always the last in the chain.
+ if (context_->IsGlobalContext()) {
+ context_ = Handle<Context>();
+ return;
+ }
+
+ // Move to the next context.
+ if (context_->is_function_context()) {
+ context_ = Handle<Context>(Context::cast(context_->closure()->context()));
+ } else {
+ context_ = Handle<Context>(context_->previous());
+ }
+
+    // If we have just passed the local scope, indicate that the current
+    // scope is now the local scope.
+ if (!local_done_ &&
+ (context_->IsGlobalContext() || (context_->is_function_context()))) {
+ at_local_ = true;
+ }
+ }
+
+ // Return the type of the current scope.
+ int Type() {
+ if (at_local_) {
+ return ScopeTypeLocal;
+ }
+ if (context_->IsGlobalContext()) {
+ ASSERT(context_->global()->IsGlobalObject());
+ return ScopeTypeGlobal;
+ }
+ if (context_->is_function_context()) {
+ return ScopeTypeClosure;
+ }
+ ASSERT(context_->has_extension());
+ ASSERT(!context_->extension()->IsJSContextExtensionObject());
+ return ScopeTypeWith;
+ }
+
+ // Return the JavaScript object with the content of the current scope.
+ Handle<JSObject> ScopeObject() {
+ switch (Type()) {
+ case ScopeIterator::ScopeTypeGlobal:
+ return Handle<JSObject>(CurrentContext()->global());
+ break;
+ case ScopeIterator::ScopeTypeLocal:
+ // Materialize the content of the local scope into a JSObject.
+ return MaterializeLocalScope(frame_);
+ break;
+ case ScopeIterator::ScopeTypeWith:
+ // Return the with object.
+ return Handle<JSObject>(CurrentContext()->extension());
+ break;
+ case ScopeIterator::ScopeTypeClosure:
+ // Materialize the content of the closure scope into a JSObject.
+ return MaterializeClosure(CurrentContext());
+ break;
+ }
+ UNREACHABLE();
+ return Handle<JSObject>();
+ }
+
+ // Return the context for this scope. For the local context there might not
+ // be an actual context.
+ Handle<Context> CurrentContext() {
+ if (at_local_ && context_->closure() != *function_) {
+ return Handle<Context>();
+ }
+ return context_;
+ }
+
+#ifdef DEBUG
+ // Debug print of the content of the current scope.
+ void DebugPrint() {
+ switch (Type()) {
+ case ScopeIterator::ScopeTypeGlobal:
+ PrintF("Global:\n");
+ CurrentContext()->Print();
+ break;
+
+ case ScopeIterator::ScopeTypeLocal: {
+ PrintF("Local:\n");
+ Handle<Code> code(function_->code());
+ ScopeInfo<> scope_info(*code);
+ scope_info.Print();
+ if (!CurrentContext().is_null()) {
+ CurrentContext()->Print();
+ if (CurrentContext()->has_extension()) {
+ Handle<JSObject> extension =
+ Handle<JSObject>(CurrentContext()->extension());
+ if (extension->IsJSContextExtensionObject()) {
+ extension->Print();
+ }
+ }
+ }
+ break;
+ }
+
+ case ScopeIterator::ScopeTypeWith: {
+ PrintF("With:\n");
+ Handle<JSObject> extension =
+ Handle<JSObject>(CurrentContext()->extension());
+ extension->Print();
+ break;
+ }
+
+ case ScopeIterator::ScopeTypeClosure: {
+ PrintF("Closure:\n");
+ CurrentContext()->Print();
+ if (CurrentContext()->has_extension()) {
+ Handle<JSObject> extension =
+ Handle<JSObject>(CurrentContext()->extension());
+ if (extension->IsJSContextExtensionObject()) {
+ extension->Print();
+ }
+ }
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+ PrintF("\n");
+ }
+#endif
+
+ private:
+ JavaScriptFrame* frame_;
+ Handle<JSFunction> function_;
+ Handle<Context> context_;
+ bool local_done_;
+ bool at_local_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
+};
+
+
+static Object* Runtime_GetScopeCount(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 2);
+
+ // Check arguments.
+ Object* check = Runtime_CheckExecutionState(args);
+ if (check->IsFailure()) return check;
+ CONVERT_CHECKED(Smi, wrapped_id, args[1]);
+
+ // Get the frame where the debugging is performed.
+ StackFrame::Id id = UnwrapFrameId(wrapped_id);
+ JavaScriptFrameIterator it(id);
+ JavaScriptFrame* frame = it.frame();
+
+ // Count the visible scopes.
+ int n = 0;
+ for (ScopeIterator it(frame); !it.Done(); it.Next()) {
+ n++;
+ }
+
+ return Smi::FromInt(n);
+}
+
+
+static const int kScopeDetailsTypeIndex = 0;
+static const int kScopeDetailsObjectIndex = 1;
+static const int kScopeDetailsSize = 2;
+
+// Return an array with scope details
+// args[0]: number: break id
+// args[1]: number: frame index
+// args[2]: number: scope index
+//
+// The array returned contains the following information:
+// 0: Scope type
+// 1: Scope object
+static Object* Runtime_GetScopeDetails(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 3);
+
+ // Check arguments.
+ Object* check = Runtime_CheckExecutionState(args);
+ if (check->IsFailure()) return check;
+ CONVERT_CHECKED(Smi, wrapped_id, args[1]);
+ CONVERT_NUMBER_CHECKED(int, index, Int32, args[2]);
+
+ // Get the frame where the debugging is performed.
+ StackFrame::Id id = UnwrapFrameId(wrapped_id);
+ JavaScriptFrameIterator frame_it(id);
+ JavaScriptFrame* frame = frame_it.frame();
+
+ // Find the requested scope.
+ int n = 0;
+ ScopeIterator it(frame);
+ for (; !it.Done() && n < index; it.Next()) {
+ n++;
+ }
+ if (it.Done()) {
+ return Heap::undefined_value();
+ }
+
+ // Calculate the size of the result.
+ int details_size = kScopeDetailsSize;
+ Handle<FixedArray> details = Factory::NewFixedArray(details_size);
+
+ // Fill in scope details.
+ details->set(kScopeDetailsTypeIndex, Smi::FromInt(it.Type()));
+ details->set(kScopeDetailsObjectIndex, *it.ScopeObject());
+
+ return *Factory::NewJSArrayWithElements(details);
+}
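
On the JavaScript side of the debugger, the returned array can be unpacked using the same layout constants; a sketch, where details stands for whatever the runtime call produced:

  var SCOPE_TYPE_NAMES = ['Global', 'Local', 'With', 'Closure'];
  function describeScope(details) {
    var type = details[0];    // kScopeDetailsTypeIndex
    var object = details[1];  // kScopeDetailsObjectIndex
    return SCOPE_TYPE_NAMES[type] + ' scope: ' + object;
  }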
+
+
+static Object* Runtime_DebugPrintScopes(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 0);
+
+#ifdef DEBUG
+ // Print the scopes for the top frame.
+ StackFrameLocator locator;
+ JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
+ for (ScopeIterator it(frame); !it.Done(); it.Next()) {
+ it.DebugPrint();
+ }
+#endif
+ return Heap::undefined_value();
+}
+
+
static Object* Runtime_GetCFrames(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 1);
@@ -6228,8 +6629,8 @@ static Object* Runtime_GetBreakLocations(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, raw_fun, 0);
- Handle<SharedFunctionInfo> shared(raw_fun->shared());
+ CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+ Handle<SharedFunctionInfo> shared(fun->shared());
// Find the number of break points
Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared);
if (break_locations->IsUndefined()) return Heap::undefined_value();
@@ -6246,8 +6647,8 @@ static Object* Runtime_GetBreakLocations(Arguments args) {
static Object* Runtime_SetFunctionBreakPoint(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSFunction, raw_fun, 0);
- Handle<SharedFunctionInfo> shared(raw_fun->shared());
+ CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+ Handle<SharedFunctionInfo> shared(fun->shared());
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
RUNTIME_ASSERT(source_position >= 0);
Handle<Object> break_point_object_arg = args.at<Object>(2);
@@ -6568,54 +6969,17 @@ static Object* Runtime_DebugEvaluate(Arguments args) {
ASSERT(go_between_sinfo.number_of_context_slots() == 0);
#endif
- // Allocate and initialize a context extension object with all the
- // arguments, stack locals heap locals and extension properties of the
- // debugged function.
- Handle<JSObject> context_ext = Factory::NewJSObject(Top::object_function());
- // First fill all parameters to the context extension.
- for (int i = 0; i < sinfo.number_of_parameters(); ++i) {
- SetProperty(context_ext,
- sinfo.parameter_name(i),
- Handle<Object>(frame->GetParameter(i)), NONE);
- }
- // Second fill all stack locals to the context extension.
- for (int i = 0; i < sinfo.number_of_stack_slots(); i++) {
- SetProperty(context_ext,
- sinfo.stack_slot_name(i),
- Handle<Object>(frame->GetExpression(i)), NONE);
- }
- // Third fill all context locals to the context extension.
- Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context(frame_context->fcontext());
- for (int i = Context::MIN_CONTEXT_SLOTS;
- i < sinfo.number_of_context_slots();
- ++i) {
- int context_index =
- ScopeInfo<>::ContextSlotIndex(*code, *sinfo.context_slot_name(i), NULL);
- SetProperty(context_ext,
- sinfo.context_slot_name(i),
- Handle<Object>(function_context->get(context_index)), NONE);
- }
- // Finally copy any properties from the function context extension. This will
- // be variables introduced by eval.
- if (function_context->has_extension() &&
- !function_context->IsGlobalContext()) {
- Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext);
- for (int i = 0; i < keys->length(); i++) {
- // Names of variables introduced by eval are strings.
- ASSERT(keys->get(i)->IsString());
- Handle<String> key(String::cast(keys->get(i)));
- SetProperty(context_ext, key, GetProperty(ext, key), NONE);
- }
- }
+ // Materialize the content of the local scope into a JSObject.
+ Handle<JSObject> local_scope = MaterializeLocalScope(frame);
  // Allocate a new context for the debug evaluation and set the extension
  // object built above.
Handle<Context> context =
Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, go_between);
- context->set_extension(*context_ext);
+ context->set_extension(*local_scope);
// Copy any with contexts present and chain them in front of this context.
+ Handle<Context> frame_context(Context::cast(frame->context()));
+ Handle<Context> function_context(frame_context->fcontext());
context = CopyWithContextChain(frame_context, context);
// Wrap the evaluation statement in a new function compiled in the newly
@@ -6657,6 +7021,13 @@ static Object* Runtime_DebugEvaluate(Arguments args) {
Execution::Call(Handle<JSFunction>::cast(evaluation_function), receiver,
argc, argv, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
+
+ // Skip the global proxy as it has no properties and always delegates to the
+ // real global object.
+ if (result->IsJSGlobalProxy()) {
+ result = Handle<JSObject>(JSObject::cast(result->GetPrototype()));
+ }
+
return *result;
}
@@ -7012,6 +7383,67 @@ static Object* Runtime_GetScript(Arguments args) {
}
+// Determines whether the given stack frame should be displayed in
+// a stack trace. The caller is the error constructor that asked
+// for the stack trace to be collected. The first time a construct
+// call to this function is encountered, it is skipped. The seen_caller
+// in/out parameter is used to remember if the caller has been seen
+// yet.
+static bool ShowFrameInStackTrace(StackFrame* raw_frame, Object* caller,
+ bool* seen_caller) {
+ // Only display JS frames.
+ if (!raw_frame->is_java_script())
+ return false;
+ JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
+ Object* raw_fun = frame->function();
+ // Not sure when this can happen but skip it just in case.
+ if (!raw_fun->IsJSFunction())
+ return false;
+ if ((raw_fun == caller) && !(*seen_caller) && frame->IsConstructor()) {
+ *seen_caller = true;
+ return false;
+ }
+ // Skip the most obvious builtin calls. Some builtin calls (such as
+  // Number.ADD, which is invoked using 'call') are very difficult to
+  // recognize, so we're leaving them in for now.
+ return !frame->receiver()->IsJSBuiltinsObject();
+}
+
+
+// Collect the raw data for a stack trace. Returns an array of
+// three-element segments, each containing a receiver, function, and
+// native code offset.
+static Object* Runtime_CollectStackTrace(Arguments args) {
+ ASSERT_EQ(args.length(), 1);
+ Object* caller = args[0];
+
+ StackFrameIterator iter;
+ int frame_count = 0;
+ bool seen_caller = false;
+ while (!iter.done()) {
+ if (ShowFrameInStackTrace(iter.frame(), caller, &seen_caller))
+ frame_count++;
+ iter.Advance();
+ }
+ HandleScope scope;
+ Handle<JSArray> result = Factory::NewJSArray(frame_count * 3);
+ int i = 0;
+ seen_caller = false;
+ for (iter.Reset(); !iter.done(); iter.Advance()) {
+ StackFrame* raw_frame = iter.frame();
+ if (ShowFrameInStackTrace(raw_frame, caller, &seen_caller)) {
+ JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
+ result->SetElement(i++, frame->receiver());
+ result->SetElement(i++, frame->function());
+ Address pc = frame->pc();
+ Address start = frame->code()->address();
+ result->SetElement(i++, Smi::FromInt(pc - start));
+ }
+ }
+ return *result;
+}
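
A consumer of the collected data walks the flat array in steps of three, mirroring the (receiver, function, code offset) layout described above; a sketch, where raw stands for the returned array:

  function formatStackTrace(raw) {
    var frames = [];
    for (var i = 0; i < raw.length; i += 3) {
      frames.push({ receiver: raw[i],          // 'this' value of the frame
                    func: raw[i + 1],          // the called function
                    codeOffset: raw[i + 2] }); // pc offset into its code
    }
    return frames;
  }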
+
+
static Object* Runtime_Abort(Arguments args) {
ASSERT(args.length() == 2);
OS::PrintError("abort: %s\n", reinterpret_cast<char*>(args[0]) +
diff --git a/V8Binding/v8/src/runtime.h b/V8Binding/v8/src/runtime.h
index 30bb7c5..36e274a 100644
--- a/V8Binding/v8/src/runtime.h
+++ b/V8Binding/v8/src/runtime.h
@@ -135,7 +135,6 @@ namespace internal {
F(Math_floor, 1) \
F(Math_log, 1) \
F(Math_pow, 2) \
- F(Math_random, 0) \
F(Math_round, 1) \
F(Math_sin, 1) \
F(Math_sqrt, 1) \
@@ -170,18 +169,12 @@ namespace internal {
F(FunctionGetSourceCode, 1) \
F(FunctionGetScript, 1) \
F(FunctionGetScriptSourcePosition, 1) \
+ F(FunctionGetPositionForOffset, 2) \
F(FunctionIsAPIFunction, 1) \
F(GetScript, 1) \
+ F(CollectStackTrace, 1) \
\
F(ClassOf, 1) \
- F(HasDateClass, 1) \
- F(HasStringClass, 1) \
- F(HasArrayClass, 1) \
- F(HasFunctionClass, 1) \
- F(HasNumberClass, 1) \
- F(HasBooleanClass, 1) \
- F(HasArgumentsClass, 1) \
- F(HasRegExpClass, 1) \
F(SetCode, 2) \
\
F(CreateApiFunction, 1) \
@@ -288,6 +281,9 @@ namespace internal {
F(CheckExecutionState, 1) \
F(GetFrameCount, 1) \
F(GetFrameDetails, 2) \
+ F(GetScopeCount, 2) \
+ F(GetScopeDetails, 3) \
+ F(DebugPrintScopes, 0) \
F(GetCFrames, 1) \
F(GetThreadCount, 1) \
F(GetThreadDetails, 2) \
diff --git a/V8Binding/v8/src/runtime.js b/V8Binding/v8/src/runtime.js
index c8ccf9f..25cc5ba 100644
--- a/V8Binding/v8/src/runtime.js
+++ b/V8Binding/v8/src/runtime.js
@@ -97,12 +97,12 @@ function STRICT_EQUALS(x) {
if (IS_STRING(this)) {
if (!IS_STRING(x)) return 1; // not equal
return %StringEquals(this, x);
- }
+ }
if (IS_NUMBER(this)) {
if (!IS_NUMBER(x)) return 1; // not equal
return %NumberEquals(this, x);
- }
+ }
// If anything else gets here, we just do simple identity check.
// Objects (including functions), null, undefined and booleans were
@@ -148,7 +148,7 @@ function ADD(x) {
// Default implementation.
var a = %ToPrimitive(this, NO_HINT);
var b = %ToPrimitive(x, NO_HINT);
-
+
if (IS_STRING(a)) {
return %StringAdd(a, %ToString(b));
} else if (IS_STRING(b)) {
@@ -160,40 +160,48 @@ function ADD(x) {
// Left operand (this) is already a string.
-function STRING_ADD_LEFT(x) {
- x = %ToString(%ToPrimitive(x, NO_HINT));
- return %StringAdd(this, x);
+function STRING_ADD_LEFT(y) {
+ if (!IS_STRING(y)) y = %ToString(%ToPrimitive(y, NO_HINT));
+ return %StringAdd(this, y);
}
-// Right operand (x) is already a string.
-function STRING_ADD_RIGHT(x) {
- var a = %ToString(%ToPrimitive(this, NO_HINT));
- return %StringAdd(a, x);
+// Right operand (y) is already a string.
+function STRING_ADD_RIGHT(y) {
+ var x = IS_STRING(this) ? this : %ToString(%ToPrimitive(this, NO_HINT));
+ return %StringAdd(x, y);
}
// ECMA-262, section 11.6.2, page 50.
-function SUB(x) {
- return %NumberSub(%ToNumber(this), %ToNumber(x));
+function SUB(y) {
+ var x = IS_NUMBER(this) ? this : %ToNumber(this);
+ if (!IS_NUMBER(y)) y = %ToNumber(y);
+ return %NumberSub(x, y);
}
// ECMA-262, section 11.5.1, page 48.
-function MUL(x) {
- return %NumberMul(%ToNumber(this), %ToNumber(x));
+function MUL(y) {
+ var x = IS_NUMBER(this) ? this : %ToNumber(this);
+ if (!IS_NUMBER(y)) y = %ToNumber(y);
+ return %NumberMul(x, y);
}
// ECMA-262, section 11.5.2, page 49.
-function DIV(x) {
- return %NumberDiv(%ToNumber(this), %ToNumber(x));
+function DIV(y) {
+ var x = IS_NUMBER(this) ? this : %ToNumber(this);
+ if (!IS_NUMBER(y)) y = %ToNumber(y);
+ return %NumberDiv(x, y);
}
// ECMA-262, section 11.5.3, page 49.
-function MOD(x) {
- return %NumberMod(%ToNumber(this), %ToNumber(x));
+function MOD(y) {
+ var x = IS_NUMBER(this) ? this : %ToNumber(this);
+ if (!IS_NUMBER(y)) y = %ToNumber(y);
+ return %NumberMod(x, y);
}
@@ -204,50 +212,92 @@ function MOD(x) {
*/
// ECMA-262, section 11.10, page 57.
-function BIT_OR(x) {
- return %NumberOr(%ToNumber(this), %ToNumber(x));
+function BIT_OR(y) {
+ var x = IS_NUMBER(this) ? this : %ToNumber(this);
+ if (!IS_NUMBER(y)) y = %ToNumber(y);
+ return %NumberOr(x, y);
}
// ECMA-262, section 11.10, page 57.
-function BIT_AND(x) {
- return %NumberAnd(%ToNumber(this), %ToNumber(x));
+function BIT_AND(y) {
+ var x;
+ if (IS_NUMBER(this)) {
+ x = this;
+ if (!IS_NUMBER(y)) y = %ToNumber(y);
+ } else {
+ x = %ToNumber(this);
+ // Make sure to convert the right operand to a number before
+ // bailing out in the fast case, but after converting the
+ // left operand. This ensures that valueOf methods on the right
+ // operand are always executed.
+ if (!IS_NUMBER(y)) y = %ToNumber(y);
+ // Optimize for the case where we end up AND'ing a value
+ // that doesn't convert to a number. This is common in
+ // certain benchmarks.
+ if (NUMBER_IS_NAN(x)) return 0;
+ }
+ return %NumberAnd(x, y);
}
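
The conversion-ordering comment above is observable from JavaScript, since conversion to number can run user code. A small example of the behaviour the slow path preserves:

  var effects = [];
  var left  = { valueOf: function() { effects.push('L'); return NaN; } };
  var right = { valueOf: function() { effects.push('R'); return 1; } };
  var r = left & right;
  // effects is ['L', 'R']: the right operand's valueOf still runs even
  // though the left operand converts to NaN and the result is simply 0.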
// ECMA-262, section 11.10, page 57.
-function BIT_XOR(x) {
- return %NumberXor(%ToNumber(this), %ToNumber(x));
+function BIT_XOR(y) {
+ var x = IS_NUMBER(this) ? this : %ToNumber(this);
+ if (!IS_NUMBER(y)) y = %ToNumber(y);
+ return %NumberXor(x, y);
}
// ECMA-262, section 11.4.7, page 47.
function UNARY_MINUS() {
- return %NumberUnaryMinus(%ToNumber(this));
+ var x = IS_NUMBER(this) ? this : %ToNumber(this);
+ return %NumberUnaryMinus(x);
}
// ECMA-262, section 11.4.8, page 48.
function BIT_NOT() {
- return %NumberNot(%ToNumber(this));
+ var x = IS_NUMBER(this) ? this : %ToNumber(this);
+ return %NumberNot(x);
}
// ECMA-262, section 11.7.1, page 51.
-function SHL(x) {
- return %NumberShl(%ToNumber(this), %ToNumber(x));
+function SHL(y) {
+ var x = IS_NUMBER(this) ? this : %ToNumber(this);
+ if (!IS_NUMBER(y)) y = %ToNumber(y);
+ return %NumberShl(x, y);
}
// ECMA-262, section 11.7.2, page 51.
-function SAR(x) {
- return %NumberSar(%ToNumber(this), %ToNumber(x));
+function SAR(y) {
+ var x;
+ if (IS_NUMBER(this)) {
+ x = this;
+ if (!IS_NUMBER(y)) y = %ToNumber(y);
+ } else {
+ x = %ToNumber(this);
+ // Make sure to convert the right operand to a number before
+ // bailing out in the fast case, but after converting the
+ // left operand. This ensures that valueOf methods on the right
+ // operand are always executed.
+ if (!IS_NUMBER(y)) y = %ToNumber(y);
+ // Optimize for the case where we end up shifting a value
+ // that doesn't convert to a number. This is common in
+ // certain benchmarks.
+ if (NUMBER_IS_NAN(x)) return 0;
+ }
+ return %NumberSar(x, y);
}
// ECMA-262, section 11.7.3, page 52.
-function SHR(x) {
- return %NumberShr(%ToNumber(this), %ToNumber(x));
+function SHR(y) {
+ var x = IS_NUMBER(this) ? this : %ToNumber(this);
+ if (!IS_NUMBER(y)) y = %ToNumber(y);
+ return %NumberShr(x, y);
}
@@ -341,9 +391,10 @@ function CALL_NON_FUNCTION_AS_CONSTRUCTOR() {
function APPLY_PREPARE(args) {
var length;
- // First check whether length is a positive Smi and args is an array. This is the
- // fast case. If this fails, we do the slow case that takes care of more eventualities
- if (%_IsArray(args)) {
+ // First check whether length is a positive Smi and args is an
+ // array. This is the fast case. If this fails, we do the slow case
+ // that takes care of more eventualities.
+ if (IS_ARRAY(args)) {
length = args.length;
if (%_IsSmi(length) && length >= 0 && length < 0x800000 && IS_FUNCTION(this)) {
return length;
@@ -364,9 +415,7 @@ function APPLY_PREPARE(args) {
}
// Make sure the arguments list has the right type.
- if (args != null &&
- !%HasArrayClass(args) &&
- !%HasArgumentsClass(args)) {
+ if (args != null && !IS_ARRAY(args) && !IS_ARGUMENTS(args)) {
throw %MakeTypeError('apply_wrong_args', []);
}
diff --git a/V8Binding/v8/src/scopeinfo.cc b/V8Binding/v8/src/scopeinfo.cc
index fedfbd6..8a237fd 100644
--- a/V8Binding/v8/src/scopeinfo.cc
+++ b/V8Binding/v8/src/scopeinfo.cc
@@ -432,10 +432,13 @@ int ScopeInfo<Allocator>::ContextSlotIndex(Code* code,
String* name,
Variable::Mode* mode) {
ASSERT(name->IsSymbol());
+ int result = ContextSlotCache::Lookup(code, name, mode);
+ if (result != ContextSlotCache::kNotFound) return result;
if (code->sinfo_size() > 0) {
// Loop below depends on the NULL sentinel after the context slot names.
ASSERT(NumberOfContextSlots(code) >= Context::MIN_CONTEXT_SLOTS ||
*(ContextEntriesAddr(code) + 1) == NULL);
+
// slots start after length entry
Object** p0 = ContextEntriesAddr(code) + 1;
Object** p = p0;
@@ -443,14 +446,18 @@ int ScopeInfo<Allocator>::ContextSlotIndex(Code* code,
while (*p != NULL) {
if (*p == name) {
ASSERT(((p - p0) & 1) == 0);
- if (mode != NULL) {
- ReadInt(p + 1, reinterpret_cast<int*>(mode));
- }
- return ((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
+ int v;
+ ReadInt(p + 1, &v);
+ Variable::Mode mode_value = static_cast<Variable::Mode>(v);
+ if (mode != NULL) *mode = mode_value;
+ result = ((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
+ ContextSlotCache::Update(code, name, mode_value, result);
+ return result;
}
p += 2;
}
}
+ ContextSlotCache::Update(code, name, Variable::INTERNAL, -1);
return -1;
}
@@ -526,7 +533,78 @@ int ScopeInfo<Allocator>::NumberOfLocals() const {
}
+int ContextSlotCache::Hash(Code* code, String* name) {
+ // Uses only lower 32 bits if pointers are larger.
+ uintptr_t addr_hash =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(code)) >> 2;
+ return (addr_hash ^ name->Hash()) % kLength;
+}
+
+
+int ContextSlotCache::Lookup(Code* code,
+ String* name,
+ Variable::Mode* mode) {
+ int index = Hash(code, name);
+ Key& key = keys_[index];
+ if ((key.code == code) && key.name->Equals(name)) {
+ Value result(values_[index]);
+ if (mode != NULL) *mode = result.mode();
+ return result.index() + kNotFound;
+ }
+ return kNotFound;
+}
+
+
+void ContextSlotCache::Update(Code* code,
+ String* name,
+ Variable::Mode mode,
+ int slot_index) {
+ String* symbol;
+ ASSERT(slot_index > kNotFound);
+ if (Heap::LookupSymbolIfExists(name, &symbol)) {
+ int index = Hash(code, symbol);
+ Key& key = keys_[index];
+ key.code = code;
+ key.name = symbol;
+  // Note that Value only accepts an unsigned index.
+ values_[index] = Value(mode, slot_index - kNotFound).raw();
+#ifdef DEBUG
+ ValidateEntry(code, name, mode, slot_index);
+#endif
+ }
+}
+
+
+void ContextSlotCache::Clear() {
+ for (int index = 0; index < kLength; index++) keys_[index].code = NULL;
+}
+
+
+ContextSlotCache::Key ContextSlotCache::keys_[ContextSlotCache::kLength];
+
+
+uint32_t ContextSlotCache::values_[ContextSlotCache::kLength];
+
+
#ifdef DEBUG
+
+void ContextSlotCache::ValidateEntry(Code* code,
+ String* name,
+ Variable::Mode mode,
+ int slot_index) {
+ String* symbol;
+ if (Heap::LookupSymbolIfExists(name, &symbol)) {
+ int index = Hash(code, name);
+ Key& key = keys_[index];
+ ASSERT(key.code == code);
+ ASSERT(key.name->Equals(name));
+ Value result(values_[index]);
+ ASSERT(result.mode() == mode);
+ ASSERT(result.index() + kNotFound == slot_index);
+ }
+}
+
+
template <class Allocator>
static void PrintList(const char* list_name,
int nof_internal_slots,
diff --git a/V8Binding/v8/src/scopeinfo.h b/V8Binding/v8/src/scopeinfo.h
index a097d34..28d169a 100644
--- a/V8Binding/v8/src/scopeinfo.h
+++ b/V8Binding/v8/src/scopeinfo.h
@@ -163,6 +163,74 @@ class ZoneScopeInfo: public ScopeInfo<ZoneListAllocationPolicy> {
};
+// Cache for mapping (code, property name) into context slot index.
+// The cache contains both positive and negative results.
+// A slot index of -1 means the property is absent.
+// Cleared at startup and prior to mark-sweep collection.
+class ContextSlotCache {
+ public:
+ // Lookup context slot index for (code, name).
+ // If absent, kNotFound is returned.
+ static int Lookup(Code* code,
+ String* name,
+ Variable::Mode* mode);
+
+ // Update an element in the cache.
+ static void Update(Code* code,
+ String* name,
+ Variable::Mode mode,
+ int slot_index);
+
+ // Clear the cache.
+ static void Clear();
+
+ static const int kNotFound = -2;
+ private:
+ inline static int Hash(Code* code, String* name);
+
+#ifdef DEBUG
+ static void ValidateEntry(Code* code,
+ String* name,
+ Variable::Mode mode,
+ int slot_index);
+#endif
+
+ static const int kLength = 256;
+ struct Key {
+ Code* code;
+ String* name;
+ };
+
+ struct Value {
+ Value(Variable::Mode mode, int index) {
+ ASSERT(ModeField::is_valid(mode));
+ ASSERT(IndexField::is_valid(index));
+ value_ = ModeField::encode(mode) | IndexField::encode(index);
+ ASSERT(mode == this->mode());
+ ASSERT(index == this->index());
+ }
+
+ inline Value(uint32_t value) : value_(value) {}
+
+ uint32_t raw() { return value_; }
+
+ Variable::Mode mode() { return ModeField::decode(value_); }
+
+ int index() { return IndexField::decode(value_); }
+
+ // Bit fields in value_ (type, shift, size). Must be public so the
+ // constants can be embedded in generated code.
+ class ModeField: public BitField<Variable::Mode, 0, 3> {};
+ class IndexField: public BitField<int, 3, 32-3> {};
+ private:
+ uint32_t value_;
+ };
+
+ static Key keys_[kLength];
+ static uint32_t values_[kLength];
+};
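
The Value packing is a plain bit-field split: the low 3 bits hold the variable mode and the remaining 29 bits hold the slot index. The same encoding, sketched in JavaScript for illustration (field widths taken from ModeField and IndexField above; small values keep the 32-bit arithmetic exact):

  function encodeValue(mode, index) { return (index << 3) | mode; }
  function decodeMode(value)  { return value & 7; }    // low 3 bits
  function decodeIndex(value) { return value >>> 3; }  // upper 29 bits

  var v = encodeValue(2, 5);
  print(decodeMode(v) + ', ' + decodeIndex(v));  // 2, 5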
+
+
} } // namespace v8::internal
#endif // V8_SCOPEINFO_H_
diff --git a/V8Binding/v8/src/scopes.cc b/V8Binding/v8/src/scopes.cc
index 7122eb0..88b1c66 100644
--- a/V8Binding/v8/src/scopes.cc
+++ b/V8Binding/v8/src/scopes.cc
@@ -81,12 +81,12 @@ Variable* LocalsMap::Declare(Scope* scope,
Handle<String> name,
Variable::Mode mode,
bool is_valid_LHS,
- bool is_this) {
+ Variable::Kind kind) {
HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true);
if (p->value == NULL) {
// The variable has not been declared yet -> insert it.
ASSERT(p->key == name.location());
- p->value = new Variable(scope, name, mode, is_valid_LHS, is_this);
+ p->value = new Variable(scope, name, mode, is_valid_LHS, kind);
}
return reinterpret_cast<Variable*>(p->value);
}
@@ -169,7 +169,8 @@ void Scope::Initialize(bool inside_with) {
// such parameter is 'this' which is passed on the stack when
// invoking scripts
{ Variable* var =
- locals_.Declare(this, Factory::this_symbol(), Variable::VAR, false, true);
+ locals_.Declare(this, Factory::this_symbol(), Variable::VAR,
+ false, Variable::THIS);
var->rewrite_ = new Slot(var, Slot::PARAMETER, -1);
receiver_ = new VariableProxy(Factory::this_symbol(), true, false);
receiver_->BindTo(var);
@@ -179,7 +180,8 @@ void Scope::Initialize(bool inside_with) {
// Declare 'arguments' variable which exists in all functions.
// Note that it may never be accessed, in which case it won't
// be allocated during variable allocation.
- Declare(Factory::arguments_symbol(), Variable::VAR);
+ locals_.Declare(this, Factory::arguments_symbol(), Variable::VAR,
+ true, Variable::ARGUMENTS);
}
}
@@ -203,7 +205,7 @@ Variable* Scope::Lookup(Handle<String> name) {
Variable* Scope::DeclareFunctionVar(Handle<String> name) {
ASSERT(is_function_scope() && function_ == NULL);
- function_ = new Variable(this, name, Variable::CONST, true, false);
+ function_ = new Variable(this, name, Variable::CONST, true, Variable::NORMAL);
return function_;
}
@@ -213,7 +215,7 @@ Variable* Scope::Declare(Handle<String> name, Variable::Mode mode) {
// INTERNAL variables are allocated explicitly, and TEMPORARY
// variables are allocated via NewTemporary().
ASSERT(mode == Variable::VAR || mode == Variable::CONST);
- return locals_.Declare(this, name, mode, true, false);
+ return locals_.Declare(this, name, mode, true, Variable::NORMAL);
}
@@ -247,7 +249,8 @@ void Scope::RemoveUnresolved(VariableProxy* var) {
VariableProxy* Scope::NewTemporary(Handle<String> name) {
- Variable* var = new Variable(this, name, Variable::TEMPORARY, true, false);
+ Variable* var = new Variable(this, name, Variable::TEMPORARY, true,
+ Variable::NORMAL);
VariableProxy* tmp = new VariableProxy(name, false, false);
tmp->BindTo(var);
temps_.Add(var);
@@ -503,7 +506,7 @@ Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) {
Variable* var = map->Lookup(name);
if (var == NULL) {
// Declare a new non-local.
- var = map->Declare(NULL, name, mode, true, false);
+ var = map->Declare(NULL, name, mode, true, Variable::NORMAL);
// Allocate it by giving it a dynamic lookup.
var->rewrite_ = new Slot(var, Slot::LOOKUP, -1);
}
@@ -619,7 +622,7 @@ void Scope::ResolveVariable(Scope* global_scope,
// We must have a global variable.
ASSERT(global_scope != NULL);
var = new Variable(global_scope, proxy->name(),
- Variable::DYNAMIC, true, false);
+ Variable::DYNAMIC, true, Variable::NORMAL);
} else if (scope_inside_with_) {
// If we are inside a with statement we give up and look up
@@ -797,7 +800,7 @@ void Scope::AllocateParameterLocals() {
// are never allocated in the context).
Variable* arguments_shadow =
new Variable(this, Factory::arguments_shadow_symbol(),
- Variable::INTERNAL, true, false);
+ Variable::INTERNAL, true, Variable::ARGUMENTS);
arguments_shadow_ =
new VariableProxy(Factory::arguments_shadow_symbol(), false, false);
arguments_shadow_->BindTo(arguments_shadow);
diff --git a/V8Binding/v8/src/scopes.h b/V8Binding/v8/src/scopes.h
index b2f61ef..ea4e0f7 100644
--- a/V8Binding/v8/src/scopes.h
+++ b/V8Binding/v8/src/scopes.h
@@ -47,7 +47,7 @@ class LocalsMap: public HashMap {
virtual ~LocalsMap();
Variable* Declare(Scope* scope, Handle<String> name, Variable::Mode mode,
- bool is_valid_LHS, bool is_this);
+ bool is_valid_LHS, Variable::Kind kind);
Variable* Lookup(Handle<String> name);
};
diff --git a/V8Binding/v8/src/serialize.cc b/V8Binding/v8/src/serialize.cc
index fb66d27..2939c01 100644
--- a/V8Binding/v8/src/serialize.cc
+++ b/V8Binding/v8/src/serialize.cc
@@ -450,20 +450,26 @@ void ExternalReferenceTable::AddFromId(TypeCode type,
const char* name) {
Address address;
switch (type) {
- case C_BUILTIN:
- address = Builtins::c_function_address(
- static_cast<Builtins::CFunctionId>(id));
+ case C_BUILTIN: {
+ ExternalReference ref(static_cast<Builtins::CFunctionId>(id));
+ address = ref.address();
break;
- case BUILTIN:
- address = Builtins::builtin_address(static_cast<Builtins::Name>(id));
+ }
+ case BUILTIN: {
+ ExternalReference ref(static_cast<Builtins::Name>(id));
+ address = ref.address();
break;
- case RUNTIME_FUNCTION:
- address = Runtime::FunctionForId(
- static_cast<Runtime::FunctionId>(id))->entry;
+ }
+ case RUNTIME_FUNCTION: {
+ ExternalReference ref(static_cast<Runtime::FunctionId>(id));
+ address = ref.address();
break;
- case IC_UTILITY:
- address = IC::AddressFromUtilityId(static_cast<IC::UtilityId>(id));
+ }
+ case IC_UTILITY: {
+ ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)));
+ address = ref.address();
break;
+ }
default:
UNREACHABLE();
return;
@@ -642,10 +648,14 @@ void ExternalReferenceTable::PopulateTable() {
"StubCache::secondary_->value");
// Runtime entries
- Add(FUNCTION_ADDR(Runtime::PerformGC),
+ Add(ExternalReference::perform_gc_function().address(),
RUNTIME_ENTRY,
1,
"Runtime::PerformGC");
+ Add(ExternalReference::random_positive_smi_function().address(),
+ RUNTIME_ENTRY,
+ 2,
+ "V8::RandomPositiveSmi");
// Miscellaneous
Add(ExternalReference::builtin_passed_function().address(),
@@ -701,6 +711,10 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
13,
"mul_two_doubles");
+ Add(ExternalReference::compare_doubles().address(),
+ UNCLASSIFIED,
+ 14,
+ "compare_doubles");
#endif
}
@@ -1251,15 +1265,19 @@ RelativeAddress Serializer::Allocate(HeapObject* obj) {
found = Heap::InSpace(obj, s);
}
CHECK(found);
+ int size = obj->Size();
if (s == NEW_SPACE) {
- Space* space = Heap::TargetSpace(obj);
- ASSERT(space == Heap::old_pointer_space() ||
- space == Heap::old_data_space());
- s = (space == Heap::old_pointer_space()) ?
- OLD_POINTER_SPACE :
- OLD_DATA_SPACE;
+ if (size > Heap::MaxObjectSizeInPagedSpace()) {
+ s = LO_SPACE;
+ } else {
+ OldSpace* space = Heap::TargetSpace(obj);
+ ASSERT(space == Heap::old_pointer_space() ||
+ space == Heap::old_data_space());
+ s = (space == Heap::old_pointer_space()) ?
+ OLD_POINTER_SPACE :
+ OLD_DATA_SPACE;
+ }
}
- int size = obj->Size();
GCTreatment gc_treatment = DataObject;
if (obj->IsFixedArray()) gc_treatment = PointerObject;
else if (obj->IsCode()) gc_treatment = CodeObject;
diff --git a/V8Binding/v8/src/spaces.cc b/V8Binding/v8/src/spaces.cc
index 72b028c..077bcab 100644
--- a/V8Binding/v8/src/spaces.cc
+++ b/V8Binding/v8/src/spaces.cc
@@ -1265,7 +1265,7 @@ void FreeListNode::set_size(int size_in_bytes) {
  // If the block is too small (e.g., one or two words) to hold both a size
// field and a next pointer, we give it a filler map that gives it the
// correct size.
- if (size_in_bytes > Array::kHeaderSize) {
+ if (size_in_bytes > ByteArray::kHeaderSize) {
set_map(Heap::byte_array_map());
ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
} else if (size_in_bytes == kPointerSize) {
diff --git a/V8Binding/v8/src/spaces.h b/V8Binding/v8/src/spaces.h
index a62b0a8..8ce807f 100644
--- a/V8Binding/v8/src/spaces.h
+++ b/V8Binding/v8/src/spaces.h
@@ -1041,7 +1041,6 @@ class SemiSpaceIterator : public ObjectIterator {
HeapObject* object = HeapObject::FromAddress(current_);
int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
- ASSERT_OBJECT_SIZE(size);
current_ += size;
return object;
@@ -1271,7 +1270,7 @@ class FreeListNode: public HeapObject {
inline void set_next(Address next);
private:
- static const int kNextOffset = Array::kHeaderSize;
+ static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};
@@ -1305,7 +1304,8 @@ class OldSpaceFreeList BASE_EMBEDDED {
private:
// The size range of blocks, in bytes. (Smaller allocations are allowed, but
// will always result in waste.)
- static const int kMinBlockSize = Array::kHeaderSize + kPointerSize;
+ static const int kMinBlockSize =
+ POINTER_SIZE_ALIGN(ByteArray::kHeaderSize) + kPointerSize;
static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
// The identity of the owning space, for building allocation Failure
diff --git a/V8Binding/v8/src/string.js b/V8Binding/v8/src/string.js
index df1f393..6164eb8 100644
--- a/V8Binding/v8/src/string.js
+++ b/V8Binding/v8/src/string.js
@@ -35,7 +35,7 @@
// Set the String function and constructor.
%SetCode($String, function(x) {
var value = %_ArgumentsLength() == 0 ? '' : ToString(x);
- if (%IsConstructCall()) {
+ if (%_IsConstructCall()) {
%_SetValueOf(this, value);
} else {
return value;
@@ -46,7 +46,7 @@
// ECMA-262 section 15.5.4.2
function StringToString() {
- if (!IS_STRING(this) && !%HasStringClass(this))
+ if (!IS_STRING(this) && !IS_STRING_WRAPPER(this))
throw new $TypeError('String.prototype.toString is not generic');
return %_ValueOf(this);
}
@@ -54,7 +54,7 @@ function StringToString() {
// ECMA-262 section 15.5.4.3
function StringValueOf() {
- if (!IS_STRING(this) && !%HasStringClass(this))
+ if (!IS_STRING(this) && !IS_STRING_WRAPPER(this))
throw new $TypeError('String.prototype.valueOf is not generic');
return %_ValueOf(this);
}
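
With the IS_STRING_WRAPPER check, these methods keep accepting both string primitives and String wrapper objects while rejecting everything else, matching the ECMA-262 "not generic" requirement:

  print(String.prototype.toString.call('abc'));              // abc
  print(String.prototype.toString.call(new String('abc')));  // abc
  try {
    String.prototype.toString.call(42);
  } catch (e) {
    print(e instanceof TypeError);  // true
  }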
@@ -370,10 +370,10 @@ function addCaptureString(builder, matchInfo, index) {
// 'abcd'.replace(/(.)/g, function() { return RegExp.$1; }
// should be 'abcd' and not 'dddd' (or anything else).
function StringReplaceRegExpWithFunction(subject, regexp, replace) {
- var result = new ReplaceResultBuilder(subject);
var lastMatchInfo = DoRegExpExec(regexp, subject, 0);
if (IS_NULL(lastMatchInfo)) return subject;
+ var result = new ReplaceResultBuilder(subject);
// There's at least one match. If the regexp is global, we have to loop
// over all matches. The loop is not in C++ code here like the one in
// RegExp.prototype.exec, because of the interleaved function application.
@@ -498,10 +498,8 @@ function StringSlice(start, end) {
// ECMA-262 section 15.5.4.14
function StringSplit(separator, limit) {
var subject = ToString(this);
- var result = [];
- var lim = (limit === void 0) ? 0xffffffff : ToUint32(limit);
-
- if (lim === 0) return result;
+ limit = (limit === void 0) ? 0xffffffff : ToUint32(limit);
+ if (limit === 0) return [];
// ECMA-262 says that if separator is undefined, the result should
// be an array of size 1 containing the entire string. SpiderMonkey
@@ -509,28 +507,31 @@ function StringSplit(separator, limit) {
// undefined is explicitly given, they convert it to a string and
// use that. We do as SpiderMonkey and KJS.
if (%_ArgumentsLength() === 0) {
- result[result.length] = subject;
- return result;
+ return [subject];
}
var length = subject.length;
- var currentIndex = 0;
- var startIndex = 0;
-
- var sep;
if (IS_REGEXP(separator)) {
- sep = separator;
- %_Log('regexp', 'regexp-split,%0S,%1r', [subject, sep]);
+ %_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
} else {
- sep = ToString(separator);
+ separator = ToString(separator);
+      // If the separator string is empty then return the individual
+      // characters of the subject.
+ if (separator.length == 0) {
+ var result = $Array(length);
+ for (var i = 0; i < length; i++) result[i] = subject[i];
+ return result;
+ }
}
if (length === 0) {
- if (splitMatch(sep, subject, 0, 0) != null) return result;
- result[result.length] = subject;
- return result;
+ if (splitMatch(separator, subject, 0, 0) != null) return [];
+ return [subject];
}
+ var currentIndex = 0;
+ var startIndex = 0;
+ var result = [];
+
while (true) {
if (startIndex === length) {
@@ -538,7 +539,7 @@ function StringSplit(separator, limit) {
return result;
}
- var lastMatchInfo = splitMatch(sep, subject, currentIndex, startIndex);
+ var lastMatchInfo = splitMatch(separator, subject, currentIndex, startIndex);
if (IS_NULL(lastMatchInfo)) {
result[result.length] = subject.slice(currentIndex, length);
@@ -553,21 +554,18 @@ function StringSplit(separator, limit) {
continue;
}
- result[result.length] =
- SubString(subject, currentIndex, lastMatchInfo[CAPTURE0]);
- if (result.length === lim) return result;
+ result[result.length] = SubString(subject, currentIndex, lastMatchInfo[CAPTURE0]);
+ if (result.length === limit) return result;
for (var i = 2; i < NUMBER_OF_CAPTURES(lastMatchInfo); i += 2) {
var start = lastMatchInfo[CAPTURE(i)];
var end = lastMatchInfo[CAPTURE(i + 1)];
if (start != -1 && end != -1) {
- result[result.length] = SubString(subject,
- lastMatchInfo[CAPTURE(i)],
- lastMatchInfo[CAPTURE(i + 1)]);
+ result[result.length] = SubString(subject, start, end);
} else {
result[result.length] = void 0;
}
- if (result.length === lim) return result;
+ if (result.length === limit) return result;
}
startIndex = currentIndex = endIndex;
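
Both behavioural refinements in this hunk are easy to check from JavaScript: an explicit limit of 0 returns an empty array before any matching work, and an empty string separator splits the subject into its individual characters:

  'a,b,c'.split(',', 0);  // []
  'a,b,c'.split(',', 2);  // ['a', 'b']
  'abc'.split('');        // ['a', 'b', 'c']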
diff --git a/V8Binding/v8/src/stub-cache.cc b/V8Binding/v8/src/stub-cache.cc
index f7e5456..1999d13 100644
--- a/V8Binding/v8/src/stub-cache.cc
+++ b/V8Binding/v8/src/stub-cache.cc
@@ -103,7 +103,7 @@ Object* StubCache::ComputeLoadField(String* name,
LoadStubCompiler compiler;
code = compiler.CompileLoadField(receiver, holder, field_index, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("LoadIC", Code::cast(code), name));
+ LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return code;
}
@@ -122,7 +122,7 @@ Object* StubCache::ComputeLoadCallback(String* name,
LoadStubCompiler compiler;
code = compiler.CompileLoadCallback(receiver, holder, callback, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("LoadIC", Code::cast(code), name));
+ LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return code;
}
@@ -141,7 +141,7 @@ Object* StubCache::ComputeLoadConstant(String* name,
LoadStubCompiler compiler;
code = compiler.CompileLoadConstant(receiver, holder, value, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("LoadIC", Code::cast(code), name));
+ LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return code;
}
@@ -158,7 +158,7 @@ Object* StubCache::ComputeLoadInterceptor(String* name,
LoadStubCompiler compiler;
code = compiler.CompileLoadInterceptor(receiver, holder, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("LoadIC", Code::cast(code), name));
+ LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return code;
}
@@ -172,6 +172,24 @@ Object* StubCache::ComputeLoadNormal(String* name, JSObject* receiver) {
}
+Object* StubCache::ComputeLoadGlobal(String* name,
+ JSGlobalObject* receiver,
+ JSGlobalPropertyCell* cell,
+ bool is_dont_delete) {
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
+ if (code->IsUndefined()) {
+ LoadStubCompiler compiler;
+ code = compiler.CompileLoadGlobal(receiver, cell, name, is_dont_delete);
+ if (code->IsFailure()) return code;
+ LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ if (result->IsFailure()) return code;
+ }
+ return Set(name, receiver->map(), Code::cast(code));
+}
+
+
Object* StubCache::ComputeKeyedLoadField(String* name,
JSObject* receiver,
JSObject* holder,
@@ -182,7 +200,7 @@ Object* StubCache::ComputeKeyedLoadField(String* name,
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadField(name, receiver, holder, field_index);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+ LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -201,7 +219,7 @@ Object* StubCache::ComputeKeyedLoadConstant(String* name,
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadConstant(name, receiver, holder, value);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+ LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -219,7 +237,7 @@ Object* StubCache::ComputeKeyedLoadInterceptor(String* name,
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadInterceptor(receiver, holder, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+ LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -238,7 +256,7 @@ Object* StubCache::ComputeKeyedLoadCallback(String* name,
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadCallback(name, receiver, holder, callback);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+ LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -256,7 +274,7 @@ Object* StubCache::ComputeKeyedLoadArrayLength(String* name,
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadArrayLength(name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+ LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -273,7 +291,7 @@ Object* StubCache::ComputeKeyedLoadStringLength(String* name,
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadStringLength(name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+ LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -290,7 +308,7 @@ Object* StubCache::ComputeKeyedLoadFunctionPrototype(String* name,
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadFunctionPrototype(name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+ LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -309,7 +327,7 @@ Object* StubCache::ComputeStoreField(String* name,
StoreStubCompiler compiler;
code = compiler.CompileStoreField(receiver, field_index, transition, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("StoreIC", Code::cast(code), name));
+ LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -317,6 +335,23 @@ Object* StubCache::ComputeStoreField(String* name,
}
+Object* StubCache::ComputeStoreGlobal(String* name,
+ JSGlobalObject* receiver,
+ JSGlobalPropertyCell* cell) {
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, NORMAL);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
+ if (code->IsUndefined()) {
+ StoreStubCompiler compiler;
+ code = compiler.CompileStoreGlobal(receiver, cell, name);
+ if (code->IsFailure()) return code;
+ LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ if (result->IsFailure()) return code;
+ }
+ return Set(name, receiver->map(), Code::cast(code));
+}
+
+
Object* StubCache::ComputeStoreCallback(String* name,
JSObject* receiver,
AccessorInfo* callback) {
@@ -327,7 +362,7 @@ Object* StubCache::ComputeStoreCallback(String* name,
StoreStubCompiler compiler;
code = compiler.CompileStoreCallback(receiver, callback, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("StoreIC", Code::cast(code), name));
+ LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -344,7 +379,7 @@ Object* StubCache::ComputeStoreInterceptor(String* name,
StoreStubCompiler compiler;
code = compiler.CompileStoreInterceptor(receiver, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("StoreIC", Code::cast(code), name));
+ LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -361,7 +396,7 @@ Object* StubCache::ComputeKeyedStoreField(String* name, JSObject* receiver,
KeyedStoreStubCompiler compiler;
code = compiler.CompileStoreField(receiver, field_index, transition, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("KeyedStoreIC", Code::cast(code), name));
+ LOG(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -409,10 +444,11 @@ Object* StubCache::ComputeCallConstant(int argc,
// caches.
if (!function->is_compiled()) return Failure::InternalError();
// Compile the stub - only create stubs for fully compiled functions.
- CallStubCompiler compiler(argc);
- code = compiler.CompileCallConstant(object, holder, function, check, flags);
+ CallStubCompiler compiler(argc, in_loop);
+ code = compiler.CompileCallConstant(object, holder, function, check);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("CallIC", Code::cast(code), name));
+ ASSERT_EQ(flags, Code::cast(code)->flags());
+ LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -442,10 +478,11 @@ Object* StubCache::ComputeCallField(int argc,
argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
- CallStubCompiler compiler(argc);
- code = compiler.CompileCallField(object, holder, index, name, flags);
+ CallStubCompiler compiler(argc, in_loop);
+ code = compiler.CompileCallField(object, holder, index, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("CallIC", Code::cast(code), name));
+ ASSERT_EQ(flags, Code::cast(code)->flags());
+ LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -475,10 +512,11 @@ Object* StubCache::ComputeCallInterceptor(int argc,
argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
- CallStubCompiler compiler(argc);
+ CallStubCompiler compiler(argc, NOT_IN_LOOP);
code = compiler.CompileCallInterceptor(object, holder, name);
if (code->IsFailure()) return code;
- LOG(CodeCreateEvent("CallIC", Code::cast(code), name));
+ ASSERT_EQ(flags, Code::cast(code)->flags());
+ LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -496,6 +534,33 @@ Object* StubCache::ComputeCallNormal(int argc,
}
+Object* StubCache::ComputeCallGlobal(int argc,
+ InLoopFlag in_loop,
+ String* name,
+ JSGlobalObject* receiver,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function) {
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::CALL_IC, NORMAL, in_loop, argc);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
+ if (code->IsUndefined()) {
+ // If the function hasn't been compiled yet, we cannot do it now
+ // because it may cause GC. To avoid this issue, we return an
+ // internal error which will make sure we do not update any
+ // caches.
+ if (!function->is_compiled()) return Failure::InternalError();
+ CallStubCompiler compiler(argc, in_loop);
+ code = compiler.CompileCallGlobal(receiver, cell, function, name);
+ if (code->IsFailure()) return code;
+ ASSERT_EQ(flags, Code::cast(code)->flags());
+ LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+ Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ if (result->IsFailure()) return result;
+ }
+ return Set(name, receiver->map(), Code::cast(code));
+}
+
+
static Object* GetProbeValue(Code::Flags flags) {
Dictionary* dictionary = Heap::non_monomorphic_cache();
int entry = dictionary->FindNumberEntry(flags);
@@ -632,7 +697,8 @@ Object* StubCache::ComputeLazyCompile(int argc) {
if (result->IsCode()) {
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent("LazyCompile", code, code->arguments_count()));
+ LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
+ code, code->arguments_count()));
}
return result;
}
@@ -780,7 +846,8 @@ Object* StubCompiler::CompileCallInitialize(Code::Flags flags) {
Counters::call_initialize_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent("CallInitialize", code, code->arguments_count()));
+ LOG(CodeCreateEvent(Logger::CALL_INITIALIZE_TAG,
+ code, code->arguments_count()));
}
return result;
}
@@ -795,7 +862,8 @@ Object* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
Counters::call_premonomorphic_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent("CallPreMonomorphic", code, code->arguments_count()));
+ LOG(CodeCreateEvent(Logger::CALL_PRE_MONOMORPHIC_TAG,
+ code, code->arguments_count()));
}
return result;
}
@@ -810,7 +878,8 @@ Object* StubCompiler::CompileCallNormal(Code::Flags flags) {
Counters::call_normal_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent("CallNormal", code, code->arguments_count()));
+ LOG(CodeCreateEvent(Logger::CALL_NORMAL_TAG,
+ code, code->arguments_count()));
}
return result;
}
@@ -825,7 +894,8 @@ Object* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
Counters::call_megamorphic_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent("CallMegamorphic", code, code->arguments_count()));
+ LOG(CodeCreateEvent(Logger::CALL_MEGAMORPHIC_TAG,
+ code, code->arguments_count()));
}
return result;
}
@@ -840,7 +910,7 @@ Object* StubCompiler::CompileCallMiss(Code::Flags flags) {
Counters::call_megamorphic_stubs.Increment();
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent("CallMiss", code, code->arguments_count()));
+ LOG(CodeCreateEvent(Logger::CALL_MISS_TAG, code, code->arguments_count()));
}
return result;
}
@@ -854,7 +924,8 @@ Object* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
if (!result->IsFailure()) {
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent("CallDebugBreak", code, code->arguments_count()));
+ LOG(CodeCreateEvent(Logger::CALL_DEBUG_BREAK_TAG,
+ code, code->arguments_count()));
}
return result;
}
@@ -870,8 +941,8 @@ Object* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
if (!result->IsFailure()) {
Code* code = Code::cast(result);
USE(code);
- LOG(CodeCreateEvent("CallDebugPrepareStepIn", code,
- code->arguments_count()));
+ LOG(CodeCreateEvent(Logger::CALL_DEBUG_PREPARE_STEP_IN_TAG,
+ code, code->arguments_count()));
}
return result;
}
@@ -927,7 +998,7 @@ Object* CallStubCompiler::GetCode(PropertyType type, String* name) {
int argc = arguments_.immediate();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
type,
- NOT_IN_LOOP,
+ in_loop_,
argc);
return GetCodeWithFlags(flags, name);
}
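
Taken together, the Compute* changes above share one probe/compile/cache shape: build a flags word that keys the lookup (now including the in-loop bit), probe the receiver map's code cache, compile and log a fresh stub on a miss, and memoize it before returning. A minimal standalone sketch of that shape, with illustrative names rather than V8's real API:

#include <cstdint>
#include <map>
#include <string>
#include <utility>

enum InLoopFlag { NOT_IN_LOOP, IN_LOOP };

struct Stub { uint32_t flags; };  // stands in for a compiled Code object

// Mix the IC kind, property type, in-loop bit and argc into one key, so a
// stub compiled for a call site inside a loop never collides with a stub
// for the same name outside one.
uint32_t ComputeFlags(int kind, int type, InLoopFlag in_loop, int argc) {
  return kind | (type << 4) | (in_loop << 8) | (argc << 9);
}

std::map<std::pair<std::string, uint32_t>, Stub> code_cache;

Stub* ComputeCallStub(int argc, InLoopFlag in_loop, const std::string& name) {
  uint32_t flags = ComputeFlags(1 /* call IC */, 0 /* normal */, in_loop, argc);
  std::pair<std::string, uint32_t> key(name, flags);
  auto it = code_cache.find(key);
  if (it == code_cache.end()) {
    // Miss: "compile" a stub, then cache it under (name, flags), mirroring
    // the FindInCodeCache / UpdateCodeCache pair in the functions above.
    Stub stub = {flags};
    it = code_cache.insert({key, stub}).first;
  }
  return &it->second;
}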
diff --git a/V8Binding/v8/src/stub-cache.h b/V8Binding/v8/src/stub-cache.h
index b79841a..577e04b 100644
--- a/V8Binding/v8/src/stub-cache.h
+++ b/V8Binding/v8/src/stub-cache.h
@@ -78,6 +78,12 @@ class StubCache : public AllStatic {
static Object* ComputeLoadNormal(String* name, JSObject* receiver);
+ static Object* ComputeLoadGlobal(String* name,
+ JSGlobalObject* receiver,
+ JSGlobalPropertyCell* cell,
+ bool is_dont_delete);
+
+
// ---
static Object* ComputeKeyedLoadField(String* name,
@@ -112,6 +118,10 @@ class StubCache : public AllStatic {
int field_index,
Map* transition = NULL);
+ static Object* ComputeStoreGlobal(String* name,
+ JSGlobalObject* receiver,
+ JSGlobalPropertyCell* cell);
+
static Object* ComputeStoreCallback(String* name,
JSObject* receiver,
AccessorInfo* callback);
@@ -151,6 +161,13 @@ class StubCache : public AllStatic {
Object* object,
JSObject* holder);
+ static Object* ComputeCallGlobal(int argc,
+ InLoopFlag in_loop,
+ String* name,
+ JSGlobalObject* receiver,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function);
+
// ---
static Object* ComputeCallInitialize(int argc, InLoopFlag in_loop);
@@ -416,6 +433,11 @@ class LoadStubCompiler: public StubCompiler {
JSObject* holder,
String* name);
+ Object* CompileLoadGlobal(JSGlobalObject* object,
+ JSGlobalPropertyCell* holder,
+ String* name,
+ bool is_dont_delete);
+
private:
Object* GetCode(PropertyType type, String* name);
};
@@ -457,6 +479,10 @@ class StoreStubCompiler: public StubCompiler {
AccessorInfo* callbacks,
String* name);
Object* CompileStoreInterceptor(JSObject* object, String* name);
+ Object* CompileStoreGlobal(JSGlobalObject* object,
+ JSGlobalPropertyCell* holder,
+ String* name);
+
private:
Object* GetCode(PropertyType type, String* name);
@@ -477,24 +503,28 @@ class KeyedStoreStubCompiler: public StubCompiler {
class CallStubCompiler: public StubCompiler {
public:
- explicit CallStubCompiler(int argc) : arguments_(argc) { }
+ explicit CallStubCompiler(int argc, InLoopFlag in_loop)
+ : arguments_(argc), in_loop_(in_loop) { }
Object* CompileCallField(Object* object,
JSObject* holder,
int index,
- String* name,
- Code::Flags flags);
+ String* name);
Object* CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
- CheckType check,
- Code::Flags flags);
+ CheckType check);
Object* CompileCallInterceptor(Object* object,
JSObject* holder,
String* name);
+ Object* CompileCallGlobal(JSGlobalObject* object,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name);
private:
const ParameterCount arguments_;
+ const InLoopFlag in_loop_;
const ParameterCount& arguments() { return arguments_; }
diff --git a/V8Binding/v8/src/top.cc b/V8Binding/v8/src/top.cc
index 42a2b7e..96d4a01 100644
--- a/V8Binding/v8/src/top.cc
+++ b/V8Binding/v8/src/top.cc
@@ -611,6 +611,11 @@ Failure* Top::ReThrow(Object* exception, MessageLocation* location) {
}
+Failure* Top::ThrowIllegalOperation() {
+ return Throw(Heap::illegal_access_symbol());
+}
+
+
void Top::ScheduleThrow(Object* exception) {
// When scheduling a throw we first throw the exception to get the
// error reporting if it is uncaught before rescheduling it.
diff --git a/V8Binding/v8/src/top.h b/V8Binding/v8/src/top.h
index 53d67e5..25242f7 100644
--- a/V8Binding/v8/src/top.h
+++ b/V8Binding/v8/src/top.h
@@ -239,6 +239,7 @@ class Top {
static Failure* ReThrow(Object* exception, MessageLocation* location = NULL);
static void ScheduleThrow(Object* exception);
static void ReportPendingMessages();
+ static Failure* ThrowIllegalOperation();
// Promote a scheduled exception to pending. Asserts has_scheduled_exception.
static Object* PromoteScheduledException();
diff --git a/V8Binding/v8/src/utils.h b/V8Binding/v8/src/utils.h
index 137e2c4..91662ee 100644
--- a/V8Binding/v8/src/utils.h
+++ b/V8Binding/v8/src/utils.h
@@ -362,6 +362,11 @@ class Vector {
Sort(PointerValueCompare<T>);
}
+ void Truncate(int length) {
+ ASSERT(length <= length_);
+ length_ = length;
+ }
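
Truncate() only narrows the visible length; the backing store is untouched until Dispose() releases it. A toy illustration of that contract (not V8's actual Vector):

#include <cassert>

template <typename T>
class ToyVector {
 public:
  ToyVector(T* start, int length) : start_(start), length_(length) {}
  int length() const { return length_; }
  T& operator[](int i) { assert(0 <= i && i < length_); return start_[i]; }
  void Truncate(int length) {
    assert(length <= length_);  // shrinking only, like the ASSERT above
    length_ = length;
  }
 private:
  T* start_;
  int length_;
};

int main() {
  int backing[4] = {1, 2, 3, 4};
  ToyVector<int> v(backing, 4);
  v.Truncate(2);      // v now exposes only {1, 2}; backing is unchanged
  return v.length();  // 2
}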
+
// Releases the array underlying this vector. Once disposed the
// vector is empty.
void Dispose() {
diff --git a/V8Binding/v8/src/v8-counters.h b/V8Binding/v8/src/v8-counters.h
index 4111312..a62cd74 100644
--- a/V8Binding/v8/src/v8-counters.h
+++ b/V8Binding/v8/src/v8-counters.h
@@ -130,7 +130,15 @@ namespace internal {
SC(keyed_load_inline_miss, V8.KeyedLoadInlineMiss) \
SC(named_load_inline, V8.NamedLoadInline) \
SC(named_load_inline_miss, V8.NamedLoadInlineMiss) \
+ SC(named_load_global_inline, V8.NamedLoadGlobalInline) \
+ SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss) \
SC(keyed_store_field, V8.KeyedStoreField) \
+ SC(keyed_store_inline, V8.KeyedStoreInline) \
+ SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
+ SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
+ SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
+ SC(call_global_inline, V8.CallGlobalInline) \
+ SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \
SC(for_in, V8.ForIn) \
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
diff --git a/V8Binding/v8/src/v8.cc b/V8Binding/v8/src/v8.cc
index 17cb2df..72f74aa 100644
--- a/V8Binding/v8/src/v8.cc
+++ b/V8Binding/v8/src/v8.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -33,6 +33,10 @@
#include "stub-cache.h"
#include "oprofile-agent.h"
+#if V8_TARGET_ARCH_ARM
+#include "arm/simulator-arm.h"
+#endif
+
namespace v8 {
namespace internal {
@@ -62,6 +66,11 @@ bool V8::Initialize(Deserializer *des) {
// Setup the platform OS support.
OS::Setup();
+ // Initialize other runtime facilities
+#if !V8_HOST_ARCH_ARM && V8_TARGET_ARCH_ARM
+ ::assembler::arm::Simulator::Initialize();
+#endif
+
// Setup the object heap
ASSERT(!Heap::HasBeenSetup());
if (!Heap::Setup(create_heap_objects)) {
@@ -69,7 +78,6 @@ bool V8::Initialize(Deserializer *des) {
return false;
}
- // Initialize other runtime facilities
Bootstrapper::Initialize(create_heap_objects);
Builtins::Setup(create_heap_objects);
Top::Initialize();
@@ -130,4 +138,29 @@ void V8::TearDown() {
}
+uint32_t V8::Random() {
+ // Random number generator using George Marsaglia's MWC algorithm.
+ static uint32_t hi = 0;
+ static uint32_t lo = 0;
+
+ // Initialize seed using the system random(). If one of the seeds
+ // should ever become zero again, or if random() returns zero, we
+ // avoid getting stuck with zero bits in hi or lo by re-initializing
+ // them on demand.
+ if (hi == 0) hi = random();
+ if (lo == 0) lo = random();
+
+ // Mix the bits.
+ hi = 36969 * (hi & 0xFFFF) + (hi >> 16);
+ lo = 18273 * (lo & 0xFFFF) + (lo >> 16);
+ return (hi << 16) + (lo & 0xFFFF);
+}
+
+
+Smi* V8::RandomPositiveSmi() {
+ uint32_t random = Random();
+ ASSERT(IsPowerOf2(Smi::kMaxValue + 1));
+ return Smi::FromInt(random & Smi::kMaxValue);
+}
+
} } // namespace v8::internal
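
The new V8::Random above is George Marsaglia's multiply-with-carry generator: each 16-bit half runs its own MWC stream, keeping its carry in the upper 16 bits, and the result concatenates the two streams. A standalone sketch with fixed nonzero seeds:

#include <cstdint>
#include <cstdio>

static uint32_t hi = 0x12345678;  // both seeds must stay nonzero
static uint32_t lo = 0x9abcdef0;

uint32_t NextRandom() {
  // One MWC step per stream: multiply the low 16 bits, add the old carry.
  hi = 36969 * (hi & 0xFFFF) + (hi >> 16);
  lo = 18273 * (lo & 0xFFFF) + (lo >> 16);
  return (hi << 16) + (lo & 0xFFFF);
}

int main() {
  for (int i = 0; i < 3; i++) printf("%08x\n", (unsigned)NextRandom());
  return 0;
}

RandomPositiveSmi can then simply mask with Smi::kMaxValue: kMaxValue + 1 is a power of two (hence the ASSERT), so the mask keeps the low bits of a uniform word uniform.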
diff --git a/V8Binding/v8/src/v8.h b/V8Binding/v8/src/v8.h
index 8cb3c7d..2cfce3d 100644
--- a/V8Binding/v8/src/v8.h
+++ b/V8Binding/v8/src/v8.h
@@ -71,6 +71,7 @@
#include "objects-inl.h"
#include "spaces-inl.h"
#include "heap-inl.h"
+#include "log-inl.h"
#include "messages.h"
namespace v8 {
@@ -80,10 +81,10 @@ class V8 : public AllStatic {
public:
// Global actions.
- // If Initialize is called with des == NULL, the
- // initial state is created from scratch. If a non-null Deserializer
- // is given, the initial state is created by reading the
- // deserialized data into an empty heap.
+ // If Initialize is called with des == NULL, the initial state is
+ // created from scratch. If a non-null Deserializer is given, the
+ // initial state is created by reading the deserialized data into an
+ // empty heap.
static bool Initialize(Deserializer* des);
static void TearDown();
static bool IsRunning() { return is_running_; }
@@ -93,6 +94,11 @@ class V8 : public AllStatic {
// Report process out of memory. Implementation found in api.cc.
static void FatalProcessOutOfMemory(const char* location);
+
+ // Random number generation support. Not cryptographically safe.
+ static uint32_t Random();
+ static Smi* RandomPositiveSmi();
+
private:
// True if engine is currently running
static bool is_running_;
diff --git a/V8Binding/v8/src/v8natives.js b/V8Binding/v8/src/v8natives.js
index fe46351..841c920 100644
--- a/V8Binding/v8/src/v8natives.js
+++ b/V8Binding/v8/src/v8natives.js
@@ -154,7 +154,7 @@ function SetupGlobal() {
// ECMA-262 - 15.1.1.3.
%SetProperty(global, "undefined", void 0, DONT_ENUM | DONT_DELETE);
-
+
// Setup non-enumerable function on the global object.
InstallFunctions(global, DONT_ENUM, $Array(
"isNaN", GlobalIsNaN,
@@ -174,7 +174,7 @@ SetupGlobal();
%SetCode($Boolean, function(x) {
- if (%IsConstructCall()) {
+ if (%_IsConstructCall()) {
%_SetValueOf(this, ToBoolean(x));
} else {
return ToBoolean(x);
@@ -192,7 +192,7 @@ $Object.prototype.constructor = $Object;
// ECMA-262 - 15.2.4.2
function ObjectToString() {
- var c = %ClassOf(this);
+ var c = %_ClassOf(this);
// Hide Arguments from the outside.
if (c === 'Arguments') c = 'Object';
return "[object " + c + "]";
@@ -273,7 +273,7 @@ function ObjectLookupSetter(name) {
%SetCode($Object, function(x) {
- if (%IsConstructCall()) {
+ if (%_IsConstructCall()) {
if (x == null) return this;
return ToObject(x);
} else {
@@ -311,7 +311,7 @@ SetupObject();
function BooleanToString() {
// NOTE: Both Boolean objects and values can enter here as
// 'this'. This is not as dictated by ECMA-262.
- if (!IS_BOOLEAN(this) && !%HasBooleanClass(this))
+ if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this))
throw new $TypeError('Boolean.prototype.toString is not generic');
return ToString(%_ValueOf(this));
}
@@ -320,7 +320,7 @@ function BooleanToString() {
function BooleanValueOf() {
// NOTE: Both Boolean objects and values can enter here as
// 'this'. This is not as dictated by ECMA-262.
- if (!IS_BOOLEAN(this) && !%HasBooleanClass(this))
+ if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this))
throw new $TypeError('Boolean.prototype.valueOf is not generic');
return %_ValueOf(this);
}
@@ -350,7 +350,7 @@ SetupBoolean();
// Set the Number function and constructor.
%SetCode($Number, function(x) {
var value = %_ArgumentsLength() == 0 ? 0 : ToNumber(x);
- if (%IsConstructCall()) {
+ if (%_IsConstructCall()) {
%_SetValueOf(this, value);
} else {
return value;
@@ -365,7 +365,7 @@ function NumberToString(radix) {
// 'this'. This is not as dictated by ECMA-262.
var number = this;
if (!IS_NUMBER(this)) {
- if (!%HasNumberClass(this))
+ if (!IS_NUMBER_WRAPPER(this))
throw new $TypeError('Number.prototype.toString is not generic');
// Get the value of this number in case it's an object.
number = %_ValueOf(this);
@@ -395,7 +395,7 @@ function NumberToLocaleString() {
function NumberValueOf() {
// NOTE: Both Number objects and values can enter here as
// 'this'. This is not as dictated by ECMA-262.
- if (!IS_NUMBER(this) && !%HasNumberClass(this))
+ if (!IS_NUMBER(this) && !IS_NUMBER_WRAPPER(this))
throw new $TypeError('Number.prototype.valueOf is not generic');
return %_ValueOf(this);
}
@@ -502,10 +502,9 @@ SetupNumber();
$Function.prototype.constructor = $Function;
function FunctionSourceString(func) {
- // NOTE: Both Function objects and values can enter here as
- // 'func'. This is not as dictated by ECMA-262.
- if (!IS_FUNCTION(func) && !%HasFunctionClass(func))
+ if (!IS_FUNCTION(func)) {
throw new $TypeError('Function.prototype.toString is not generic');
+ }
var source = %FunctionGetSourceCode(func);
if (!IS_STRING(source)) {
@@ -570,4 +569,3 @@ function SetupFunction() {
}
SetupFunction();
-
diff --git a/V8Binding/v8/src/variables.cc b/V8Binding/v8/src/variables.cc
index 6c9f82f..d9a78a5 100644
--- a/V8Binding/v8/src/variables.cc
+++ b/V8Binding/v8/src/variables.cc
@@ -140,12 +140,12 @@ Variable::Variable(Scope* scope,
Handle<String> name,
Mode mode,
bool is_valid_LHS,
- bool is_this)
+ Kind kind)
: scope_(scope),
name_(name),
mode_(mode),
is_valid_LHS_(is_valid_LHS),
- is_this_(is_this),
+ kind_(kind),
local_if_not_shadowed_(NULL),
is_accessed_from_inner_scope_(false),
rewrite_(NULL) {
diff --git a/V8Binding/v8/src/variables.h b/V8Binding/v8/src/variables.h
index 5062071..c0d1435 100644
--- a/V8Binding/v8/src/variables.h
+++ b/V8Binding/v8/src/variables.h
@@ -137,6 +137,12 @@ class Variable: public ZoneObject {
// in a context
};
+ enum Kind {
+ NORMAL,
+ THIS,
+ ARGUMENTS
+ };
+
// Printing support
static const char* Mode2String(Mode mode);
@@ -172,7 +178,8 @@ class Variable: public ZoneObject {
}
bool is_global() const;
- bool is_this() const { return is_this_; }
+ bool is_this() const { return kind_ == THIS; }
+ bool is_arguments() const { return kind_ == ARGUMENTS; }
Variable* local_if_not_shadowed() const {
ASSERT(mode_ == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
@@ -190,13 +197,13 @@ class Variable: public ZoneObject {
private:
Variable(Scope* scope, Handle<String> name, Mode mode, bool is_valid_LHS,
- bool is_this);
+ Kind kind);
Scope* scope_;
Handle<String> name_;
Mode mode_;
bool is_valid_LHS_;
- bool is_this_;
+ Kind kind_;
Variable* local_if_not_shadowed_;
diff --git a/V8Binding/v8/src/version.cc b/V8Binding/v8/src/version.cc
index d613e94..fd585dc 100644
--- a/V8Binding/v8/src/version.cc
+++ b/V8Binding/v8/src/version.cc
@@ -34,9 +34,9 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1
#define MINOR_VERSION 2
-#define BUILD_NUMBER 7
+#define BUILD_NUMBER 11
#define PATCH_LEVEL 0
-#define CANDIDATE_VERSION false
+#define CANDIDATE_VERSION true
// Define SONAME to have the SCons build put a specific SONAME into the
// shared library instead of the generic SONAME generated from the V8 version
diff --git a/V8Binding/v8/src/virtual-frame.cc b/V8Binding/v8/src/virtual-frame.cc
index 39dbf17..44e5fae 100644
--- a/V8Binding/v8/src/virtual-frame.cc
+++ b/V8Binding/v8/src/virtual-frame.cc
@@ -73,7 +73,6 @@ FrameElement VirtualFrame::CopyElementAt(int index) {
case FrameElement::MEMORY: // Fall through.
case FrameElement::REGISTER:
// All copies are backed by memory or register locations.
- result.set_static_type(target.static_type());
result.set_type(FrameElement::COPY);
result.clear_copied();
result.clear_sync();
@@ -153,7 +152,6 @@ void VirtualFrame::SpillElementAt(int index) {
if (elements_[index].is_register()) {
Unuse(elements_[index].reg());
}
- new_element.set_static_type(elements_[index].static_type());
elements_[index] = new_element;
}
@@ -211,9 +209,6 @@ void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) {
ASSERT(source.is_valid());
elements_[i].clear_sync();
}
- // No code needs to be generated to change the static type of an
- // element.
- elements_[i].set_static_type(target.static_type());
}
}
@@ -246,11 +241,8 @@ void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) {
void VirtualFrame::PrepareForReturn() {
// Spill all locals. This is necessary to make sure all locals have
// the right value when breaking at the return site in the debugger.
- // Set their static type to unknown so that they will match the known
- // return frame.
for (int i = 0; i < expression_base_index(); i++) {
SpillElementAt(i);
- elements_[i].set_static_type(StaticType::unknown());
}
}
@@ -283,7 +275,6 @@ void VirtualFrame::SetElementAt(int index, Result* value) {
// register element, or the new element at frame_index, must be made
// a copy.
int i = register_location(value->reg());
- ASSERT(value->static_type() == elements_[i].static_type());
if (i < frame_index) {
// The register FrameElement is lower in the frame than the new copy.
@@ -310,8 +301,7 @@ void VirtualFrame::SetElementAt(int index, Result* value) {
Use(value->reg(), frame_index);
elements_[frame_index] =
FrameElement::RegisterElement(value->reg(),
- FrameElement::NOT_SYNCED,
- value->static_type());
+ FrameElement::NOT_SYNCED);
}
} else {
ASSERT(value->is_constant());
@@ -328,18 +318,16 @@ void VirtualFrame::PushFrameSlotAt(int index) {
}
-void VirtualFrame::Push(Register reg, StaticType static_type) {
+void VirtualFrame::Push(Register reg) {
if (is_used(reg)) {
int index = register_location(reg);
FrameElement element = CopyElementAt(index);
- ASSERT(static_type.merge(element.static_type()) == element.static_type());
elements_.Add(element);
} else {
Use(reg, element_count());
FrameElement element =
FrameElement::RegisterElement(reg,
- FrameElement::NOT_SYNCED,
- static_type);
+ FrameElement::NOT_SYNCED);
elements_.Add(element);
}
}
diff --git a/V8Binding/v8/src/virtual-frame.h b/V8Binding/v8/src/virtual-frame.h
index 293f9e5..0bf0ca2 100644
--- a/V8Binding/v8/src/virtual-frame.h
+++ b/V8Binding/v8/src/virtual-frame.h
@@ -37,6 +37,8 @@
#include "x64/virtual-frame-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/virtual-frame-arm.h"
+#else
+#error Unsupported target architecture.
#endif
#endif // V8_VIRTUAL_FRAME_H_
diff --git a/V8Binding/v8/src/x64/assembler-x64-inl.h b/V8Binding/v8/src/x64/assembler-x64-inl.h
index 1822568..196f2ee 100644
--- a/V8Binding/v8/src/x64/assembler-x64-inl.h
+++ b/V8Binding/v8/src/x64/assembler-x64-inl.h
@@ -29,6 +29,7 @@
#define V8_X64_ASSEMBLER_X64_INL_H_
#include "cpu.h"
+#include "memory.h"
namespace v8 {
namespace internal {
@@ -70,18 +71,28 @@ void Assembler::emitw(uint16_t x) {
void Assembler::emit_rex_64(Register reg, Register rm_reg) {
+ emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
+}
+
+
+void Assembler::emit_rex_64(XMMRegister reg, Register rm_reg) {
emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}
void Assembler::emit_rex_64(Register reg, const Operand& op) {
+ emit(0x48 | reg.high_bit() << 2 | op.rex_);
+}
+
+
+void Assembler::emit_rex_64(XMMRegister reg, const Operand& op) {
emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
}
void Assembler::emit_rex_64(Register rm_reg) {
ASSERT_EQ(rm_reg.code() & 0xf, rm_reg.code());
- emit(0x48 | (rm_reg.code() >> 3));
+ emit(0x48 | rm_reg.high_bit());
}
@@ -91,17 +102,17 @@ void Assembler::emit_rex_64(const Operand& op) {
void Assembler::emit_rex_32(Register reg, Register rm_reg) {
- emit(0x40 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
+ emit(0x40 | reg.high_bit() << 2 | rm_reg.high_bit());
}
void Assembler::emit_rex_32(Register reg, const Operand& op) {
- emit(0x40 | (reg.code() & 0x8) >> 1 | op.rex_);
+ emit(0x40 | reg.high_bit() << 2 | op.rex_);
}
void Assembler::emit_rex_32(Register rm_reg) {
- emit(0x40 | (rm_reg.code() & 0x8) >> 3);
+ emit(0x40 | rm_reg.high_bit());
}
@@ -111,19 +122,37 @@ void Assembler::emit_rex_32(const Operand& op) {
void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3;
+ byte rex_bits = reg.high_bit() << 2 | rm_reg.high_bit();
if (rex_bits != 0) emit(0x40 | rex_bits);
}
void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
+ byte rex_bits = reg.high_bit() << 2 | op.rex_;
+ if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
+void Assembler::emit_optional_rex_32(XMMRegister reg, const Operand& op) {
byte rex_bits = (reg.code() & 0x8) >> 1 | op.rex_;
if (rex_bits != 0) emit(0x40 | rex_bits);
}
+void Assembler::emit_optional_rex_32(XMMRegister reg, XMMRegister base) {
+ byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
+ if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
+void Assembler::emit_optional_rex_32(XMMRegister reg, Register base) {
+ byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
+ if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
void Assembler::emit_optional_rex_32(Register rm_reg) {
- if (rm_reg.code() & 0x8 != 0) emit(0x41);
+ if (rm_reg.high_bit()) emit(0x41);
}
@@ -147,16 +176,8 @@ void Assembler::set_target_address_at(Address pc, Address target) {
// Implementation of RelocInfo
// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(int delta) {
- if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
- intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
- *p -= delta; // relocate entry
- } else if (rmode_ == JS_RETURN && IsCallInstruction()) {
- // Special handling of js_return when a break point is set (call
- // instruction has been inserted).
- intptr_t* p = reinterpret_cast<intptr_t*>(pc_ + 1);
- *p -= delta; // relocate entry
- } else if (IsInternalReference(rmode_)) {
+void RelocInfo::apply(intptr_t delta) {
+ if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
*p += delta; // relocate entry
@@ -249,39 +270,22 @@ Object** RelocInfo::call_object_address() {
// -----------------------------------------------------------------------------
// Implementation of Operand
-Operand::Operand(Register base, int32_t disp) {
- len_ = 1;
- if (base.is(rsp) || base.is(r12)) {
- // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
- set_sib(kTimes1, rsp, base);
- }
-
- if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
- set_modrm(0, rsp);
- } else if (is_int8(disp)) {
- set_modrm(1, base);
- set_disp8(disp);
- } else {
- set_modrm(2, base);
- set_disp32(disp);
- }
-}
-
-void Operand::set_modrm(int mod, Register rm) {
- ASSERT((mod & -4) == 0);
- buf_[0] = mod << 6 | (rm.code() & 0x7);
+void Operand::set_modrm(int mod, Register rm_reg) {
+ ASSERT(is_uint2(mod));
+ buf_[0] = mod << 6 | rm_reg.low_bits();
// Set REX.B to the high bit of rm.code().
- rex_ |= (rm.code() >> 3);
+ rex_ |= rm_reg.high_bit();
}
void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
ASSERT(len_ == 1);
ASSERT(is_uint2(scale));
- // Use SIB with no index register only for base rsp or r12.
+ // Use SIB with no index register only for base rsp or r12. Otherwise we
+ // would skip the SIB byte entirely.
ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
- buf_[1] = scale << 6 | (index.code() & 0x7) << 3 | (base.code() & 0x7);
- rex_ |= (index.code() >> 3) << 1 | base.code() >> 3;
+ buf_[1] = scale << 6 | index.low_bits() << 3 | base.low_bits();
+ rex_ |= index.high_bit() << 1 | base.high_bit();
len_ = 2;
}
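
The rewritten emitters above transcribe the REX prefix layout 0100WRXB directly: high_bit() is bit 3 of a register code (the R, X, or B extension bit), and low_bits() is the remaining three bits that land in the ModR/M or SIB byte. A sketch of that arithmetic, with an illustrative struct rather than V8's Register:

#include <cstdint>
#include <cstdio>

struct Reg { int code; };  // 0..15; r8..r15 carry codes 8..15

int high_bit(Reg r) { return r.code >> 3; }   // becomes REX.R/X/B
int low_bits(Reg r) { return r.code & 0x7; }  // goes into ModR/M or SIB

uint8_t rex_64(Reg reg, Reg rm) {
  // 0100 W=1 R 0 B, exactly the shape of emit_rex_64(Register, Register).
  return 0x48 | (high_bit(reg) << 2) | high_bit(rm);
}

int main() {
  Reg rax = {0}, r11 = {11};
  // mov r11, rax needs REX.W plus REX.B for r11: 0x49. The low bits of
  // r11 (3) then appear in the ModR/M rm field.
  printf("rex=%02x rm_low=%d\n", rex_64(rax, r11), low_bits(r11));
  return 0;
}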
diff --git a/V8Binding/v8/src/x64/assembler-x64.cc b/V8Binding/v8/src/x64/assembler-x64.cc
index 77bbf52..ced7577 100644
--- a/V8Binding/v8/src/x64/assembler-x64.cc
+++ b/V8Binding/v8/src/x64/assembler-x64.cc
@@ -72,19 +72,61 @@ XMMRegister xmm13 = { 13 };
XMMRegister xmm14 = { 14 };
XMMRegister xmm15 = { 15 };
-// Safe default is no features.
-uint64_t CpuFeatures::supported_ = 0;
+
+Operand::Operand(Register base, int32_t disp): rex_(0) {
+ len_ = 1;
+ if (base.is(rsp) || base.is(r12)) {
+ // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
+ set_sib(times_1, rsp, base);
+ }
+
+ if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
+ set_modrm(0, base);
+ } else if (is_int8(disp)) {
+ set_modrm(1, base);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, base);
+ set_disp32(disp);
+ }
+}
+
+
+Operand::Operand(Register base,
+ Register index,
+ ScaleFactor scale,
+ int32_t disp): rex_(0) {
+ ASSERT(!index.is(rsp));
+ len_ = 1;
+ set_sib(scale, index, base);
+ if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
+ // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
+ // possibly set by set_sib.
+ set_modrm(0, rsp);
+ } else if (is_int8(disp)) {
+ set_modrm(1, rsp);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, rsp);
+ set_disp32(disp);
+ }
+}
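
A worked example of what this second constructor encodes for [rbp + rcx*4 + 12]: the SIB byte is always present, mod=01 with rm=100 announces it plus an 8-bit displacement, and 12 fits in that byte:

#include <cstdint>
#include <cstdio>

int main() {
  int scale = 2;  // times_4 encodes as scale field 2
  int index = 1;  // rcx
  int base = 5;   // rbp
  uint8_t modrm = (1 << 6) | (0 << 3) | 0x4;  // mod=01, reg filled later, rm=100 (SIB)
  uint8_t sib = (uint8_t)((scale << 6) | (index << 3) | base);
  uint8_t disp8 = 12;
  // Prints modrm=44 sib=8d disp8=0c, the operand bytes of e.g.
  // "add reg, [rbp + rcx*4 + 12]".
  printf("modrm=%02x sib=%02x disp8=%02x\n", modrm, sib, disp8);
  return 0;
}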
+
+
+// The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
+// fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
+uint64_t CpuFeatures::supported_ = kDefaultCpuFeatures;
uint64_t CpuFeatures::enabled_ = 0;
void CpuFeatures::Probe() {
ASSERT(Heap::HasBeenSetup());
- ASSERT(supported_ == 0);
+ ASSERT(supported_ == kDefaultCpuFeatures);
if (Serializer::enabled()) return; // No features if we might serialize.
Assembler assm(NULL, 0);
Label cpuid, done;
#define __ assm.
- // Save old esp, since we are going to modify the stack.
+ // Save old rsp, since we are going to modify the stack.
__ push(rbp);
__ pushfq();
__ push(rcx);
@@ -112,11 +154,11 @@ void CpuFeatures::Probe() {
// safe here.
__ bind(&cpuid);
__ movq(rax, Immediate(1));
- supported_ = (1 << CPUID);
+ supported_ = kDefaultCpuFeatures | (1 << CPUID);
{ Scope fscope(CPUID);
__ cpuid();
}
- supported_ = 0;
+ supported_ = kDefaultCpuFeatures;
// Move the result from ecx:edx to rax and make sure to mark the
// CPUID feature as supported.
@@ -140,10 +182,15 @@ void CpuFeatures::Probe() {
Object* code =
Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL);
if (!code->IsCode()) return;
- LOG(CodeCreateEvent("Builtin", Code::cast(code), "CpuFeatures::Probe"));
+ LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
+ Code::cast(code), "CpuFeatures::Probe"));
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
supported_ = probe();
+ // SSE2 and CMOV must be available on an X64 CPU.
+ ASSERT(IsSupported(CPUID));
+ ASSERT(IsSupported(SSE2));
+ ASSERT(IsSupported(CMOV));
}
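
The probe stub built here runs CPUID leaf 1 and returns the feature bits; the new ASSERTs then require CMOV (EDX bit 15) and SSE2 (EDX bit 26), which the AMD64 ABI guarantees on every x64 CPU. The same check done from the host side, as a GCC/Clang-only sketch:

#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return 1;
  int cmov = (edx >> 15) & 1;  // CMOV feature bit
  int sse2 = (edx >> 26) & 1;  // SSE2 feature bit
  printf("cmov=%d sse2=%d\n", cmov, sse2);  // both 1 on any x64 machine
  return 0;
}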
// -----------------------------------------------------------------------------
@@ -298,8 +345,9 @@ void Assembler::GrowBuffer() {
#endif
// copy the data
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+ intptr_t pc_delta = desc.buffer - buffer_;
+ intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
+ (buffer_ + buffer_size_);
memmove(desc.buffer, buffer_, desc.instr_size);
memmove(rc_delta + reloc_info_writer.pos(),
reloc_info_writer.pos(), desc.reloc_size);
@@ -322,11 +370,8 @@ void Assembler::GrowBuffer() {
// relocate runtime entries
for (RelocIterator it(desc); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::RUNTIME_ENTRY) {
- int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
- *p -= pc_delta; // relocate entry
- } else if (rmode == RelocInfo::INTERNAL_REFERENCE) {
- int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+ if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ intptr_t* p = reinterpret_cast<intptr_t*>(it.rinfo()->pc());
if (*p != 0) { // 0 means uninitialized.
*p += pc_delta;
}
@@ -337,13 +382,14 @@ void Assembler::GrowBuffer() {
}
-void Assembler::emit_operand(int rm, const Operand& adr) {
- ASSERT_EQ(rm & 0x07, rm);
+void Assembler::emit_operand(int code, const Operand& adr) {
+ ASSERT(is_uint3(code));
const unsigned length = adr.len_;
ASSERT(length > 0);
// Emit updated ModR/M byte containing the given register.
- pc_[0] = (adr.buf_[0] & ~0x38) | (rm << 3);
+ ASSERT((adr.buf_[0] & 0x38) == 0);
+ pc_[0] = adr.buf_[0] | code << 3;
// Emit the rest of the encoded operand.
for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
@@ -370,6 +416,16 @@ void Assembler::arithmetic_op(byte opcode, Register dst, Register src) {
emit_modrm(dst, src);
}
+
+void Assembler::arithmetic_op_32(byte opcode, Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst, src);
+ emit(opcode);
+ emit_modrm(dst, src);
+}
+
+
void Assembler::immediate_arithmetic_op(byte subcode,
Register dst,
Immediate src) {
@@ -398,16 +454,84 @@ void Assembler::immediate_arithmetic_op(byte subcode,
emit_rex_64(dst);
if (is_int8(src.value_)) {
emit(0x83);
- emit_operand(Register::toRegister(subcode), dst);
+ emit_operand(subcode, dst);
+ emit(src.value_);
+ } else {
+ emit(0x81);
+ emit_operand(subcode, dst);
+ emitl(src.value_);
+ }
+}
+
+
+void Assembler::immediate_arithmetic_op_32(byte subcode,
+ Register dst,
+ Immediate src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+ if (is_int8(src.value_)) {
+ emit(0x83);
+ emit_modrm(subcode, dst);
emit(src.value_);
+ } else if (dst.is(rax)) {
+ emit(0x05 | (subcode << 3));
+ emitl(src.value_);
} else {
emit(0x81);
- emit_operand(Register::toRegister(subcode), dst);
+ emit_modrm(subcode, dst);
emitl(src.value_);
}
}
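
These emitters pick the immediate width per value: opcode 0x83 carries a sign-extended 8-bit immediate, 0x81 a full 32-bit one, and rax additionally gets the short form 0x05 | (subcode << 3) with no ModR/M byte. A sketch of the size decision, using subcode 5 (sub) for illustration:

#include <cstdint>
#include <cstdio>

bool is_int8(int v) { return -128 <= v && v <= 127; }

// Prints the opcode and ModR/M bytes for "sub reg32, imm" the way the
// 32-bit emitter above does.
void PrintSub(int reg_code, int imm) {
  const int subcode = 5;
  uint8_t modrm = (uint8_t)(0xC0 | (subcode << 3) | (reg_code & 7));
  if (is_int8(imm)) {
    printf("83 %02x imm8=%02x\n", modrm, (uint8_t)imm);
  } else if (reg_code == 0) {  // rax/eax short form, no ModR/M byte
    printf("%02x imm32=%08x\n", 0x05 | (subcode << 3), (unsigned)imm);
  } else {
    printf("81 %02x imm32=%08x\n", modrm, (unsigned)imm);
  }
}

int main() {
  PrintSub(1, 16);    // sub ecx, 16   -> 83 e9 imm8=10
  PrintSub(0, 1000);  // sub eax, 1000 -> 2d imm32=000003e8
  PrintSub(1, 1000);  // sub ecx, 1000 -> 81 e9 imm32=000003e8
  return 0;
}

The imm8 test comes first, so even rax uses the two-byte 0x83 form whenever the value fits.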
+void Assembler::immediate_arithmetic_op_32(byte subcode,
+ const Operand& dst,
+ Immediate src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+ if (is_int8(src.value_)) {
+ emit(0x83);
+ emit_operand(subcode, dst);
+ emit(src.value_);
+ } else {
+ emit(0x81);
+ emit_operand(subcode, dst);
+ emitl(src.value_);
+ }
+}
+
+
+void Assembler::immediate_arithmetic_op_8(byte subcode,
+ const Operand& dst,
+ Immediate src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+ ASSERT(is_int8(src.value_) || is_uint8(src.value_));
+ emit(0x80);
+ emit_operand(subcode, dst);
+ emit(src.value_);
+}
+
+
+void Assembler::immediate_arithmetic_op_8(byte subcode,
+ Register dst,
+ Immediate src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (dst.code() > 3) {
+ // Use 64-bit mode byte registers.
+ emit_rex_64(dst);
+ }
+ ASSERT(is_int8(src.value_) || is_uint8(src.value_));
+ emit(0x80);
+ emit_modrm(subcode, dst);
+ emit(src.value_);
+}
+
+
void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -434,6 +558,15 @@ void Assembler::shift(Register dst, int subcode) {
}
+void Assembler::shift_32(Register dst, int subcode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+ emit(0xD3);
+ emit_modrm(subcode, dst);
+}
+
+
void Assembler::bt(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -479,21 +612,13 @@ void Assembler::call(Register adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode: FF /2 r64
- if (adr.code() > 7) {
+ if (adr.high_bit()) {
emit_rex_64(adr);
}
emit(0xFF);
emit_modrm(0x2, adr);
}
-void Assembler::cpuid() {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x0F);
- emit(0xA2);
-}
-
void Assembler::call(const Operand& op) {
EnsureSpace ensure_space(this);
@@ -505,6 +630,66 @@ void Assembler::call(const Operand& op) {
}
+void Assembler::cmovq(Condition cc, Register dst, Register src) {
+ // No need to check CpuInfo for CMOV support, it's a required part of the
+ // 64-bit architecture.
+ ASSERT(cc >= 0); // Use mov for unconditional moves.
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // Opcode: REX.W 0f 40 + cc /r
+ emit_rex_64(dst, src);
+ emit(0x0f);
+ emit(0x40 + cc);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
+ ASSERT(cc >= 0);
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // Opcode: REX.W 0f 40 + cc /r
+ emit_rex_64(dst, src);
+ emit(0x0f);
+ emit(0x40 + cc);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::cmovl(Condition cc, Register dst, Register src) {
+ ASSERT(cc >= 0);
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // Opcode: 0f 40 + cc /r
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x40 + cc);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
+ ASSERT(cc >= 0);
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // Opcode: 0f 40 + cc /r
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x40 + cc);
+ emit_operand(dst, src);
+}
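
The payoff of the new cmov emitters is branch-free selection in generated code. This C++ mirrors the semantics of a cmp followed by cmovl, the pattern a code generator (or an optimizing C++ compiler) may use for a simple min:

#include <cstdint>
#include <cstdio>

int64_t Min(int64_t a, int64_t b) {
  int64_t result = a;
  if (b < a) result = b;  // lowerable to: cmp b,a; cmovl result,b
  return result;
}

int main() {
  printf("%lld\n", (long long)Min(3, -7));  // -7, with no taken branch needed
  return 0;
}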
+
+
+
+void Assembler::cpuid() {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x0F);
+ emit(0xA2);
+}
+
+
void Assembler::cqo() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -513,7 +698,7 @@ void Assembler::cqo() {
}
-void Assembler::dec(Register dst) {
+void Assembler::decq(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst);
@@ -522,7 +707,7 @@ void Assembler::dec(Register dst) {
}
-void Assembler::dec(const Operand& dst) {
+void Assembler::decq(const Operand& dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst);
@@ -531,6 +716,15 @@ void Assembler::dec(const Operand& dst) {
}
+void Assembler::decl(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+ emit(0xFF);
+ emit_operand(1, dst);
+}
+
+
void Assembler::enter(Immediate size) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -556,6 +750,25 @@ void Assembler::idiv(Register src) {
}
+void Assembler::imul(Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src);
+ emit(0xF7);
+ emit_modrm(0x5, src);
+}
+
+
+void Assembler::imul(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0xAF);
+ emit_modrm(dst, src);
+}
+
+
void Assembler::imul(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -582,7 +795,17 @@ void Assembler::imul(Register dst, Register src, Immediate imm) {
}
-void Assembler::inc(Register dst) {
+void Assembler::imull(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xAF);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::incq(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst);
@@ -591,7 +814,7 @@ void Assembler::inc(Register dst) {
}
-void Assembler::inc(const Operand& dst) {
+void Assembler::incq(const Operand& dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst);
@@ -600,6 +823,15 @@ void Assembler::inc(const Operand& dst) {
}
+void Assembler::incl(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+ emit(0xFF);
+ emit_operand(0, dst);
+}
+
+
void Assembler::int3() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -610,7 +842,7 @@ void Assembler::int3() {
void Assembler::j(Condition cc, Label* L) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- ASSERT(0 <= cc && cc < 16);
+ ASSERT(is_uint4(cc));
if (L->is_bound()) {
const int short_size = 2;
const int long_size = 6;
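
When the label is already bound, j(cc, L) chooses between the 2-byte short form (0x70+cc with a rel8) and the 6-byte near form (0x0F, 0x80+cc with a rel32), with the offset measured from the end of the instruction being emitted. A sketch of that size decision:

#include <cstdio>

bool is_int8(long v) { return -128 <= v && v <= 127; }

int JccSize(long label_pos, long pc_offset) {
  const int short_size = 2;
  const int long_size = 6;
  long offs = label_pos - pc_offset;
  // The rel8/rel32 field is relative to the next instruction, hence the
  // subtraction of the candidate instruction's own size.
  return is_int8(offs - short_size) ? short_size : long_size;
}

int main() {
  printf("%d %d\n", JccSize(100, 90), JccSize(1000, 90));  // 2 6
  return 0;
}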
@@ -678,7 +910,7 @@ void Assembler::jmp(Register target) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode FF/4 r64
- if (target.code() > 7) {
+ if (target.high_bit()) {
emit_rex_64(target);
}
emit(0xFF);
@@ -768,6 +1000,16 @@ void Assembler::movl(const Operand& dst, Register src) {
}
+void Assembler::movl(const Operand& dst, Immediate value) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+ emit(0xC7);
+ emit_operand(0x0, dst);
+ emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
+}
+
+
void Assembler::movl(Register dst, Immediate value) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -819,7 +1061,7 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst);
- emit(0xB8 | (dst.code() & 0x7));
+ emit(0xB8 | dst.low_bits());
emitq(reinterpret_cast<uintptr_t>(value), rmode);
}
@@ -828,7 +1070,7 @@ void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst);
- emit(0xB8 | (dst.code() & 0x7)); // Not a ModR/M byte.
+ emit(0xB8 | dst.low_bits());
emitq(value, rmode);
}
@@ -837,12 +1079,65 @@ void Assembler::movq(Register dst, ExternalReference ref) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst);
- emit(0xB8 | (dst.code() & 0x7));
+ emit(0xB8 | dst.low_bits());
emitq(reinterpret_cast<uintptr_t>(ref.address()),
RelocInfo::EXTERNAL_REFERENCE);
}
+void Assembler::movq(const Operand& dst, Immediate value) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xC7);
+ emit_operand(0, dst);
+ emit(value);
+}
+
+
+void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(!Heap::InNewSpace(*value));
+ emit_rex_64(dst);
+ emit(0xB8 | dst.low_bits());
+ if (value->IsHeapObject()) {
+ emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
+ } else {
+ ASSERT_EQ(RelocInfo::NONE, mode);
+ emitq(reinterpret_cast<uintptr_t>(*value), RelocInfo::NONE);
+ }
+}
+
+
+void Assembler::movsxlq(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x63);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::movsxlq(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x63);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movzxbq(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0xB6);
+ emit_operand(dst, src);
+}
+
+
void Assembler::mul(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -985,10 +1280,10 @@ void Assembler::nop(int n) {
void Assembler::pop(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- if (dst.code() > 7) {
+ if (dst.high_bit()) {
emit_rex_64(dst);
}
- emit(0x58 | (dst.code() & 0x7));
+ emit(0x58 | dst.low_bits());
}
@@ -1011,10 +1306,10 @@ void Assembler::popfq() {
void Assembler::push(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- if (src.code() > 7) {
+ if (src.high_bit()) {
emit_rex_64(src);
}
- emit(0x50 | (src.code() & 0x7));
+ emit(0x50 | src.low_bits());
}
@@ -1063,6 +1358,13 @@ void Assembler::rcl(Register dst, uint8_t imm8) {
}
}
+void Assembler::rdtsc() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x0F);
+ emit(0x31);
+}
+
void Assembler::ret(int imm16) {
EnsureSpace ensure_space(this);
@@ -1078,6 +1380,19 @@ void Assembler::ret(int imm16) {
}
+void Assembler::setcc(Condition cc, Register reg) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint4(cc));
+ if (reg.code() > 3) { // Use x64 byte registers, where different.
+ emit_rex_32(reg);
+ }
+ emit(0x0F);
+ emit(0x90 | cc);
+ emit_modrm(0x0, reg);
+}
+
+
void Assembler::shld(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1104,7 +1419,7 @@ void Assembler::xchg(Register dst, Register src) {
if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
Register other = src.is(rax) ? dst : src;
emit_rex_64(other);
- emit(0x90 | (other.code() & 0x7));
+ emit(0x90 | other.low_bits());
} else {
emit_rex_64(src, dst);
emit(0x87);
@@ -1128,6 +1443,7 @@ void Assembler::store_rax(ExternalReference ref) {
void Assembler::testb(Register reg, Immediate mask) {
+ ASSERT(is_int8(mask.value_));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
if (reg.is(rax)) {
@@ -1146,6 +1462,7 @@ void Assembler::testb(Register reg, Immediate mask) {
void Assembler::testb(const Operand& op, Immediate mask) {
+ ASSERT(is_int8(mask.value_));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(rax, op);
@@ -1198,350 +1515,618 @@ void Assembler::testq(Register dst, Register src) {
}
-// Relocation information implementations
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- ASSERT(rmode != RelocInfo::NONE);
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
- !Serializer::enabled() &&
- !FLAG_debug_code) {
- return;
+void Assembler::testq(Register dst, Immediate mask) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (dst.is(rax)) {
+ emit_rex_64();
+ emit(0xA9);
+ emit(mask);
+ } else {
+ emit_rex_64(dst);
+ emit(0xF7);
+ emit_modrm(0, dst);
+ emit(mask);
}
- RelocInfo rinfo(pc_, rmode, data);
- reloc_info_writer.Write(&rinfo);
}
-void Assembler::RecordJSReturn() {
- WriteRecordedPositions();
+
+// FPU instructions
+
+
+void Assembler::fld(int i) {
EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::JS_RETURN);
+ last_pc_ = pc_;
+ emit_farith(0xD9, 0xC0, i);
}
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_debug_code) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
+void Assembler::fld1() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xE8);
}
-void Assembler::RecordPosition(int pos) {
- ASSERT(pos != RelocInfo::kNoPosition);
- ASSERT(pos >= 0);
- current_position_ = pos;
+void Assembler::fldz() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xEE);
}
-void Assembler::RecordStatementPosition(int pos) {
- ASSERT(pos != RelocInfo::kNoPosition);
- ASSERT(pos >= 0);
- current_statement_position_ = pos;
+void Assembler::fld_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit_operand(0, adr);
}
-void Assembler::WriteRecordedPositions() {
- // Write the statement position if it is different from what was written last
- // time.
- if (current_statement_position_ != written_statement_position_) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
- written_statement_position_ = current_statement_position_;
- }
+void Assembler::fld_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDD);
+ emit_operand(0, adr);
+}
- // Write the position if it is different from what was written last time and
- // also different from the written statement position.
- if (current_position_ != written_position_ &&
- current_position_ != written_statement_position_) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::POSITION, current_position_);
- written_position_ = current_position_;
- }
+
+void Assembler::fstp_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit_operand(3, adr);
}
-const int RelocInfo::kApplyMask =
- RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
- 1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE;
+void Assembler::fstp_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDD);
+ emit_operand(3, adr);
+}
-} } // namespace v8::internal
+void Assembler::fild_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDB);
+ emit_operand(0, adr);
+}
-// TODO(x64): Implement and move these to their correct cc-files:
-#include "ast.h"
-#include "bootstrapper.h"
-#include "codegen-inl.h"
-#include "cpu.h"
-#include "debug.h"
-#include "disasm.h"
-#include "disassembler.h"
-#include "frames-inl.h"
-#include "x64/macro-assembler-x64.h"
-#include "x64/regexp-macro-assembler-x64.h"
-#include "ic-inl.h"
-#include "log.h"
-#include "macro-assembler.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
-#include "register-allocator-inl.h"
-#include "register-allocator.h"
-#include "runtime.h"
-#include "scopes.h"
-#include "serialize.h"
-#include "stub-cache.h"
-#include "unicode.h"
+void Assembler::fild_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDF);
+ emit_operand(5, adr);
+}
-namespace v8 {
-namespace internal {
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* a) {
- UNIMPLEMENTED();
+void Assembler::fistp_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDB);
+ emit_operand(3, adr);
}
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* a) {
- UNIMPLEMENTED();
+
+void Assembler::fisttp_s(const Operand& adr) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDB);
+ emit_operand(1, adr);
}
-void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* a) {
- UNIMPLEMENTED();
+
+void Assembler::fist_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDB);
+ emit_operand(2, adr);
}
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- UNIMPLEMENTED();
+void Assembler::fistp_d(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDF);
+ emit_operand(7, adr);  // fistp m64int is DF /7
}
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- UNIMPLEMENTED();
- return false;
+
+void Assembler::fabs() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xE1);
}
-void BreakLocationIterator::SetDebugBreakAtReturn() {
- UNIMPLEMENTED();
+
+void Assembler::fchs() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xE0);
}
-void CallIC::Generate(MacroAssembler* a, int b, ExternalReference const& c) {
- UNIMPLEMENTED();
+
+void Assembler::fcos() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xFF);
}
-void CallIC::GenerateMegamorphic(MacroAssembler* a, int b) {
- UNIMPLEMENTED();
+
+void Assembler::fsin() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xFE);
}
-void CallIC::GenerateNormal(MacroAssembler* a, int b) {
- UNIMPLEMENTED();
+
+void Assembler::fadd(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDC, 0xC0, i);
}
-Object* CallStubCompiler::CompileCallConstant(Object* a,
- JSObject* b,
- JSFunction* c,
- StubCompiler::CheckType d,
- Code::Flags flags) {
- UNIMPLEMENTED();
- return NULL;
+
+void Assembler::fsub(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDC, 0xE8, i);
}
-Object* CallStubCompiler::CompileCallField(Object* a,
- JSObject* b,
- int c,
- String* d,
- Code::Flags flags) {
- UNIMPLEMENTED();
- return NULL;
+
+void Assembler::fisub_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDA);
+ emit_operand(4, adr);
}
-Object* CallStubCompiler::CompileCallInterceptor(Object* a,
- JSObject* b,
- String* c) {
- UNIMPLEMENTED();
- return NULL;
+
+void Assembler::fmul(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDC, 0xC8, i);
}
-StackFrame::Type ExitFrame::GetStateForFramePointer(unsigned char* a,
- StackFrame::State* b) {
- // TODO(X64): UNIMPLEMENTED
- return NONE;
+void Assembler::fdiv(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDC, 0xF8, i);
}
-int JavaScriptFrame::GetProvidedParametersCount() const {
- UNIMPLEMENTED();
- return 0;
+
+void Assembler::faddp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDE, 0xC0, i);
}
-void JumpTarget::DoBind(int a) {
- UNIMPLEMENTED();
+
+void Assembler::fsubp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDE, 0xE8, i);
}
-void JumpTarget::DoBranch(Condition a, Hint b) {
- UNIMPLEMENTED();
+
+void Assembler::fsubrp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDE, 0xE0, i);
}
-void JumpTarget::DoJump() {
- UNIMPLEMENTED();
+
+void Assembler::fmulp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDE, 0xC8, i);
}
-Object* LoadStubCompiler::CompileLoadCallback(JSObject* a,
- JSObject* b,
- AccessorInfo* c,
- String* d) {
- UNIMPLEMENTED();
- return NULL;
+void Assembler::fdivp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDE, 0xF8, i);
}
-Object* LoadStubCompiler::CompileLoadConstant(JSObject* a,
- JSObject* b,
- Object* c,
- String* d) {
- UNIMPLEMENTED();
- return NULL;
+
+void Assembler::fprem() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xF8);
}
-Object* LoadStubCompiler::CompileLoadField(JSObject* a,
- JSObject* b,
- int c,
- String* d) {
- UNIMPLEMENTED();
- return NULL;
+
+void Assembler::fprem1() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xF5);
}
-Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a,
- JSObject* b,
- String* c) {
- UNIMPLEMENTED();
- return NULL;
+
+void Assembler::fxch(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xD9, 0xC8, i);
}
-StackFrame::Type StackFrame::ComputeType(StackFrame::State* a) {
- UNIMPLEMENTED();
- return NONE;
+
+void Assembler::fincstp() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xF7);
}
-Object* StoreStubCompiler::CompileStoreCallback(JSObject* a,
- AccessorInfo* b,
- String* c) {
- UNIMPLEMENTED();
- return NULL;
+
+void Assembler::ffree(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDD, 0xC0, i);
}
-Object* StoreStubCompiler::CompileStoreField(JSObject* a,
- int b,
- Map* c,
- String* d) {
- UNIMPLEMENTED();
- return NULL;
+
+void Assembler::ftst() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xE4);
}
-Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* a, String* b) {
- UNIMPLEMENTED();
- return NULL;
+
+void Assembler::fucomp(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_farith(0xDD, 0xE8, i);
}
-Object* StubCompiler::CompileLazyCompile(Code::Flags a) {
- UNIMPLEMENTED();
- return NULL;
+
+void Assembler::fucompp() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDA);
+ emit(0xE9);
}
-void VirtualFrame::Drop(int a) {
- UNIMPLEMENTED();
+
+void Assembler::fcompp() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDE);
+ emit(0xD9);
}
-int VirtualFrame::InvalidateFrameSlotAt(int a) {
- UNIMPLEMENTED();
- return -1;
+
+void Assembler::fnstsw_ax() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDF);
+ emit(0xE0);
}
-void VirtualFrame::MergeTo(VirtualFrame* a) {
- UNIMPLEMENTED();
+
+void Assembler::fwait() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x9B);
}
-Result VirtualFrame::Pop() {
- UNIMPLEMENTED();
- return Result(NULL);
+
+void Assembler::frndint() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xD9);
+ emit(0xFC);
}
-Result VirtualFrame::RawCallStub(CodeStub* a) {
- UNIMPLEMENTED();
- return Result(NULL);
+
+void Assembler::fnclex() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xDB);
+ emit(0xE2);
}
-void VirtualFrame::SyncElementBelowStackPointer(int a) {
- UNIMPLEMENTED();
+
+void Assembler::sahf() {
+ // TODO(X64): Test for presence. Not all 64-bit Intel CPUs have sahf
+ // in 64-bit mode. Test CPUID.
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x9E);
}
-void VirtualFrame::SyncElementByPushing(int a) {
- UNIMPLEMENTED();
+
+void Assembler::emit_farith(int b1, int b2, int i) {
+ ASSERT(is_uint8(b1) && is_uint8(b2)); // wrong opcode
+ ASSERT(is_uint3(i)); // illegal stack offset
+ emit(b1);
+ emit(b2 + i);
}
-void VirtualFrame::SyncRange(int a, int b) {
- UNIMPLEMENTED();
+// SSE 2 operations
+
+void Assembler::movsd(const Operand& dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2); // double
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x11); // store
+ emit_sse_operand(src, dst);
}
-VirtualFrame::VirtualFrame() : elements_(0) {
- UNIMPLEMENTED();
+
+void Assembler::movsd(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2); // double
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x11); // store
+ emit_sse_operand(src, dst);
}
-byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- UNIMPLEMENTED();
- return NULL;
+
+void Assembler::movsd(XMMRegister dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2); // double
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x10); // load
+ emit_sse_operand(dst, src);
}
-void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+void Assembler::movsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2); // double
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x10); // load
+ emit_sse_operand(dst, src);
}
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+void Assembler::cvttss2si(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2C);
+ emit_operand(dst, src);
}
-void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+void Assembler::cvttsd2si(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2C);
+ emit_operand(dst, src);
}
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2A);
+ emit_sse_operand(dst, src);
}
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2A);
+ emit_sse_operand(dst, src);
}
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x2A);
+ emit_sse_operand(dst, src);
}
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+void Assembler::addsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x58);
+ emit_sse_operand(dst, src);
}
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x59);
+ emit_sse_operand(dst, src);
}
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+void Assembler::subsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5C);
+ emit_sse_operand(dst, src);
}
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+
+void Assembler::divsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5E);
+ emit_sse_operand(dst, src);
+}
+
+
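+// XMM registers share the ModR/M encoding of the general-purpose registers,
+// so a memory operand is emitted by wrapping the XMM register code in a
+// Register and reusing emit_operand.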
+void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
+ Register ireg = { reg.code() };
+ emit_operand(ireg, adr);
+}
+
+
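+// Emit the register-direct ModR/M byte for two register operands. For
+// example, dst = xmm1 and src = xmm4 give 0xC0 | (1 << 3) | 4 == 0xCC.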
+void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
+  // Only the low three bits of a register code fit in the ModR/M byte; the
+  // high bit, if any, must be emitted in a REX prefix by the caller.
+  emit(0xC0 | (dst.code() & 0x7) << 3 | (src.code() & 0x7));
+}
+
+void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
+  emit(0xC0 | (dst.code() & 0x7) << 3 | (src.code() & 0x7));
+}
+
+
+// Relocation information implementations
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ ASSERT(rmode != RelocInfo::NONE);
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+ !Serializer::enabled() &&
+ !FLAG_debug_code) {
+ return;
+ }
+ RelocInfo rinfo(pc_, rmode, data);
+ reloc_info_writer.Write(&rinfo);
+}
+
+void Assembler::RecordJSReturn() {
+ WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::JS_RETURN);
}
-void ExitFrame::Iterate(ObjectVisitor* a) const {
+
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_debug_code) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+ ASSERT(pos != RelocInfo::kNoPosition);
+ ASSERT(pos >= 0);
+ current_position_ = pos;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+ ASSERT(pos != RelocInfo::kNoPosition);
+ ASSERT(pos >= 0);
+ current_statement_position_ = pos;
+}
+
+
+void Assembler::WriteRecordedPositions() {
+ // Write the statement position if it is different from what was written last
+ // time.
+ if (current_statement_position_ != written_statement_position_) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
+ written_statement_position_ = current_statement_position_;
+ }
+
+ // Write the position if it is different from what was written last time and
+ // also different from the written statement position.
+ if (current_position_ != written_position_ &&
+ current_position_ != written_statement_position_) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::POSITION, current_position_);
+ written_position_ = current_position_;
+ }
+}
+
+
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
+
+
+} } // namespace v8::internal
+
+
+// TODO(x64): Implement and move these to their correct cc-files:
+#include "ast.h"
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "cpu.h"
+#include "debug.h"
+#include "disasm.h"
+#include "disassembler.h"
+#include "frames-inl.h"
+#include "x64/macro-assembler-x64.h"
+#include "x64/regexp-macro-assembler-x64.h"
+#include "ic-inl.h"
+#include "log.h"
+#include "macro-assembler.h"
+#include "parser.h"
+#include "regexp-macro-assembler.h"
+#include "regexp-stack.h"
+#include "register-allocator-inl.h"
+#include "register-allocator.h"
+#include "runtime.h"
+#include "scopes.h"
+#include "serialize.h"
+#include "stub-cache.h"
+#include "unicode.h"
+
+namespace v8 {
+namespace internal {
+
+
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
UNIMPLEMENTED();
}
-byte* InternalFrame::GetCallerStackPointer() const {
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
UNIMPLEMENTED();
- return NULL;
+ return false;
}
-byte* JavaScriptFrame::GetCallerStackPointer() const {
+void BreakLocationIterator::SetDebugBreakAtReturn() {
UNIMPLEMENTED();
- return NULL;
}
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/x64/assembler-x64.h b/V8Binding/v8/src/x64/assembler-x64.h
index b488257..4e1eeff 100644
--- a/V8Binding/v8/src/x64/assembler-x64.h
+++ b/V8Binding/v8/src/x64/assembler-x64.h
@@ -45,7 +45,7 @@ namespace internal {
// Test whether a 64-bit value is in a specific range.
static inline bool is_uint32(int64_t x) {
const int64_t kUInt32Mask = V8_INT64_C(0xffffffff);
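+  // Note: "==" binds more tightly than "&" in C++, so the unparenthesized
+  // form parsed as "(x == x) & kUInt32Mask", which is nonzero for every x.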
- return x == x & kUInt32Mask;
+ return x == (x & kUInt32Mask);
}
static inline bool is_int32(int64_t x) {
@@ -77,23 +77,32 @@ static inline bool is_int32(int64_t x) {
struct Register {
static Register toRegister(int code) {
- Register r = {code};
+ Register r = { code };
return r;
}
bool is_valid() const { return 0 <= code_ && code_ < 16; }
bool is(Register reg) const { return code_ == reg.code_; }
- // The byte-register distinction of ai32 has dissapeared.
- bool is_byte_register() const { return false; }
int code() const {
ASSERT(is_valid());
return code_;
}
int bit() const {
- UNIMPLEMENTED();
- return 0;
+ return 1 << code_;
}
- // (unfortunately we can't make this private in a struct)
+ // Return the high bit of the register code as a 0 or 1. Used often
+ // when constructing the REX prefix byte.
+ int high_bit() const {
+ return code_ >> 3;
+ }
+ // Return the 3 low bits of the register code. Used when encoding registers
+ // in modR/M, SIB, and opcode bytes.
+ int low_bits() const {
+ return code_ & 0x7;
+ }
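+
+  // Example: r11 has code 11 (binary 1011): high_bit() is 1 (emitted in the
+  // REX prefix) and low_bits() is 3 (emitted in the ModR/M byte).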
+
+ // (unfortunately we can't make this private in a struct when initializing
+ // by assignment.)
int code_;
};
@@ -115,7 +124,8 @@ extern Register r14;
extern Register r15;
extern Register no_reg;
-struct XMMRegister {
+
+struct MMXRegister {
bool is_valid() const { return 0 <= code_ && code_ < 2; }
int code() const {
ASSERT(is_valid());
@@ -125,6 +135,34 @@ struct XMMRegister {
int code_;
};
+extern MMXRegister mm0;
+extern MMXRegister mm1;
+extern MMXRegister mm2;
+extern MMXRegister mm3;
+extern MMXRegister mm4;
+extern MMXRegister mm5;
+extern MMXRegister mm6;
+extern MMXRegister mm7;
+extern MMXRegister mm8;
+extern MMXRegister mm9;
+extern MMXRegister mm10;
+extern MMXRegister mm11;
+extern MMXRegister mm12;
+extern MMXRegister mm13;
+extern MMXRegister mm14;
+extern MMXRegister mm15;
+
+
+struct XMMRegister {
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+
+ int code_;
+};
+
extern XMMRegister xmm0;
extern XMMRegister xmm1;
extern XMMRegister xmm2;
@@ -238,19 +276,19 @@ class Immediate BASE_EMBEDDED {
// Machine instruction Operands
enum ScaleFactor {
- kTimes1 = 0,
- kTimes2 = 1,
- kTimes4 = 2,
- kTimes8 = 3,
- kTimesIntSize = kTimes4,
- kTimesPointerSize = kTimes8
+ times_1 = 0,
+ times_2 = 1,
+ times_4 = 2,
+ times_8 = 3,
+ times_int_size = times_4,
+ times_pointer_size = times_8
};
class Operand BASE_EMBEDDED {
public:
// [base + disp/r]
- INLINE(Operand(Register base, int32_t disp));
+ Operand(Register base, int32_t disp);
// [base + index*scale + disp/r]
Operand(Register base,
@@ -290,11 +328,11 @@ class Operand BASE_EMBEDDED {
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
// Example:
-// if (CpuFeatures::IsSupported(SSE2)) {
-// CpuFeatures::Scope fscope(SSE2);
-// // Generate SSE2 floating point code.
+// if (CpuFeatures::IsSupported(SSE3)) {
+// CpuFeatures::Scope fscope(SSE3);
+// // Generate SSE3 floating point code.
// } else {
-// // Generate standard x87 floating point code.
+// // Generate standard x87 or SSE2 floating point code.
// }
class CpuFeatures : public AllStatic {
public:
@@ -331,6 +369,10 @@ class CpuFeatures : public AllStatic {
#endif
};
private:
+  // Safe defaults include SSE2 and CMOV for X64. They are always available
+  // (if anyone checks), but code should not need to check for them.
+ static const uint64_t kDefaultCpuFeatures =
+ (1 << CpuFeatures::SSE2 | 1 << CpuFeatures::CMOV);
static uint64_t supported_;
static uint64_t enabled_;
};
@@ -338,11 +380,15 @@ class CpuFeatures : public AllStatic {
class Assembler : public Malloced {
private:
- // The relocation writer's position is kGap bytes below the end of
+ // We check before assembling an instruction that there is sufficient
+ // space to write an instruction and its relocation information.
+ // The relocation writer's position must be kGap bytes above the end of
// the generated instructions. This leaves enough space for the
- // longest possible x64 instruction (There is a 15 byte limit on
- // instruction length, ruling out some otherwise valid instructions) and
- // allows for a single, fast space check per instruction.
+ // longest possible x64 instruction, 15 bytes, and the longest possible
+ // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
+ // (There is a 15 byte limit on x64 instruction length that rules out some
+ // otherwise valid instructions.)
+ // This allows for a single, fast space check per instruction.
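+  // The worst case is thus 15 + 16 == 31 bytes, which fits within kGap.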
static const int kGap = 32;
public:
@@ -373,8 +419,8 @@ class Assembler : public Malloced {
static inline void set_target_address_at(Address pc, Address target);
// Distance between the address of the code target in the call instruction
- // and the return address
- static const int kTargetAddrToReturnAddrDist = kPointerSize;
+ // and the return address. Checked in the debug build.
+ static const int kTargetAddrToReturnAddrDist = 3 + kPointerSize;
// ---------------------------------------------------------------------------
@@ -385,7 +431,8 @@ class Assembler : public Malloced {
//
// If we need versions of an assembly instruction that operate on different
// width arguments, we add a single-letter suffix specifying the width.
- // This is done for the following instructions: mov, cmp.
+ // This is done for the following instructions: mov, cmp, inc, dec,
+ // add, sub, and test.
// There are no versions of these instructions without the suffix.
// - Instructions on 8-bit (byte) operands/registers have a trailing 'b'.
// - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
@@ -423,10 +470,10 @@ class Assembler : public Malloced {
void movl(Register dst, Register src);
void movl(Register dst, const Operand& src);
void movl(const Operand& dst, Register src);
+ void movl(const Operand& dst, Immediate imm);
// Load a 32-bit immediate value, zero-extended to 64 bits.
void movl(Register dst, Immediate imm32);
- void movq(Register dst, int32_t imm32);
void movq(Register dst, const Operand& src);
// Sign extends immediate 32-bit value to 64 bits.
void movq(Register dst, Immediate x);
@@ -434,7 +481,8 @@ class Assembler : public Malloced {
// Move 64 bit register value to 64-bit memory location.
void movq(const Operand& dst, Register src);
-
+ // Move sign extended immediate to memory location.
+ void movq(const Operand& dst, Immediate value);
// New x64 instructions to load a 64-bit immediate into a register.
// All 64-bit immediates must have a relocation mode.
void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
@@ -444,66 +492,86 @@ class Assembler : public Malloced {
void movq(Register dst, ExternalReference ext);
void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
+ void movsxlq(Register dst, Register src);
+ void movsxlq(Register dst, const Operand& src);
+ void movzxbq(Register dst, const Operand& src);
// New x64 instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode);
void load_rax(ExternalReference ext);
- void movsx_b(Register dst, const Operand& src);
-
- void movsx_w(Register dst, const Operand& src);
-
- void movzx_b(Register dst, const Operand& src);
-
- void movzx_w(Register dst, const Operand& src);
-
- // Conditional moves
- void cmov(Condition cc, Register dst, int32_t imm32);
- void cmov(Condition cc, Register dst, Handle<Object> handle);
- void cmov(Condition cc, Register dst, const Operand& src);
+ // Conditional moves.
+ void cmovq(Condition cc, Register dst, Register src);
+ void cmovq(Condition cc, Register dst, const Operand& src);
+ void cmovl(Condition cc, Register dst, Register src);
+ void cmovl(Condition cc, Register dst, const Operand& src);
// Exchange two registers
void xchg(Register dst, Register src);
// Arithmetics
- void add(Register dst, Register src) {
+ void addq(Register dst, Register src) {
arithmetic_op(0x03, dst, src);
}
- void add(Register dst, const Operand& src) {
+ void addl(Register dst, Register src) {
+ arithmetic_op_32(0x03, dst, src);
+ }
+
+ void addl(Register dst, Immediate src) {
+ immediate_arithmetic_op_32(0x0, dst, src);
+ }
+
+ void addq(Register dst, const Operand& src) {
arithmetic_op(0x03, dst, src);
}
- void add(const Operand& dst, Register src) {
+ void addq(const Operand& dst, Register src) {
arithmetic_op(0x01, src, dst);
}
- void add(Register dst, Immediate src) {
+ void addq(Register dst, Immediate src) {
immediate_arithmetic_op(0x0, dst, src);
}
- void add(const Operand& dst, Immediate src) {
+ void addq(const Operand& dst, Immediate src) {
immediate_arithmetic_op(0x0, dst, src);
}
- void cmp(Register dst, Register src) {
+ void addl(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op_32(0x0, dst, src);
+ }
+
+ void cmpb(Register dst, Immediate src) {
+ immediate_arithmetic_op_8(0x7, dst, src);
+ }
+
+ void cmpb(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op_8(0x7, dst, src);
+ }
+
+ void cmpq(Register dst, Register src) {
arithmetic_op(0x3B, dst, src);
}
- void cmp(Register dst, const Operand& src) {
+ void cmpq(Register dst, const Operand& src) {
arithmetic_op(0x3B, dst, src);
}
- void cmp(const Operand& dst, Register src) {
+ void cmpq(const Operand& dst, Register src) {
arithmetic_op(0x39, src, dst);
}
- void cmp(Register dst, Immediate src) {
+ void cmpq(Register dst, Immediate src) {
immediate_arithmetic_op(0x7, dst, src);
}
- void cmp(const Operand& dst, Immediate src) {
+ void cmpl(Register dst, Immediate src) {
+ immediate_arithmetic_op_32(0x7, dst, src);
+ }
+
+ void cmpq(const Operand& dst, Immediate src) {
immediate_arithmetic_op(0x7, dst, src);
}
@@ -527,15 +595,9 @@ class Assembler : public Malloced {
immediate_arithmetic_op(0x4, dst, src);
}
- void cmpb(const Operand& op, int8_t imm8);
- void cmpb_al(const Operand& op);
- void cmpw_ax(const Operand& op);
- void cmpw(const Operand& op, Immediate imm16);
-
- void dec_b(Register dst);
-
- void dec(Register dst);
- void dec(const Operand& dst);
+ void decq(Register dst);
+ void decq(const Operand& dst);
+ void decl(const Operand& dst);
// Sign-extends rax into rdx:rax.
void cqo();
@@ -543,13 +605,17 @@ class Assembler : public Malloced {
// Divide rdx:rax by src. Quotient in rax, remainder in rdx.
void idiv(Register src);
- void imul(Register dst, Register src);
- void imul(Register dst, const Operand& src);
- // Performs the operation dst = src * imm.
- void imul(Register dst, Register src, Immediate imm);
+ // Signed multiply instructions.
+ void imul(Register src); // rdx:rax = rax * src.
+ void imul(Register dst, Register src); // dst = dst * src.
+ void imul(Register dst, const Operand& src); // dst = dst * src.
+ void imul(Register dst, Register src, Immediate imm); // dst = src * imm.
+ // Multiply 32 bit registers
+ void imull(Register dst, Register src); // dst = dst * src.
- void inc(Register dst);
- void inc(const Operand& dst);
+ void incq(Register dst);
+ void incq(const Operand& dst);
+ void incl(const Operand& dst);
void lea(Register dst, const Operand& src);
@@ -610,6 +676,10 @@ class Assembler : public Malloced {
shift(dst, 0x4);
}
+ void shll(Register dst) {
+ shift_32(dst, 0x4);
+ }
+
void shr(Register dst, Immediate shift_amount) {
shift(dst, shift_amount, 0x5);
}
@@ -618,35 +688,48 @@ class Assembler : public Malloced {
shift(dst, 0x5);
}
+ void shrl(Register dst) {
+ shift_32(dst, 0x5);
+ }
+
void store_rax(void* dst, RelocInfo::Mode mode);
void store_rax(ExternalReference ref);
- void sub(Register dst, Register src) {
+ void subq(Register dst, Register src) {
arithmetic_op(0x2B, dst, src);
}
- void sub(Register dst, const Operand& src) {
+ void subq(Register dst, const Operand& src) {
arithmetic_op(0x2B, dst, src);
}
- void sub(const Operand& dst, Register src) {
+ void subq(const Operand& dst, Register src) {
arithmetic_op(0x29, src, dst);
}
- void sub(Register dst, Immediate src) {
+ void subq(Register dst, Immediate src) {
immediate_arithmetic_op(0x5, dst, src);
}
- void sub(const Operand& dst, Immediate src) {
+ void subq(const Operand& dst, Immediate src) {
immediate_arithmetic_op(0x5, dst, src);
}
+ void subl(Register dst, Register src) {
+ arithmetic_op_32(0x2B, dst, src);
+ }
+
+ void subl(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op_32(0x5, dst, src);
+ }
+
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
void testl(Register reg, Immediate mask);
void testl(const Operand& op, Immediate mask);
void testq(const Operand& op, Register reg);
void testq(Register dst, Register src);
+ void testq(Register dst, Immediate mask);
void xor_(Register dst, Register src) {
arithmetic_op(0x33, dst, src);
@@ -668,18 +751,19 @@ class Assembler : public Malloced {
immediate_arithmetic_op(0x6, dst, src);
}
-
// Bit operations.
void bt(const Operand& dst, Register src);
void bts(const Operand& dst, Register src);
// Miscellaneous
+ void cpuid();
void hlt();
void int3();
void nop();
void nop(int n);
void rdtsc();
void ret(int imm16);
+ void setcc(Condition cc, Register reg);
// Label operations & relative jumps (PPUM Appendix D)
//
@@ -717,8 +801,6 @@ class Assembler : public Malloced {
// Conditional jumps
void j(Condition cc, Label* L);
- void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
- void j(Condition cc, Handle<Code> code);
// Floating-point operations
void fld(int i);
@@ -772,27 +854,40 @@ class Assembler : public Malloced {
void fwait();
void fnclex();
+ void fsin();
+ void fcos();
+
void frndint();
void sahf();
- void setcc(Condition cc, Register reg);
-
- void cpuid();
// SSE2 instructions
+ void movsd(const Operand& dst, XMMRegister src);
+  void movsd(Register dst, XMMRegister src);
+  void movsd(XMMRegister dst, Register src);
+  void movsd(XMMRegister dst, const Operand& src);
+
void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src);
- void cvtsi2sd(XMMRegister dst, const Operand& src);
+ void cvtlsi2sd(XMMRegister dst, const Operand& src);
+ void cvtlsi2sd(XMMRegister dst, Register src);
+ void cvtqsi2sd(XMMRegister dst, const Operand& src);
+ void cvtqsi2sd(XMMRegister dst, Register src);
void addsd(XMMRegister dst, XMMRegister src);
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
void divsd(XMMRegister dst, XMMRegister src);
+
+ void emit_sse_operand(XMMRegister dst, XMMRegister src);
+ void emit_sse_operand(XMMRegister reg, const Operand& adr);
+ void emit_sse_operand(XMMRegister dst, Register src);
+
// Use either movsd or movlpd.
- void movdbl(XMMRegister dst, const Operand& src);
- void movdbl(const Operand& dst, XMMRegister src);
+ // void movdbl(XMMRegister dst, const Operand& src);
+ // void movdbl(const Operand& dst, XMMRegister src);
// Debugging
void Print();
@@ -813,16 +908,11 @@ class Assembler : public Malloced {
// Writes a doubleword of data in the code stream.
// Used for inline tables, e.g., jump-tables.
- void dd(uint32_t data);
+ // void dd(uint32_t data);
// Writes a quadword of data in the code stream.
// Used for inline tables, e.g., jump-tables.
- void dd(uint64_t data, RelocInfo::Mode reloc_info);
-
- // Writes the absolute address of a bound label at the given position in
- // the generated code. That positions should have the relocation mode
- // internal_reference!
- void WriteInternalReference(int position, const Label& bound_label);
+ // void dd(uint64_t data, RelocInfo::Mode reloc_info);
int pc_offset() const { return pc_ - buffer_; }
int current_statement_position() const { return current_statement_position_; }
@@ -841,11 +931,11 @@ class Assembler : public Malloced {
static const int kMinimalBufferSize = 4*KB;
protected:
- void movsd(XMMRegister dst, const Operand& src);
- void movsd(const Operand& dst, XMMRegister src);
+ // void movsd(XMMRegister dst, const Operand& src);
+ // void movsd(const Operand& dst, XMMRegister src);
- void emit_sse_operand(XMMRegister reg, const Operand& adr);
- void emit_sse_operand(XMMRegister dst, XMMRegister src);
+ // void emit_sse_operand(XMMRegister reg, const Operand& adr);
+ // void emit_sse_operand(XMMRegister dst, XMMRegister src);
private:
@@ -873,6 +963,7 @@ class Assembler : public Malloced {
// High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
// REX.W is set.
inline void emit_rex_64(Register reg, Register rm_reg);
+ inline void emit_rex_64(XMMRegister reg, Register rm_reg);
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of the destination, index, and base register codes.
@@ -880,6 +971,7 @@ class Assembler : public Malloced {
// register is used for REX.B, and the high bit of op's index register
// is used for REX.X. REX.W is set.
inline void emit_rex_64(Register reg, const Operand& op);
+ inline void emit_rex_64(XMMRegister reg, const Operand& op);
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of the register code.
@@ -924,6 +1016,18 @@ class Assembler : public Malloced {
// is emitted.
inline void emit_optional_rex_32(Register reg, const Operand& op);
+ // As for emit_optional_rex_32(Register, Register), except that
+ // the registers are XMM registers.
+ inline void emit_optional_rex_32(XMMRegister reg, XMMRegister base);
+
+ // As for emit_optional_rex_32(Register, Register), except that
+ // the registers are XMM registers.
+ inline void emit_optional_rex_32(XMMRegister reg, Register base);
+
+ // As for emit_optional_rex_32(Register, const Operand&), except that
+ // the register is an XMM register.
+ inline void emit_optional_rex_32(XMMRegister reg, const Operand& op);
+
// Optionally do as emit_rex_32(Register) if the register number has
// the high bit set.
inline void emit_optional_rex_32(Register rm_reg);
@@ -938,7 +1042,7 @@ class Assembler : public Malloced {
// the second operand of the operation, a register or operation
// subcode, into the reg field of the ModR/M byte.
void emit_operand(Register reg, const Operand& adr) {
- emit_operand(reg.code() & 0x07, adr);
+ emit_operand(reg.low_bits(), adr);
}
// Emit the ModR/M byte, and optionally the SIB byte and
@@ -948,14 +1052,14 @@ class Assembler : public Malloced {
// Emit a ModR/M byte with registers coded in the reg and rm_reg fields.
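+  // A ModR/M byte packs mod into bits 7-6, reg into bits 5-3, and r/m into
+  // bits 2-0; 0xC0 sets mod to 11, selecting register-direct operands.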
void emit_modrm(Register reg, Register rm_reg) {
- emit(0xC0 | (reg.code() & 0x7) << 3 | (rm_reg.code() & 0x7));
+ emit(0xC0 | reg.low_bits() << 3 | rm_reg.low_bits());
}
// Emit a ModR/M byte with an operation subcode in the reg field and
// a register in the rm_reg field.
void emit_modrm(int code, Register rm_reg) {
- ASSERT((code & ~0x7) == 0);
- emit(0xC0 | (code & 0x7) << 3 | (rm_reg.code() & 0x7));
+ ASSERT(is_uint3(code));
+ emit(0xC0 | code << 3 | rm_reg.low_bits());
}
// Emit the code-object-relative offset of the label's position
@@ -966,18 +1070,34 @@ class Assembler : public Malloced {
// similar, differing just in the opcode or in the reg field of the
// ModR/M byte.
void arithmetic_op(byte opcode, Register dst, Register src);
+ void arithmetic_op_32(byte opcode, Register dst, Register src);
void arithmetic_op(byte opcode, Register reg, const Operand& op);
void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
+ // Operate on a 32-bit word in memory or register.
+ void immediate_arithmetic_op_32(byte subcode,
+ const Operand& dst,
+ Immediate src);
+ void immediate_arithmetic_op_32(byte subcode,
+ Register dst,
+ Immediate src);
+ // Operate on a byte in memory or register.
+ void immediate_arithmetic_op_8(byte subcode,
+ const Operand& dst,
+ Immediate src);
+ void immediate_arithmetic_op_8(byte subcode,
+ Register dst,
+ Immediate src);
// Emit machine code for a shift operation.
void shift(Register dst, Immediate shift_amount, int subcode);
// Shift dst by cl % 64 bits.
void shift(Register dst, int subcode);
+ void shift_32(Register dst, int subcode);
void emit_farith(int b1, int b2, int i);
// labels
- void print(Label* L);
+ // void print(Label* L);
void bind_to(Label* L, int pos);
void link_to(Label* L, Label* appendix);
diff --git a/V8Binding/v8/src/x64/builtins-x64.cc b/V8Binding/v8/src/x64/builtins-x64.cc
index 3f1cd9f..459921c 100644
--- a/V8Binding/v8/src/x64/builtins-x64.cc
+++ b/V8Binding/v8/src/x64/builtins-x64.cc
@@ -27,39 +27,681 @@
#include "v8.h"
#include "codegen-inl.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- Builtins::CFunctionId id) {
- masm->int3(); // UNIMPLEMENTED.
+#define __ ACCESS_MASM(masm)
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
+ // TODO(1238487): Don't pass the function in a static variable.
+ ExternalReference passed = ExternalReference::builtin_passed_function();
+ __ movq(kScratchRegister, passed.address(), RelocInfo::EXTERNAL_REFERENCE);
+ __ movq(Operand(kScratchRegister, 0), rdi);
+
+ // The actual argument count has already been loaded into register
+ // rax, but JumpToBuiltin expects rax to contain the number of
+ // arguments including the receiver.
+ __ incq(rax);
+ __ JumpToBuiltin(ExternalReference(id));
}
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- masm->int3(); // UNIMPLEMENTED.
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+ __ push(rbp);
+ __ movq(rbp, rsp);
+
+ // Store the arguments adaptor context sentinel.
+ __ push(Immediate(ArgumentsAdaptorFrame::SENTINEL));
+
+ // Push the function on the stack.
+ __ push(rdi);
+
+ // Preserve the number of arguments on the stack. Must preserve both
+ // rax and rbx because these registers are used when copying the
+ // arguments and the receiver.
+ ASSERT(kSmiTagSize == 1);
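+  // The lea computes rax + rax * 1 + kSmiTag, i.e. 2 * rax, which is the
+  // smi encoding of the argument count.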
+ __ lea(rcx, Operand(rax, rax, times_1, kSmiTag));
+ __ push(rcx);
}
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- masm->int3(); // UNIMPLEMENTED.
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // Retrieve the number of arguments from the stack. Number is a Smi.
+ __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ // Leave the frame.
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+
+ // Remove caller arguments from the stack.
+  // rbx holds a Smi (the count shifted left by one), so scaling it by 4
+  // converts it to a byte offset of count * 8 == count * kPointerSize.
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  ASSERT_EQ(kPointerSize, (1 << kSmiTagSize) * 4);
+ __ pop(rcx);
+ __ lea(rsp, Operand(rsp, rbx, times_4, 1 * kPointerSize)); // 1 ~ receiver
+ __ push(rcx);
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : actual number of arguments
+ // -- rbx : expected number of arguments
+ // -- rdx : code entry to call
+ // -----------------------------------
+
+ Label invoke, dont_adapt_arguments;
+ __ IncrementCounter(&Counters::arguments_adaptors, 1);
+
+ Label enough, too_few;
+ __ cmpq(rax, rbx);
+ __ j(less, &too_few);
+ __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ j(equal, &dont_adapt_arguments);
+
+ { // Enough parameters: Actual >= expected.
+ __ bind(&enough);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Copy receiver and all expected arguments.
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
+ __ movq(rcx, Immediate(-1)); // account for receiver
+
+ Label copy;
+ __ bind(&copy);
+ __ incq(rcx);
+ __ push(Operand(rax, 0));
+ __ subq(rax, Immediate(kPointerSize));
+ __ cmpq(rcx, rbx);
+ __ j(less, &copy);
+ __ jmp(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected.
+ __ bind(&too_few);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Copy receiver and all actual arguments.
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
+ __ movq(rcx, Immediate(-1)); // account for receiver
+
+ Label copy;
+ __ bind(&copy);
+ __ incq(rcx);
+ __ push(Operand(rdi, 0));
+ __ subq(rdi, Immediate(kPointerSize));
+ __ cmpq(rcx, rax);
+ __ j(less, &copy);
+
+ // Fill remaining expected arguments with undefined values.
+ Label fill;
+ __ movq(kScratchRegister,
+ Factory::undefined_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ bind(&fill);
+ __ incq(rcx);
+ __ push(kScratchRegister);
+ __ cmpq(rcx, rbx);
+ __ j(less, &fill);
+
+ // Restore function pointer.
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+
+ // Call the entry point.
+ __ bind(&invoke);
+ __ call(rdx);
+
+ // Leave frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ ret(0);
+
+ // -------------------------------------------
+  // Don't adapt arguments.
+ // -------------------------------------------
+ __ bind(&dont_adapt_arguments);
+ __ jmp(rdx);
}
+
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- masm->int3(); // UNIMPLEMENTED.
+ // Stack Layout:
+ // rsp: return address
+ // +1: Argument n
+ // +2: Argument n-1
+ // ...
+ // +n: Argument 1 = receiver
+ // +n+1: Argument 0 = function to call
+ //
+ // rax contains the number of arguments, n, not counting the function.
+ //
+ // 1. Make sure we have at least one argument.
+ { Label done;
+ __ testq(rax, rax);
+ __ j(not_zero, &done);
+ __ pop(rbx);
+ __ Push(Factory::undefined_value());
+ __ push(rbx);
+ __ incq(rax);
+ __ bind(&done);
+ }
+
+ // 2. Get the function to call from the stack.
+ { Label done, non_function, function;
+ // The function to call is at position n+1 on the stack.
+ __ movq(rdi, Operand(rsp, rax, times_pointer_size, +1 * kPointerSize));
+ __ testl(rdi, Immediate(kSmiTagMask));
+ __ j(zero, &non_function);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(equal, &function);
+
+ // Non-function called: Clear the function to force exception.
+ __ bind(&non_function);
+ __ xor_(rdi, rdi);
+ __ jmp(&done);
+
+ // Function called: Change context eagerly to get the right global object.
+ __ bind(&function);
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ __ bind(&done);
+ }
+
+ // 3. Make sure first argument is an object; convert if necessary.
+ { Label call_to_object, use_global_receiver, patch_receiver, done;
+ __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
+
+ __ testl(rbx, Immediate(kSmiTagMask));
+ __ j(zero, &call_to_object);
+
+ __ Cmp(rbx, Factory::null_value());
+ __ j(equal, &use_global_receiver);
+ __ Cmp(rbx, Factory::undefined_value());
+ __ j(equal, &use_global_receiver);
+
+ __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &call_to_object);
+ __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
+ __ j(below_equal, &done);
+
+ __ bind(&call_to_object);
+ __ EnterInternalFrame(); // preserves rax, rbx, rdi
+
+ // Store the arguments count on the stack (smi tagged).
+ ASSERT(kSmiTag == 0);
+ __ shl(rax, Immediate(kSmiTagSize));
+ __ push(rax);
+
+  __ push(rdi); // save rdi across the call
+ __ push(rbx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ movq(rbx, rax);
+  __ pop(rdi); // restore rdi after the call
+
+ // Get the arguments count and untag it.
+ __ pop(rax);
+ __ shr(rax, Immediate(kSmiTagSize));
+
+ __ LeaveInternalFrame();
+ __ jmp(&patch_receiver);
+
+ // Use the global receiver object from the called function as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalIndex =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+
+ __ bind(&patch_receiver);
+ __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
+
+ __ bind(&done);
+ }
+
+ // 4. Shift stuff one slot down the stack.
+ { Label loop;
+ __ lea(rcx, Operand(rax, +1)); // +1 ~ copy receiver too
+ __ bind(&loop);
+ __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
+ __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
+ __ decq(rcx);
+ __ j(not_zero, &loop);
+ }
+
+ // 5. Remove TOS (copy of last arguments), but keep return address.
+ __ pop(rbx);
+ __ pop(rcx);
+ __ push(rbx);
+ __ decq(rax);
+
+ // 6. Check that function really was a function and get the code to
+ // call from the function and check that the number of expected
+ // arguments matches what we're providing.
+ { Label invoke, trampoline;
+ __ testq(rdi, rdi);
+ __ j(not_zero, &invoke);
+ __ xor_(rbx, rbx);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
+ __ bind(&trampoline);
+ __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&invoke);
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movsxlq(rbx,
+ FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ cmpq(rax, rbx);
+ __ j(not_equal, &trampoline);
+ }
+
+  // 7. Jump (tail-call) to the code in register rdx without checking arguments.
+ ParameterCount expected(0);
+ __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION);
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ // Stack at entry:
+ // rsp: return address
+ // rsp+8: arguments
+ // rsp+16: receiver ("this")
+ // rsp+24: function
+ __ EnterInternalFrame();
+ // Stack frame:
+ // rbp: Old base pointer
+ // rbp[1]: return address
+ // rbp[2]: function arguments
+ // rbp[3]: receiver
+ // rbp[4]: function
+ static const int kArgumentsOffset = 2 * kPointerSize;
+ static const int kReceiverOffset = 3 * kPointerSize;
+ static const int kFunctionOffset = 4 * kPointerSize;
+ __ push(Operand(rbp, kFunctionOffset));
+ __ push(Operand(rbp, kArgumentsOffset));
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ if (FLAG_check_stack) {
+ // We need to catch preemptions right here, otherwise an unlucky preemption
+ // could show up as a failed apply.
+ Label retry_preemption;
+ Label no_preemption;
+ __ bind(&retry_preemption);
+ ExternalReference stack_guard_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ __ movq(kScratchRegister, stack_guard_limit);
+ __ movq(rcx, rsp);
+ __ subq(rcx, Operand(kScratchRegister, 0));
+ // rcx contains the difference between the stack limit and the stack top.
+ // We use it below to check that there is enough room for the arguments.
+ __ j(above, &no_preemption);
+
+ // Preemption!
+ // Because runtime functions always remove the receiver from the stack, we
+ // have to fake one to avoid underflowing the stack.
+ __ push(rax);
+ __ push(Immediate(Smi::FromInt(0)));
+
+ // Do call to runtime routine.
+ __ CallRuntime(Runtime::kStackGuard, 1);
+ __ pop(rax);
+ __ jmp(&retry_preemption);
+
+ __ bind(&no_preemption);
+
+ Label okay;
+ // Make rdx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ movq(rdx, rax);
+ __ shl(rdx, Immediate(kPointerSizeLog2 - kSmiTagSize));
+ __ cmpq(rcx, rdx);
+ __ j(greater, &okay);
+
+ // Too bad: Out of stack space.
+ __ push(Operand(rbp, kFunctionOffset));
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ bind(&okay);
+ }
+
+ // Push current index and limit.
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+ __ push(rax); // limit
+ __ push(Immediate(0)); // index
+
+ // Change context eagerly to get the right global object if
+ // necessary.
+ __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ // Compute the receiver.
+ Label call_to_object, use_global_receiver, push_receiver;
+ __ movq(rbx, Operand(rbp, kReceiverOffset));
+ __ testl(rbx, Immediate(kSmiTagMask));
+ __ j(zero, &call_to_object);
+ __ Cmp(rbx, Factory::null_value());
+ __ j(equal, &use_global_receiver);
+ __ Cmp(rbx, Factory::undefined_value());
+ __ j(equal, &use_global_receiver);
+
+ // If given receiver is already a JavaScript object then there's no
+ // reason for converting it.
+ __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(less, &call_to_object);
+ __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
+ __ j(less_equal, &push_receiver);
+
+ // Convert the receiver to an object.
+ __ bind(&call_to_object);
+ __ push(rbx);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ movq(rbx, rax);
+ __ jmp(&push_receiver);
+
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ __ bind(&push_receiver);
+ __ push(rbx);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ movq(rax, Operand(rbp, kIndexOffset));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(rcx, Operand(rbp, kArgumentsOffset)); // load arguments
+ __ push(rcx);
+ __ push(rax);
+
+ // Use inline caching to speed up access to arguments.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // It is important that we do not have a test instruction after the
+ // call. A test instruction after the call is used to indicate that
+ // we have generated an inline version of the keyed load. In this
+ // case, we know that we are not generating a test instruction next.
+
+ // Remove IC arguments from the stack and push the nth argument.
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ __ push(rax);
+
+ // Update the index on the stack and in register rax.
+ __ movq(rax, Operand(rbp, kIndexOffset));
+ __ addq(rax, Immediate(Smi::FromInt(1)));
+ __ movq(Operand(rbp, kIndexOffset), rax);
+
+ __ bind(&entry);
+ __ cmpq(rax, Operand(rbp, kLimitOffset));
+ __ j(not_equal, &loop);
+
+ // Invoke the function.
+ ParameterCount actual(rax);
+ __ shr(rax, Immediate(kSmiTagSize));
+ __ movq(rdi, Operand(rbp, kFunctionOffset));
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+
+ __ LeaveInternalFrame();
+ __ ret(3 * kPointerSize); // remove function, receiver, and arguments
}
+
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
- masm->int3(); // UNIMPLEMENTED.
+ // ----------- S t a t e -------------
+ // -- rax: number of arguments
+ // -- rdi: constructor function
+ // -----------------------------------
+
+ Label non_function_call;
+ // Check that function is not a smi.
+ __ testl(rdi, Immediate(kSmiTagMask));
+ __ j(zero, &non_function_call);
+ // Check that function is a JSFunction.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &non_function_call);
+
+ // Jump to the function-specific construct stub.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
+ __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
+ __ jmp(rbx);
+
+  // rdi: called object
+  // rax: number of arguments
+ __ bind(&non_function_call);
+
+  // Set expected number of arguments to zero (not changing rax).
+ __ movq(rbx, Immediate(0));
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- masm->int3(); // UNIMPLEMENTED.
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ // Enter a construct frame.
+ __ EnterConstructFrame();
+
+ // Store a smi-tagged arguments count on the stack.
+ __ shl(rax, Immediate(kSmiTagSize));
+ __ push(rax);
+
+ // Push the function to invoke on the stack.
+ __ push(rdi);
+
+ // Try to allocate the object without transitioning into C code. If any of the
+ // preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+
+ // TODO(x64): Implement inlined allocation.
+
+ // Allocate the new receiver object using the runtime call.
+ // rdi: function (constructor)
+ __ bind(&rt_call);
+  // Must restore rdi (constructor) before calling runtime.
+ __ movq(rdi, Operand(rsp, 0));
+ __ push(rdi);
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ movq(rbx, rax); // store result in rbx
+
+ // New object allocated.
+ // rbx: newly allocated object
+ __ bind(&allocated);
+ // Retrieve the function from the stack.
+ __ pop(rdi);
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ movq(rax, Operand(rsp, 0));
+ __ shr(rax, Immediate(kSmiTagSize));
+
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ push(rbx);
+ __ push(rbx);
+
+ // Setup pointer to last argument.
+ __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ movq(rcx, rax);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ push(Operand(rbx, rcx, times_pointer_size, 0));
+ __ bind(&entry);
+ __ decq(rcx);
+ __ j(greater_equal, &loop);
+
+ // Call the function.
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+
+ // Restore context from the frame.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(greater_equal, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ movq(rax, Operand(rsp, 0));
+
+ // Restore the arguments count and leave the construct frame.
+ __ bind(&exit);
+ __ movq(rbx, Operand(rsp, kPointerSize)); // get arguments count
+ __ LeaveConstructFrame();
+
+ // Remove caller arguments from the stack and return.
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ pop(rcx);
+ __ lea(rsp, Operand(rsp, rbx, times_4, 1 * kPointerSize)); // 1 ~ receiver
+ __ push(rcx);
+ __ ret(0);
}
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // Expects five C++ function parameters.
+ // - Address entry (ignored)
+  // - JSFunction* function
+ // - Object* receiver
+ // - int argc
+ // - Object*** argv
+ // (see Handle::Invoke in execution.cc).
+
+  // Platform-specific argument handling. After this, the stack contains
+  // an internal frame and the pushed function and receiver, and
+  // registers rax and rbx hold the argument count and argument array,
+ // while rdi holds the function pointer and rsi the context.
+#ifdef __MSVC__
+ // MSVC parameters in:
+ // rcx : entry (ignored)
+ // rdx : function
+ // r8 : receiver
+ // r9 : argc
+ // [rsp+0x20] : argv
+
+ // Clear the context before we push it when entering the JS frame.
+ __ xor_(rsi, rsi);
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Load the function context into rsi.
+ __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
+
+ // Push the function and the receiver onto the stack.
+ __ push(rdx);
+ __ push(r8);
+
+ // Load the number of arguments and setup pointer to the arguments.
+ __ movq(rax, r9);
+ // Load the previous frame pointer to access C argument on stack
+ __ movq(kScratchRegister, Operand(rbp, 0));
+ __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+ // Load the function pointer into rdi.
+ __ movq(rdi, rdx);
+#else // !defined(__MSVC__)
+ // GCC parameters in:
+ // rdi : entry (ignored)
+ // rsi : function
+ // rdx : receiver
+ // rcx : argc
+ // r8 : argv
+
+ __ movq(rdi, rsi);
+ // rdi : function
+
+ // Clear the context before we push it when entering the JS frame.
+ __ xor_(rsi, rsi);
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Push the function and receiver and setup the context.
+ __ push(rdi);
+ __ push(rdx);
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ // Load the number of arguments and setup pointer to the arguments.
+ __ movq(rax, rcx);
+ __ movq(rbx, r8);
+#endif // __MSVC__
+ // Current stack contents:
+ // [rsp + 2 * kPointerSize ... ]: Internal frame
+ // [rsp + kPointerSize] : function
+ // [rsp] : receiver
+ // Current register contents:
+ // rax : argc
+ // rbx : argv
+ // rsi : context
+ // rdi : function
+
+ // Copy arguments to the stack in a loop.
+ // Register rbx points to array of pointers to handle locations.
+ // Push the values of these handles.
+ Label loop, entry;
+ __ xor_(rcx, rcx); // Set loop variable to 0.
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+ __ push(Operand(kScratchRegister, 0)); // dereference handle
+ __ addq(rcx, Immediate(1));
+ __ bind(&entry);
+ __ cmpq(rcx, rax);
+ __ j(not_equal, &loop);
+
+ // Invoke the code.
+ if (is_construct) {
+ // Expects rdi to hold function pointer.
+ __ movq(kScratchRegister,
+ Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+ RelocInfo::CODE_TARGET);
+ __ call(kScratchRegister);
+ } else {
+ ParameterCount actual(rax);
+ // Function must be in rdi.
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ }
+
+ // Exit the JS frame. Notice that this also removes the empty
+ // context and the function left on the stack by the code
+ // invocation.
+ __ LeaveInternalFrame();
+ // TODO(X64): Is argument correct? Is there a receiver to remove?
+ __ ret(1 * kPointerSize); // remove receiver
+}
+
+
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- masm->int3(); // UNIMPLEMENTED.
+ Generate_JSEntryTrampolineHelper(masm, false);
}
-} } // namespace v8::internal
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/x64/codegen-x64-inl.h b/V8Binding/v8/src/x64/codegen-x64-inl.h
index 0d5b0e2..6869fc9 100644
--- a/V8Binding/v8/src/x64/codegen-x64-inl.h
+++ b/V8Binding/v8/src/x64/codegen-x64-inl.h
@@ -32,10 +32,24 @@
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm_)
+
// Platform-specific inline functions.
-void DeferredCode::Jump() { UNIMPLEMENTED(); }
-void DeferredCode::Branch(Condition cc) { UNIMPLEMENTED(); }
+void DeferredCode::Jump() { __ jmp(&entry_label_); }
+void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
+
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+ GenerateFastMathOp(SIN, args);
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+ GenerateFastMathOp(COS, args);
+}
+
+#undef __
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/x64/codegen-x64.cc b/V8Binding/v8/src/x64/codegen-x64.cc
index ca58e09..1854aaa 100644
--- a/V8Binding/v8/src/x64/codegen-x64.cc
+++ b/V8Binding/v8/src/x64/codegen-x64.cc
@@ -25,22 +25,80 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
#include "v8.h"
-#include "macro-assembler.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "ic-inl.h"
+#include "parser.h"
#include "register-allocator-inl.h"
-#include "codegen.h"
+#include "scopes.h"
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm_)
+
// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.
-void DeferredCode::SaveRegisters() { UNIMPLEMENTED(); }
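+// Each entry of registers_ is kIgnore, kPush, or an rbp-relative frame
+// offset (possibly tagged with kSyncedFlag) giving the slot where the
+// corresponding register should be saved.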
+void DeferredCode::SaveRegisters() {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ int action = registers_[i];
+ if (action == kPush) {
+ __ push(RegisterAllocator::ToRegister(i));
+ } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
+ __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
+ }
+ }
+}
+
+void DeferredCode::RestoreRegisters() {
+ // Restore registers in reverse order due to the stack.
+ for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
+ int action = registers_[i];
+ if (action == kPush) {
+ __ pop(RegisterAllocator::ToRegister(i));
+ } else if (action != kIgnore) {
+ action &= ~kSyncedFlag;
+ __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
+ }
+ }
+}
+
+
+// -------------------------------------------------------------------------
+// CodeGenState implementation.
+
+CodeGenState::CodeGenState(CodeGenerator* owner)
+ : owner_(owner),
+ typeof_state_(NOT_INSIDE_TYPEOF),
+ destination_(NULL),
+ previous_(NULL) {
+ owner_->set_state(this);
+}
+
+
+CodeGenState::CodeGenState(CodeGenerator* owner,
+ TypeofState typeof_state,
+ ControlDestination* destination)
+ : owner_(owner),
+ typeof_state_(typeof_state),
+ destination_(destination),
+ previous_(owner->state()) {
+ owner_->set_state(this);
+}
+
+
+CodeGenState::~CodeGenState() {
+ ASSERT(owner_->state() == this);
+ owner_->set_state(previous_);
+}
-void DeferredCode::RestoreRegisters() { UNIMPLEMENTED(); }
+// -------------------------------------------------------------------------
+// CodeGenerator implementation.
CodeGenerator::CodeGenerator(int buffer_size,
Handle<Script> script,
@@ -58,17 +116,257 @@ CodeGenerator::CodeGenerator(int buffer_size,
in_spilled_code_(false) {
}
-#define __ masm->
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals. The inevitable call
+ // will sync frame elements to memory anyway, so we do it eagerly to
+ // allow us to push the arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> a) {
- UNIMPLEMENTED();
+ __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+ frame_->EmitPush(rsi); // The context is the second argument.
+ frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
+ Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+ // Return value is ignored.
}
-void CodeGenerator::GenCode(FunctionLiteral* a) {
- masm_->int3(); // UNIMPLEMENTED
+
+void CodeGenerator::GenCode(FunctionLiteral* function) {
+ // Record the position for debugging purposes.
+ CodeForFunctionPosition(function);
+ ZoneList<Statement*>* body = function->body();
+
+ // Initialize state.
+ ASSERT(scope_ == NULL);
+ scope_ = function->scope();
+ ASSERT(allocator_ == NULL);
+ RegisterAllocator register_allocator(this);
+ allocator_ = &register_allocator;
+ ASSERT(frame_ == NULL);
+ frame_ = new VirtualFrame();
+ set_in_spilled_code(false);
+
+ // Adjust for function-level loop nesting.
+ loop_nesting_ += function->loop_nesting();
+
+ JumpTarget::set_compiling_deferred_code(false);
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ // fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ false) {
+ frame_->SpillAll();
+ __ int3();
+ }
+#endif
+
+ // New scope to get automatic timing calculation.
+ { // NOLINT
+ HistogramTimerScope codegen_timer(&Counters::code_generation);
+ CodeGenState state(this);
+
+ // Entry:
+ // Stack: receiver, arguments, return address.
+ // rbp: caller's frame pointer
+ // rsp: stack pointer
+ // rdi: called JS function
+ // rsi: callee's context
+ allocator_->Initialize();
+ frame_->Enter();
+
+ // Allocate space for locals and initialize them.
+ frame_->AllocateStackSlots();
+ // Initialize the function return target after the locals are set
+ // up, because it needs the expected frame height from the frame.
+ function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+ function_return_is_shadowed_ = false;
+
+ // Allocate the local context if needed.
+ if (scope_->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ allocate local context");
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ frame_->PushFunction();
+ Result context = frame_->CallRuntime(Runtime::kNewContext, 1);
+
+ // Update context local.
+ frame_->SaveContextRegister();
+
+ // Verify that the runtime call result and rsi agree.
+ if (FLAG_debug_code) {
+ __ cmpq(context.reg(), rsi);
+ __ Assert(equal, "Runtime::NewContext should end up in rsi");
+ }
+ }
+
+ // TODO(1241774): Improve this code:
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ Variable* par = scope_->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ // The use of SlotOperand below is safe in unspilled code
+ // because the slot is guaranteed to be a context slot.
+ //
+ // There are no parameters in the global scope.
+ ASSERT(!scope_->is_global_scope());
+ frame_->PushParameterAt(i);
+ Result value = frame_->Pop();
+ value.ToRegister();
+
+ // SlotOperand loads context.reg() with the context object
+ // stored to, used below in RecordWrite.
+ Result context = allocator_->Allocate();
+ ASSERT(context.is_valid());
+ __ movq(SlotOperand(slot, context.reg()), value.reg());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ frame_->Spill(context.reg());
+ frame_->Spill(value.reg());
+ __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
+ }
+ }
+ }
+
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in
+ // the context.
+ if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+ StoreArgumentsObject(true);
+ }
+
+ // Generate code to 'execute' declarations and initialize functions
+ // (source elements). In case of an illegal redeclaration we need to
+ // handle that instead of processing the declarations.
+ if (scope_->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ illegal redeclarations");
+ scope_->VisitIllegalRedeclaration(this);
+ } else {
+ Comment cmnt(masm_, "[ declarations");
+ ProcessDeclarations(scope_->declarations());
+ // Bail out if a stack-overflow exception occurred when processing
+ // declarations.
+ if (HasStackOverflow()) return;
+ }
+
+ if (FLAG_trace) {
+ frame_->CallRuntime(Runtime::kTraceEnter, 0);
+ // Ignore the return value.
+ }
+ CheckStack();
+
+ // Compile the body of the function in a vanilla state. Don't
+ // bother compiling all the code if the scope has an illegal
+ // redeclaration.
+ if (!scope_->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ function body");
+#ifdef DEBUG
+ bool is_builtin = Bootstrapper::IsActive();
+ bool should_trace =
+ is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
+ if (should_trace) {
+ frame_->CallRuntime(Runtime::kDebugTrace, 0);
+ // Ignore the return value.
+ }
+#endif
+ VisitStatements(body);
+
+ // Handle the return from the function.
+ if (has_valid_frame()) {
+ // If there is a valid frame, control flow can fall off the end of
+ // the body. In that case there is an implicit return statement.
+ ASSERT(!function_return_is_shadowed_);
+ CodeForReturnPosition(function);
+ frame_->PrepareForReturn();
+ Result undefined(Factory::undefined_value());
+ if (function_return_.is_bound()) {
+ function_return_.Jump(&undefined);
+ } else {
+ function_return_.Bind(&undefined);
+ GenerateReturnSequence(&undefined);
+ }
+ } else if (function_return_.is_linked()) {
+ // If the return target has dangling jumps to it, then we have not
+ // yet generated the return sequence. This can happen when (a)
+ // control does not flow off the end of the body so we did not
+ // compile an artificial return statement just above, and (b) there
+ // are return statements in the body but (c) they are all shadowed.
+ Result return_value;
+ function_return_.Bind(&return_value);
+ GenerateReturnSequence(&return_value);
+ }
+ }
+ }
+
+ // Adjust for function-level loop nesting.
+ loop_nesting_ -= function->loop_nesting();
+
+ // Code generation state must be reset.
+ ASSERT(state_ == NULL);
+ ASSERT(loop_nesting() == 0);
+ ASSERT(!function_return_is_shadowed_);
+ function_return_.Unuse();
+ DeleteFrame();
+
+ // Process any deferred code using the register allocator.
+ if (!HasStackOverflow()) {
+ HistogramTimerScope deferred_timer(&Counters::deferred_code_generation);
+ JumpTarget::set_compiling_deferred_code(true);
+ ProcessDeferred();
+ JumpTarget::set_compiling_deferred_code(false);
+ }
+
+ // There is no need to delete the register allocator, it is a
+ // stack-allocated local.
+ allocator_ = NULL;
+ scope_ = NULL;
}
+
+
+void CodeGenerator::GenerateReturnSequence(Result* return_value) {
+ // The return value is a live (but not currently reference counted)
+ // reference to rax. This is safe because the current frame does not
+ // contain a reference to rax (it is prepared for the return by spilling
+ // all registers).
+ if (FLAG_trace) {
+ frame_->Push(return_value);
+ *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
+ }
+ return_value->ToRegister(rax);
+
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+
+ // Leave the frame and return popping the arguments and the
+ // receiver.
+ frame_->Exit();
+ masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
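+ // E.g., with kPointerSize == 8 on x64, a function with two declared
+ // parameters pops (2 + 1) * 8 = 24 bytes: both arguments plus the
+ // receiver.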
+ DeleteFrame();
+
+ // TODO(x64): introduce kX64JSReturnSequenceLength and enable assert.
+
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger.
+ // ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
+ // masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+}
+
+
void CodeGenerator::GenerateFastCaseSwitchJumpTable(SwitchStatement* a,
int b,
int c,
@@ -78,166 +376,5778 @@ void CodeGenerator::GenerateFastCaseSwitchJumpTable(SwitchStatement* a,
UNIMPLEMENTED();
}
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* a) {
- UNIMPLEMENTED();
+
+
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() {
+ return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
+ && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
+ && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
+ && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
+ && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
+ && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
+ && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
+ && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
+ && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
+ && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
+ && (allocator()->count(r13) == (frame()->is_used(r13) ? 1 : 0))
+ && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
}
+#endif
-void CodeGenerator::VisitBlock(Block* a) {
- UNIMPLEMENTED();
+
+class DeferredStackCheck: public DeferredCode {
+ public:
+ DeferredStackCheck() {
+ set_comment("[ DeferredStackCheck");
+ }
+
+ virtual void Generate();
+};
+
+
+void DeferredStackCheck::Generate() {
+ StackCheckStub stub;
+ __ CallStub(&stub);
}
-void CodeGenerator::VisitDeclaration(Declaration* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::CheckStack() {
+ if (FLAG_check_stack) {
+ DeferredStackCheck* deferred = new DeferredStackCheck;
+ ExternalReference stack_guard_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ __ movq(kScratchRegister, stack_guard_limit);
+ __ cmpq(rsp, Operand(kScratchRegister, 0));
+ deferred->Branch(below);
+ deferred->BindExit();
+ }
}
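+
+// A sketch of what CheckStack emits (illustrative; the slow path is
+// emitted out of line in the deferred-code section after the body):
+//   movq kScratchRegister, stack_guard_limit
+//   cmpq rsp, [kScratchRegister]
+//   jb   deferred      ; deferred: call StackCheckStub; jmp back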
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* a) {
- UNIMPLEMENTED();
+
+class CallFunctionStub: public CodeStub {
+ public:
+ CallFunctionStub(int argc, InLoopFlag in_loop)
+ : argc_(argc), in_loop_(in_loop) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+ InLoopFlag in_loop_;
+
+#ifdef DEBUG
+ void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
+#endif
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() { return argc_; }
+ InLoopFlag InLoop() { return in_loop_; }
+};
+
+
+void CodeGenerator::VisitAndSpill(Statement* statement) {
+ // TODO(X64): No architecture specific code. Move to shared location.
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ Visit(statement);
+ if (frame_ != NULL) {
+ frame_->SpillAll();
+ }
+ set_in_spilled_code(true);
}
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ VisitStatements(statements);
+ if (frame_ != NULL) {
+ frame_->SpillAll();
+ }
+ set_in_spilled_code(true);
}
-void CodeGenerator::VisitIfStatement(IfStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+ ASSERT(!in_spilled_code());
+ for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
+ Visit(statements->at(i));
+ }
}
-void CodeGenerator::VisitContinueStatement(ContinueStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitBlock(Block* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ Block");
+ CodeForStatementPosition(node);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ VisitStatements(node->statements());
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ node->break_target()->Unuse();
}
-void CodeGenerator::VisitBreakStatement(BreakStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitDeclaration(Declaration* node) {
+ Comment cmnt(masm_, "[ Declaration");
+ CodeForStatementPosition(node);
+ Variable* var = node->proxy()->var();
+ ASSERT(var != NULL); // must have been resolved
+ Slot* slot = var->slot();
+
+ // If it was not possible to allocate the variable at compile time,
+ // we need to "declare" it at runtime to make sure it actually
+ // exists in the local context.
+ if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ // Variables with a "LOOKUP" slot were introduced as non-locals
+ // during variable resolution and must have mode DYNAMIC.
+ ASSERT(var->is_dynamic());
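+ // Such variables arise, e.g., in scopes containing a call to 'eval'
+ // or nested inside a 'with' statement, where the variable's holder
+ // object is only known at runtime.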
+ // For now, just do a runtime call. Sync the virtual frame eagerly
+ // so we can simply push the arguments into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(rsi);
+ __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
+ PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
+ frame_->EmitPush(Immediate(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (node->mode() == Variable::CONST) {
+ __ movq(kScratchRegister, Factory::the_hole_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+ } else if (node->fun() != NULL) {
+ Load(node->fun());
+ } else {
+ frame_->EmitPush(Immediate(Smi::FromInt(0))); // no initial value!
+ }
+ Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
+ // Ignore the return value (declarations are statements).
+ return;
+ }
+
+ ASSERT(!var->is_global());
+
+ // If we have a function or a constant, we need to initialize the variable.
+ Expression* val = NULL;
+ if (node->mode() == Variable::CONST) {
+ val = new Literal(Factory::the_hole_value());
+ } else {
+ val = node->fun(); // NULL if we don't have a function
+ }
+
+ if (val != NULL) {
+ {
+ // Set the initial value.
+ Reference target(this, node->proxy());
+ Load(val);
+ target.SetValue(NOT_CONST_INIT);
+ // The reference is removed from the stack (preserving TOS) when
+ // it goes out of scope.
+ }
+ // Get rid of the assigned value (declarations are statements).
+ frame_->Drop();
+ }
}
-void CodeGenerator::VisitReturnStatement(ReturnStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ ExpressionStatement");
+ CodeForStatementPosition(node);
+ Expression* expression = node->expression();
+ expression->MarkAsStatement();
+ Load(expression);
+ // Remove the lingering expression result from the top of stack.
+ frame_->Drop();
}
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "// EmptyStatement");
+ CodeForStatementPosition(node);
+ // nothing to do
}
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitIfStatement(IfStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ IfStatement");
+ // Generate different code depending on which parts of the if statement
+ // are present or not.
+ bool has_then_stm = node->HasThenStatement();
+ bool has_else_stm = node->HasElseStatement();
+
+ CodeForStatementPosition(node);
+ JumpTarget exit;
+ if (has_then_stm && has_else_stm) {
+ JumpTarget then;
+ JumpTarget else_;
+ ControlDestination dest(&then, &else_, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // The else target was bound, so we compile the else part first.
+ Visit(node->else_statement());
+
+ // We may have dangling jumps to the then part.
+ if (then.is_linked()) {
+ if (has_valid_frame()) exit.Jump();
+ then.Bind();
+ Visit(node->then_statement());
+ }
+ } else {
+ // The then target was bound, so we compile the then part first.
+ Visit(node->then_statement());
+
+ if (else_.is_linked()) {
+ if (has_valid_frame()) exit.Jump();
+ else_.Bind();
+ Visit(node->else_statement());
+ }
+ }
+
+ } else if (has_then_stm) {
+ ASSERT(!has_else_stm);
+ JumpTarget then;
+ ControlDestination dest(&then, &exit, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // The exit label was bound. We may have dangling jumps to the
+ // then part.
+ if (then.is_linked()) {
+ exit.Unuse();
+ exit.Jump();
+ then.Bind();
+ Visit(node->then_statement());
+ }
+ } else {
+ // The then label was bound.
+ Visit(node->then_statement());
+ }
+
+ } else if (has_else_stm) {
+ ASSERT(!has_then_stm);
+ JumpTarget else_;
+ ControlDestination dest(&exit, &else_, false);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.true_was_fall_through()) {
+ // The exit label was bound. We may have dangling jumps to the
+ // else part.
+ if (else_.is_linked()) {
+ exit.Unuse();
+ exit.Jump();
+ else_.Bind();
+ Visit(node->else_statement());
+ }
+ } else {
+ // The else label was bound.
+ Visit(node->else_statement());
+ }
+
+ } else {
+ ASSERT(!has_then_stm && !has_else_stm);
+ // We only care about the condition's side effects (not its value
+ // or control flow effect). LoadCondition is called without
+ // forcing control flow.
+ ControlDestination dest(&exit, &exit, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
+ if (!dest.is_used()) {
+ // We got a value on the frame rather than (or in addition to)
+ // control flow.
+ frame_->Drop();
+ }
+ }
+
+ if (exit.is_linked()) {
+ exit.Bind();
+ }
}
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ ContinueStatement");
+ CodeForStatementPosition(node);
+ node->target()->continue_target()->Jump();
}
-void CodeGenerator::VisitLoopStatement(LoopStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ BreakStatement");
+ CodeForStatementPosition(node);
+ node->target()->break_target()->Jump();
}
-void CodeGenerator::VisitForInStatement(ForInStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ ReturnStatement");
+
+ CodeForStatementPosition(node);
+ Load(node->expression());
+ Result return_value = frame_->Pop();
+ if (function_return_is_shadowed_) {
+ function_return_.Jump(&return_value);
+ } else {
+ frame_->PrepareForReturn();
+ if (function_return_.is_bound()) {
+ // If the function return label is already bound we reuse the
+ // code by jumping to the return site.
+ function_return_.Jump(&return_value);
+ } else {
+ function_return_.Bind(&return_value);
+ GenerateReturnSequence(&return_value);
+ }
+ }
}
-void CodeGenerator::VisitTryCatch(TryCatch* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ WithEnterStatement");
+ CodeForStatementPosition(node);
+ Load(node->expression());
+ Result context;
+ if (node->is_catch_block()) {
+ context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
+ } else {
+ context = frame_->CallRuntime(Runtime::kPushContext, 1);
+ }
+
+ // Update context local.
+ frame_->SaveContextRegister();
+
+ // Verify that the runtime call result and rsi agree.
+ if (FLAG_debug_code) {
+ __ cmpq(context.reg(), rsi);
+ __ Assert(equal, "Runtime::NewContext should end up in rsi");
+ }
}
-void CodeGenerator::VisitTryFinally(TryFinally* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ WithExitStatement");
+ CodeForStatementPosition(node);
+ // Pop context.
+ __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
+ // Update context local.
+ frame_->SaveContextRegister();
}
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+ // TODO(X64): This code is completely generic and should be moved somewhere
+ // where it can be shared between architectures.
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ SwitchStatement");
+ CodeForStatementPosition(node);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+ // Compile the switch value.
+ Load(node->tag());
+
+ ZoneList<CaseClause*>* cases = node->cases();
+ int length = cases->length();
+ CaseClause* default_clause = NULL;
+
+ JumpTarget next_test;
+ // Compile the case label expressions and comparisons. Exit early
+ // if a comparison is unconditionally true. The target next_test is
+ // bound before the loop in order to indicate control flow to the
+ // first comparison.
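+ //
+ // Illustrative example: in
+ //   switch (x) { case f(): ...; case g(): ...; default: ... }
+ // f() and g() become a chain of comparisons against a duplicate of
+ // the switch value, each failed test falling through to the next
+ // via next_test; the default clause is only remembered here and
+ // jumped to if every comparison fails.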
+ next_test.Bind();
+ for (int i = 0; i < length && !next_test.is_unused(); i++) {
+ CaseClause* clause = cases->at(i);
+ // The default is not a test, but remember it for later.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ // We recycle the same target next_test for each test. Bind it if
+ // the previous test has not done so and then unuse it for the
+ // loop.
+ if (next_test.is_linked()) {
+ next_test.Bind();
+ }
+ next_test.Unuse();
+
+ // Duplicate the switch value.
+ frame_->Dup();
+
+ // Compile the label expression.
+ Load(clause->label());
+
+ // Compare and branch to the body if true or the next test if
+ // false. Prefer the next test as a fall through.
+ ControlDestination dest(clause->body_target(), &next_test, false);
+ Comparison(equal, true, &dest);
+
+ // If the comparison fell through to the true target, jump to the
+ // actual body.
+ if (dest.true_was_fall_through()) {
+ clause->body_target()->Unuse();
+ clause->body_target()->Jump();
+ }
+ }
+
+ // If there was control flow to a next test from the last one
+ // compiled, compile a jump to the default or break target.
+ if (!next_test.is_unused()) {
+ if (next_test.is_linked()) {
+ next_test.Bind();
+ }
+ // Drop the switch value.
+ frame_->Drop();
+ if (default_clause != NULL) {
+ default_clause->body_target()->Jump();
+ } else {
+ node->break_target()->Jump();
+ }
+ }
+
+ // The last instruction emitted was a jump, either to the default
+ // clause or the break target, or else to a case body from the loop
+ // that compiles the tests.
+ ASSERT(!has_valid_frame());
+ // Compile case bodies as needed.
+ for (int i = 0; i < length; i++) {
+ CaseClause* clause = cases->at(i);
+
+ // There are two ways to reach the body: from the corresponding
+ // test or as the fall through of the previous body.
+ if (clause->body_target()->is_linked() || has_valid_frame()) {
+ if (clause->body_target()->is_linked()) {
+ if (has_valid_frame()) {
+ // If we have both a jump to the test and a fall through, put
+ // a jump on the fall through path to avoid the dropping of
+ // the switch value on the test path. The exception is the
+ // default which has already had the switch value dropped.
+ if (clause->is_default()) {
+ clause->body_target()->Bind();
+ } else {
+ JumpTarget body;
+ body.Jump();
+ clause->body_target()->Bind();
+ frame_->Drop();
+ body.Bind();
+ }
+ } else {
+ // No fall through to worry about.
+ clause->body_target()->Bind();
+ if (!clause->is_default()) {
+ frame_->Drop();
+ }
+ }
+ } else {
+ // Otherwise, we have only fall through.
+ ASSERT(has_valid_frame());
+ }
+
+ // We are now prepared to compile the body.
+ Comment cmnt(masm_, "[ Case body");
+ VisitStatements(clause->statements());
+ }
+ clause->body_target()->Unuse();
+ }
+
+ // We may not have a valid frame here so bind the break target only
+ // if needed.
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ node->break_target()->Unuse();
}
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ LoopStatement");
+ CodeForStatementPosition(node);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+ // Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
+ // known result for the test expression, with no side effects.
+ enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
+ if (node->cond() == NULL) {
+ ASSERT(node->type() == LoopStatement::FOR_LOOP);
+ info = ALWAYS_TRUE;
+ } else {
+ Literal* lit = node->cond()->AsLiteral();
+ if (lit != NULL) {
+ if (lit->IsTrue()) {
+ info = ALWAYS_TRUE;
+ } else if (lit->IsFalse()) {
+ info = ALWAYS_FALSE;
+ }
+ }
+ }
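+ //
+ // E.g., 'for (;;)' and 'while (true)' are ALWAYS_TRUE,
+ // 'do { ... } while (false)' is ALWAYS_FALSE, and a non-literal
+ // condition such as 'i < n' is DONT_KNOW.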
+
+ switch (node->type()) {
+ case LoopStatement::DO_LOOP: {
+ JumpTarget body(JumpTarget::BIDIRECTIONAL);
+ IncrementLoopNesting();
+
+ // Label the top of the loop for the backward jump if necessary.
+ if (info == ALWAYS_TRUE) {
+ // Use the continue target.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else if (info == ALWAYS_FALSE) {
+ // No need to label it.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ } else {
+ // Continue is the test, so use the backward body target.
+ ASSERT(info == DONT_KNOW);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ body.Bind();
+ }
+
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ Visit(node->body());
+
+ // Compile the test.
+ if (info == ALWAYS_TRUE) {
+ // If control flow can fall off the end of the body, jump back
+ // to the top and bind the break target at the exit.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+
+ } else if (info == ALWAYS_FALSE) {
+ // We may have had continues or breaks in the body.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+
+ } else {
+ ASSERT(info == DONT_KNOW);
+ // We have to compile the test expression if it can be reached by
+ // control flow falling out of the body or via continue.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ if (has_valid_frame()) {
+ ControlDestination dest(&body, node->break_target(), false);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ }
+ break;
+ }
+
+ case LoopStatement::WHILE_LOOP: {
+ // Do not duplicate conditions that may have function literal
+ // subexpressions. This can cause us to compile the function
+ // literal twice.
+ bool test_at_bottom = !node->may_have_function_literal();
+
+ IncrementLoopNesting();
+
+ // If the condition is always false and has no side effects, we
+ // do not need to compile anything.
+ if (info == ALWAYS_FALSE) break;
+
+ JumpTarget body;
+ if (test_at_bottom) {
+ body.set_direction(JumpTarget::BIDIRECTIONAL);
+ }
+
+ // Based on the condition analysis, compile the test as necessary.
+ if (info == ALWAYS_TRUE) {
+ // We will not compile the test expression. Label the top of
+ // the loop with the continue target.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else {
+ ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
+ if (test_at_bottom) {
+ // Continue is the test at the bottom, no need to label the
+ // test at the top. The body is a backward target.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ } else {
+ // Label the test at the top as the continue target. The
+ // body is a forward-only target.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ }
+ // Compile the test with the body as the true target and
+ // preferred fall-through and with the break target as the
+ // false target.
+ ControlDestination dest(&body, node->break_target(), true);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // If we got the break target as fall-through, the test may
+ // have been unconditionally false (if there are no jumps to
+ // the body).
+ if (!body.is_linked()) break;
+
+ // Otherwise, jump around the body on the fall through and
+ // then bind the body target.
+ node->break_target()->Unuse();
+ node->break_target()->Jump();
+ body.Bind();
+ }
+ }
+
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ Visit(node->body());
+
+ // Based on the condition analysis, compile the backward jump as
+ // necessary.
+ if (info == ALWAYS_TRUE) {
+ // The loop body has been labeled with the continue target.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
+ }
+ } else {
+ ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
+ if (test_at_bottom) {
+ // If we have chosen to recompile the test at the bottom,
+ // then it is the continue target.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ if (has_valid_frame()) {
+ // The break target is the fall-through (body is a backward
+ // jump from here and thus an invalid fall-through).
+ ControlDestination dest(&body, node->break_target(), false);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ }
+ } else {
+ // If we have chosen not to recompile the test at the
+ // bottom, jump back to the one at the top.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
+ }
+ }
+ }
+
+ // The break target may be already bound (by the condition), or
+ // there may not be a valid frame. Bind it only if needed.
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ break;
+ }
+
+ case LoopStatement::FOR_LOOP: {
+ // Do not duplicate conditions that may have function literal
+ // subexpressions. This can cause us to compile the function
+ // literal twice.
+ bool test_at_bottom = !node->may_have_function_literal();
+
+ // Compile the init expression if present.
+ if (node->init() != NULL) {
+ Visit(node->init());
+ }
+
+ IncrementLoopNesting();
+
+ // If the condition is always false and has no side effects, we
+ // do not need to compile anything else.
+ if (info == ALWAYS_FALSE) break;
+
+ // Target for backward edge if no test at the bottom, otherwise
+ // unused.
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+
+ // Target for backward edge if there is a test at the bottom,
+ // otherwise used as target for test at the top.
+ JumpTarget body;
+ if (test_at_bottom) {
+ body.set_direction(JumpTarget::BIDIRECTIONAL);
+ }
+
+ // Based on the condition analysis, compile the test as necessary.
+ if (info == ALWAYS_TRUE) {
+ // We will not compile the test expression. Label the top of
+ // the loop.
+ if (node->next() == NULL) {
+ // Use the continue target if there is no update expression.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else {
+ // Otherwise use the backward loop target.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ loop.Bind();
+ }
+ } else {
+ ASSERT(info == DONT_KNOW);
+ if (test_at_bottom) {
+ // Continue is either the update expression or the test at
+ // the bottom, no need to label the test at the top.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ } else if (node->next() == NULL) {
+ // We are not recompiling the test at the bottom and there
+ // is no update expression.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else {
+ // We are not recompiling the test at the bottom and there
+ // is an update expression.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ loop.Bind();
+ }
+
+ // Compile the test with the body as the true target and
+ // preferred fall-through and with the break target as the
+ // false target.
+ ControlDestination dest(&body, node->break_target(), true);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // If we got the break target as fall-through, the test may
+ // have been unconditionally false (if there are no jumps to
+ // the body).
+ if (!body.is_linked()) break;
+
+ // Otherwise, jump around the body on the fall through and
+ // then bind the body target.
+ node->break_target()->Unuse();
+ node->break_target()->Jump();
+ body.Bind();
+ }
+ }
+
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ Visit(node->body());
+
+ // If there is an update expression, compile it if necessary.
+ if (node->next() != NULL) {
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+
+ // Control can reach the update by falling out of the body or
+ // by a continue.
+ if (has_valid_frame()) {
+ // Record the source position of the statement as this code
+ // which is after the code for the body actually belongs to
+ // the loop statement and not the body.
+ CodeForStatementPosition(node);
+ Visit(node->next());
+ }
+ }
+
+ // Based on the condition analysis, compile the backward jump as
+ // necessary.
+ if (info == ALWAYS_TRUE) {
+ if (has_valid_frame()) {
+ if (node->next() == NULL) {
+ node->continue_target()->Jump();
+ } else {
+ loop.Jump();
+ }
+ }
+ } else {
+ ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
+ if (test_at_bottom) {
+ if (node->continue_target()->is_linked()) {
+ // We can have dangling jumps to the continue target if
+ // there was no update expression.
+ node->continue_target()->Bind();
+ }
+ // Control can reach the test at the bottom by falling out
+ // of the body, by a continue in the body, or from the
+ // update expression.
+ if (has_valid_frame()) {
+ // The break target is the fall-through (body is a
+ // backward jump from here).
+ ControlDestination dest(&body, node->break_target(), false);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ }
+ } else {
+ // Otherwise, jump back to the test at the top.
+ if (has_valid_frame()) {
+ if (node->next() == NULL) {
+ node->continue_target()->Jump();
+ } else {
+ loop.Jump();
+ }
+ }
+ }
+ }
+
+ // The break target may be already bound (by the condition), or
+ // there may not be a valid frame. Bind it only if needed.
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ break;
+ }
+ }
+
+ DecrementLoopNesting();
+ node->continue_target()->Unuse();
+ node->break_target()->Unuse();
+}
+
+
+void CodeGenerator::VisitForInStatement(ForInStatement* node) {
+ ASSERT(!in_spilled_code());
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ ForInStatement");
+ CodeForStatementPosition(node);
+
+ JumpTarget primitive;
+ JumpTarget jsobject;
+ JumpTarget fixed_array;
+ JumpTarget entry(JumpTarget::BIDIRECTIONAL);
+ JumpTarget end_del_check;
+ JumpTarget exit;
+
+ // Get the object to enumerate over (converted to JSObject).
+ LoadAndSpill(node->enumerable());
+
+ // Both SpiderMonkey and kjs ignore null and undefined in contrast
+ // to the specification; section 12.6.4 mandates a call to ToObject.
+ frame_->EmitPop(rax);
+
+ // rax: value to be iterated over
+ __ Cmp(rax, Factory::undefined_value());
+ exit.Branch(equal);
+ __ Cmp(rax, Factory::null_value());
+ exit.Branch(equal);
+
+ // Stack layout in body:
+ // [iteration counter (smi)] <- slot 0
+ // [length of array] <- slot 1
+ // [FixedArray] <- slot 2
+ // [Map or 0] <- slot 3
+ // [Object] <- slot 4
+
+ // Check if enumerable is already a JSObject
+ // rax: value to be iterated over
+ __ testl(rax, Immediate(kSmiTagMask));
+ primitive.Branch(zero);
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ jsobject.Branch(above_equal);
+
+ primitive.Bind();
+ frame_->EmitPush(rax);
+ frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
+ // The call returns its value in rax, which is where we want it below.
+
+ jsobject.Bind();
+ // Get the set of properties (as a FixedArray or Map).
+ // rax: value to be iterated over
+ frame_->EmitPush(rax); // push the object being iterated over (slot 4)
+
+ frame_->EmitPush(rax); // push the Object (slot 4) for the runtime call
+ frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+ // If we got a Map, we can do a fast modification check.
+ // Otherwise, we got a FixedArray, and we have to do a slow check.
+ // rax: map or fixed array (result from call to
+ // Runtime::kGetPropertyNamesFast)
+ __ movq(rdx, rax);
+ __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ Cmp(rcx, Factory::meta_map());
+ fixed_array.Branch(not_equal);
+
+ // Get enum cache
+ // rax: map (result from call to Runtime::kGetPropertyNamesFast)
+ __ movq(rcx, rax);
+ __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
+ // Get the bridge array held in the enumeration index field.
+ __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
+ // Get the cache from the bridge array.
+ __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ frame_->EmitPush(rax); // <- slot 3
+ frame_->EmitPush(rdx); // <- slot 2
+ __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
+ __ shl(rax, Immediate(kSmiTagSize));
+ frame_->EmitPush(rax); // <- slot 1
+ frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
+ entry.Jump();
+
+ fixed_array.Bind();
+ // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
+ frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 3
+ frame_->EmitPush(rax); // <- slot 2
+
+ // Push the length of the array and the initial index onto the stack.
+ __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
+ __ shl(rax, Immediate(kSmiTagSize));
+ frame_->EmitPush(rax); // <- slot 1
+ frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
+
+ // Condition.
+ entry.Bind();
+ // Grab the current frame's height for the break and continue
+ // targets only after all the state is pushed on the frame.
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+ __ movq(rax, frame_->ElementAt(0)); // load the current count
+ __ cmpq(rax, frame_->ElementAt(1)); // compare to the array length
+ node->break_target()->Branch(above_equal);
+
+ // Get the i'th entry of the array.
+ __ movq(rdx, frame_->ElementAt(2));
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ // Multiplier is times_4 since rax is already a Smi.
+ __ movq(rbx, Operand(rdx, rax, times_4,
+ FixedArray::kHeaderSize - kHeapObjectTag));
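+ // (A smi encodes value * 2, so scaling the smi index by 4 yields
+ // value * 8, the byte offset of one pointer-sized array entry.)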
+
+ // Get the expected map from the stack or a zero map in the
+ // permanent slow case.
+ // rax: current iteration count
+ // rbx: i'th entry of the enum cache
+ __ movq(rdx, frame_->ElementAt(3));
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we have to filter the key.
+ // rax: current iteration count
+ // rbx: i'th entry of the enum cache
+ // rdx: expected map value
+ __ movq(rcx, frame_->ElementAt(4));
+ __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ cmpq(rcx, rdx);
+ end_del_check.Branch(equal);
+
+ // Convert the entry to a string (or null if it isn't a property anymore).
+ frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
+ frame_->EmitPush(rbx); // push entry
+ frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
+ __ movq(rbx, rax);
+
+ // If the property has been removed while iterating, we just skip it.
+ __ Cmp(rbx, Factory::null_value());
+ node->continue_target()->Branch(equal);
+
+ end_del_check.Bind();
+ // Store the entry in the 'each' expression and take another spin in the
+ // loop. rbx: i'th entry of the enum cache (or string thereof).
+ frame_->EmitPush(rbx);
+ { Reference each(this, node->each());
+ // Loading a reference may leave the frame in an unspilled state.
+ frame_->SpillAll();
+ if (!each.is_illegal()) {
+ if (each.size() > 0) {
+ frame_->EmitPush(frame_->ElementAt(each.size()));
+ }
+ // If the reference was to a slot we rely on the convenient property
+ // that it doesn't matter whether a value (e.g., rbx pushed above) is
+ // right on top of or right underneath a zero-sized reference.
+ each.SetValue(NOT_CONST_INIT);
+ if (each.size() > 0) {
+ // It's safe to pop the value lying on top of the reference before
+ // unloading the reference itself (which preserves the top of stack,
+ // i.e., now the topmost value of the non-zero sized reference), since
+ // we will discard the top of stack after unloading the reference
+ // anyway.
+ frame_->Drop();
+ }
+ }
+ }
+ // Unloading a reference may leave the frame in an unspilled state.
+ frame_->SpillAll();
+
+ // Discard the i'th entry pushed above or else the remainder of the
+ // reference, whichever is currently on top of the stack.
+ frame_->Drop();
+
+ // Body.
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ VisitAndSpill(node->body());
+
+ // Next. Reestablish a spilled frame in case we are coming here via
+ // a continue in the body.
+ node->continue_target()->Bind();
+ frame_->SpillAll();
+ frame_->EmitPop(rax);
+ __ addq(rax, Immediate(Smi::FromInt(1)));
+ frame_->EmitPush(rax);
+ entry.Jump();
+
+ // Cleanup. No need to spill because VirtualFrame::Drop is safe for
+ // any frame.
+ node->break_target()->Bind();
+ frame_->Drop(5);
+
+ // Exit.
+ exit.Bind();
+
+ node->continue_target()->Unuse();
+ node->break_target()->Unuse();
+}
+
+void CodeGenerator::VisitTryCatch(TryCatch* node) {
+ ASSERT(!in_spilled_code());
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ TryCatch");
+ CodeForStatementPosition(node);
+
+ JumpTarget try_block;
+ JumpTarget exit;
+
+ try_block.Call();
+ // --- Catch block ---
+ frame_->EmitPush(rax);
+
+ // Store the caught exception in the catch variable.
+ { Reference ref(this, node->catch_var());
+ ASSERT(ref.is_slot());
+ // Load the exception to the top of the stack. Here we make use of the
+ // convenient property that it doesn't matter whether a value is
+ // immediately on top of or underneath a zero-sized reference.
+ ref.SetValue(NOT_CONST_INIT);
+ }
+
+ // Remove the exception from the stack.
+ frame_->Drop();
+
+ VisitStatementsAndSpill(node->catch_block()->statements());
+ if (has_valid_frame()) {
+ exit.Jump();
+ }
+
+ // --- Try block ---
+ try_block.Bind();
+
+ frame_->PushTryHandler(TRY_CATCH_HANDLER);
+ int handler_height = frame_->height();
+
+ // Shadow the jump targets for all escapes from the try block, including
+ // returns. During shadowing, the original target is hidden as the
+ // ShadowTarget and operations on the original actually affect the
+ // shadowing target.
+ //
+ // We should probably try to unify the escaping targets and the return
+ // target.
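+ //
+ // E.g., a 'return' inside
+ //   try { return x; } catch (e) { ... }
+ // must not leave the function directly: it first has to unlink the
+ // try handler, which the unlink code for the shadow targets below
+ // emits.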
+ int nof_escapes = node->escaping_targets()->length();
+ List<ShadowTarget*> shadows(1 + nof_escapes);
+
+ // Add the shadow target for the function return.
+ static const int kReturnShadowIndex = 0;
+ shadows.Add(new ShadowTarget(&function_return_));
+ bool function_return_was_shadowed = function_return_is_shadowed_;
+ function_return_is_shadowed_ = true;
+ ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+ // Add the remaining shadow targets.
+ for (int i = 0; i < nof_escapes; i++) {
+ shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
+ }
+
+ // Generate code for the statements in the try block.
+ VisitStatementsAndSpill(node->try_block()->statements());
+
+ // Stop the introduced shadowing and count the number of required unlinks.
+ // After shadowing stops, the original targets are unshadowed and the
+ // ShadowTargets represent the formerly shadowing targets.
+ bool has_unlinks = false;
+ for (int i = 0; i < shadows.length(); i++) {
+ shadows[i]->StopShadowing();
+ has_unlinks = has_unlinks || shadows[i]->is_linked();
+ }
+ function_return_is_shadowed_ = function_return_was_shadowed;
+
+ // Get an external reference to the handler address.
+ ExternalReference handler_address(Top::k_handler_address);
+
+ // Make sure that there's nothing left on the stack above the
+ // handler structure.
+ if (FLAG_debug_code) {
+ __ movq(kScratchRegister, handler_address);
+ __ cmpq(rsp, Operand(kScratchRegister, 0));
+ __ Assert(equal, "stack pointer should point to top handler");
+ }
+
+ // If we can fall off the end of the try block, unlink from try chain.
+ if (has_valid_frame()) {
+ // The next handler address is on top of the frame. Unlink from
+ // the handler list and drop the rest of this handler from the
+ // frame.
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ movq(kScratchRegister, handler_address);
+ frame_->EmitPop(Operand(kScratchRegister, 0));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+ if (has_unlinks) {
+ exit.Jump();
+ }
+ }
+
+ // Generate unlink code for the (formerly) shadowing targets that
+ // have been jumped to. Deallocate each shadow target.
+ Result return_value;
+ for (int i = 0; i < shadows.length(); i++) {
+ if (shadows[i]->is_linked()) {
+ // Unlink from try chain; be careful not to destroy the TOS if
+ // there is one.
+ if (i == kReturnShadowIndex) {
+ shadows[i]->Bind(&return_value);
+ return_value.ToRegister(rax);
+ } else {
+ shadows[i]->Bind();
+ }
+ // Because we can be jumping here (to spilled code) from
+ // unspilled code, we need to reestablish a spilled frame at
+ // this block.
+ frame_->SpillAll();
+
+ // Reload sp from the top handler, because some statements that we
+ // break from (e.g., for...in) may have left stuff on the stack.
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rsp, Operand(kScratchRegister, 0));
+ frame_->Forget(frame_->height() - handler_height);
+
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ movq(kScratchRegister, handler_address);
+ frame_->EmitPop(Operand(kScratchRegister, 0));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+ if (i == kReturnShadowIndex) {
+ if (!function_return_is_shadowed_) frame_->PrepareForReturn();
+ shadows[i]->other_target()->Jump(&return_value);
+ } else {
+ shadows[i]->other_target()->Jump();
+ }
+ }
+ }
+
+ exit.Bind();
+}
+
+
+void CodeGenerator::VisitTryFinally(TryFinally* node) {
+ ASSERT(!in_spilled_code());
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ TryFinally");
+ CodeForStatementPosition(node);
+
+ // State: Used to keep track of reason for entering the finally
+ // block. Should probably be extended to hold information for
+ // break/continue from within the try block.
+ enum { FALLING, THROWING, JUMPING };
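+ // FALLING means control fell off the end of the try block;
+ // THROWING means an exception was thrown in the try block; and
+ // JUMPING + i means the i'th shadowed target (the function return,
+ // a break, or a continue) was taken.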
+
+ JumpTarget try_block;
+ JumpTarget finally_block;
+
+ try_block.Call();
+
+ frame_->EmitPush(rax);
+ // In case of thrown exceptions, this is where we continue.
+ __ movq(rcx, Immediate(Smi::FromInt(THROWING)));
+ finally_block.Jump();
+
+ // --- Try block ---
+ try_block.Bind();
+
+ frame_->PushTryHandler(TRY_FINALLY_HANDLER);
+ int handler_height = frame_->height();
+
+ // Shadow the jump targets for all escapes from the try block, including
+ // returns. During shadowing, the original target is hidden as the
+ // ShadowTarget and operations on the original actually affect the
+ // shadowing target.
+ //
+ // We should probably try to unify the escaping targets and the return
+ // target.
+ int nof_escapes = node->escaping_targets()->length();
+ List<ShadowTarget*> shadows(1 + nof_escapes);
+
+ // Add the shadow target for the function return.
+ static const int kReturnShadowIndex = 0;
+ shadows.Add(new ShadowTarget(&function_return_));
+ bool function_return_was_shadowed = function_return_is_shadowed_;
+ function_return_is_shadowed_ = true;
+ ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+ // Add the remaining shadow targets.
+ for (int i = 0; i < nof_escapes; i++) {
+ shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
+ }
+
+ // Generate code for the statements in the try block.
+ VisitStatementsAndSpill(node->try_block()->statements());
+
+ // Stop the introduced shadowing and count the number of required unlinks.
+ // After shadowing stops, the original targets are unshadowed and the
+ // ShadowTargets represent the formerly shadowing targets.
+ int nof_unlinks = 0;
+ for (int i = 0; i < shadows.length(); i++) {
+ shadows[i]->StopShadowing();
+ if (shadows[i]->is_linked()) nof_unlinks++;
+ }
+ function_return_is_shadowed_ = function_return_was_shadowed;
+
+ // Get an external reference to the handler address.
+ ExternalReference handler_address(Top::k_handler_address);
+
+ // If we can fall off the end of the try block, unlink from the try
+ // chain and set the state on the frame to FALLING.
+ if (has_valid_frame()) {
+ // The next handler address is on top of the frame.
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ movq(kScratchRegister, handler_address);
+ frame_->EmitPop(Operand(kScratchRegister, 0));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+ // Fake a top of stack value (unneeded when FALLING) and set the
+ // state in rcx, then jump around the unlink blocks if any.
+ __ movq(kScratchRegister,
+ Factory::undefined_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+ __ movq(rcx, Immediate(Smi::FromInt(FALLING)));
+ if (nof_unlinks > 0) {
+ finally_block.Jump();
+ }
+ }
+
+ // Generate code to unlink and set the state for the (formerly)
+ // shadowing targets that have been jumped to.
+ for (int i = 0; i < shadows.length(); i++) {
+ if (shadows[i]->is_linked()) {
+ // If we have come from the shadowed return, the return value is
+ // on the virtual frame. We must preserve it until it is
+ // pushed.
+ if (i == kReturnShadowIndex) {
+ Result return_value;
+ shadows[i]->Bind(&return_value);
+ return_value.ToRegister(rax);
+ } else {
+ shadows[i]->Bind();
+ }
+ // Because we can be jumping here (to spilled code) from
+ // unspilled code, we need to reestablish a spilled frame at
+ // this block.
+ frame_->SpillAll();
+
+ // Reload sp from the top handler, because some statements that
+ // we break from (e.g., for...in) may have left stuff on the
+ // stack.
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rsp, Operand(kScratchRegister, 0));
+ frame_->Forget(frame_->height() - handler_height);
+
+ // Unlink this handler and drop it from the frame.
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ movq(kScratchRegister, handler_address);
+ frame_->EmitPop(Operand(kScratchRegister, 0));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+ if (i == kReturnShadowIndex) {
+ // If this target shadowed the function return, materialize
+ // the return value on the stack.
+ frame_->EmitPush(rax);
+ } else {
+ // Fake TOS for targets that shadowed breaks and continues.
+ __ movq(kScratchRegister,
+ Factory::undefined_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+ }
+ __ movq(rcx, Immediate(Smi::FromInt(JUMPING + i)));
+ if (--nof_unlinks > 0) {
+ // If this is not the last unlink block, jump around the next.
+ finally_block.Jump();
+ }
+ }
+ }
+
+ // --- Finally block ---
+ finally_block.Bind();
+
+ // Push the state on the stack.
+ frame_->EmitPush(rcx);
+
+ // We keep two elements on the stack - the (possibly faked) result
+ // and the state - while evaluating the finally block.
+ //
+ // Generate code for the statements in the finally block.
+ VisitStatementsAndSpill(node->finally_block()->statements());
+
+ if (has_valid_frame()) {
+ // Restore state and return value or faked TOS.
+ frame_->EmitPop(rcx);
+ frame_->EmitPop(rax);
+ }
+
+ // Generate code to jump to the right destination for all used
+ // formerly shadowing targets. Deallocate each shadow target.
+ for (int i = 0; i < shadows.length(); i++) {
+ if (has_valid_frame() && shadows[i]->is_bound()) {
+ BreakTarget* original = shadows[i]->other_target();
+ __ cmpq(rcx, Immediate(Smi::FromInt(JUMPING + i)));
+ if (i == kReturnShadowIndex) {
+ // The return value is (already) in rax.
+ Result return_value = allocator_->Allocate(rax);
+ ASSERT(return_value.is_valid());
+ if (function_return_is_shadowed_) {
+ original->Branch(equal, &return_value);
+ } else {
+ // Branch around the preparation for return which may emit
+ // code.
+ JumpTarget skip;
+ skip.Branch(not_equal);
+ frame_->PrepareForReturn();
+ original->Jump(&return_value);
+ skip.Bind();
+ }
+ } else {
+ original->Branch(equal);
+ }
+ }
+ }
+
+ if (has_valid_frame()) {
+ // Check if we need to rethrow the exception.
+ JumpTarget exit;
+ __ cmpq(rcx, Immediate(Smi::FromInt(THROWING)));
+ exit.Branch(not_equal);
+
+ // Rethrow exception.
+ frame_->EmitPush(rax); // undo pop from above
+ frame_->CallRuntime(Runtime::kReThrow, 1);
+
+ // Done.
+ exit.Bind();
+ }
+}
+
+
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ DebuggerStatement");
+ CodeForStatementPosition(node);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Spill everything, even constants, to the frame.
+ frame_->SpillAll();
+ frame_->CallRuntime(Runtime::kDebugBreak, 0);
+ // Ignore the return value.
+#endif
+}
+
+
+void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+ // Call the runtime to instantiate the function boilerplate object.
+ // The inevitable call will sync frame elements to memory anyway, so
+ // we do it eagerly to allow us to push the arguments directly into
+ // place.
+ ASSERT(boilerplate->IsBoilerplate());
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ // Push the boilerplate on the stack.
+ __ movq(kScratchRegister, boilerplate, RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+
+ // Create a new closure.
+ frame_->EmitPush(rsi);
+ Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
+ frame_->Push(&result);
}
+
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+ Comment cmnt(masm_, "[ FunctionLiteral");
+
+ // Build the function boilerplate and instantiate it.
+ Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+ // Check for stack-overflow exception.
+ if (HasStackOverflow()) return;
+ InstantiateBoilerplate(boilerplate);
+}
+
+
void CodeGenerator::VisitFunctionBoilerplateLiteral(
- FunctionBoilerplateLiteral* a) {
- UNIMPLEMENTED();
+ FunctionBoilerplateLiteral* node) {
+ Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
+ InstantiateBoilerplate(node->boilerplate());
}
-void CodeGenerator::VisitConditional(Conditional* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitConditional(Conditional* node) {
+ Comment cmnt(masm_, "[ Conditional");
+ JumpTarget then;
+ JumpTarget else_;
+ JumpTarget exit;
+ ControlDestination dest(&then, &else_, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // The else target was bound, so we compile the else part first.
+ Load(node->else_expression(), typeof_state());
+
+ if (then.is_linked()) {
+ exit.Jump();
+ then.Bind();
+ Load(node->then_expression(), typeof_state());
+ }
+ } else {
+ // The then target was bound, so we compile the then part first.
+ Load(node->then_expression(), typeof_state());
+
+ if (else_.is_linked()) {
+ exit.Jump();
+ else_.Bind();
+ Load(node->else_expression(), typeof_state());
+ }
+ }
+
+ exit.Bind();
}
-void CodeGenerator::VisitSlot(Slot* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitSlot(Slot* node) {
+ Comment cmnt(masm_, "[ Slot");
+ LoadFromSlotCheckForArguments(node, typeof_state());
}
-void CodeGenerator::VisitVariableProxy(VariableProxy* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ Variable* var = node->var();
+ Expression* expr = var->rewrite();
+ if (expr != NULL) {
+ Visit(expr);
+ } else {
+ ASSERT(var->is_global());
+ Reference ref(this, node);
+ ref.GetValue(typeof_state());
+ }
}
-void CodeGenerator::VisitLiteral(Literal* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitLiteral(Literal* node) {
+ Comment cmnt(masm_, "[ Literal");
+ frame_->Push(node->handle());
}
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* a) {
- UNIMPLEMENTED();
+
+// Materialize the regexp literal 'node' in the literals array
+// 'literals' of the function. Leave the regexp boilerplate in
+// 'boilerplate'.
+class DeferredRegExpLiteral: public DeferredCode {
+ public:
+ DeferredRegExpLiteral(Register boilerplate,
+ Register literals,
+ RegExpLiteral* node)
+ : boilerplate_(boilerplate), literals_(literals), node_(node) {
+ set_comment("[ DeferredRegExpLiteral");
+ }
+
+ void Generate();
+
+ private:
+ Register boilerplate_;
+ Register literals_;
+ RegExpLiteral* node_;
+};
+
+
+void DeferredRegExpLiteral::Generate() {
+ // Since the entry is undefined we call the runtime system to
+ // compute the literal.
+ // Literal array (0).
+ __ push(literals_);
+ // Literal index (1).
+ __ push(Immediate(Smi::FromInt(node_->literal_index())));
+ // RegExp pattern (2).
+ __ Push(node_->pattern());
+ // RegExp flags (3).
+ __ Push(node_->flags());
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
}
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+ Comment cmnt(masm_, "[ RegExp Literal");
+
+ // Retrieve the literals array and check the allocated entry. Begin
+ // with a writable copy of the function of this activation in a
+ // register.
+ frame_->PushFunction();
+ Result literals = frame_->Pop();
+ literals.ToRegister();
+ frame_->Spill(literals.reg());
+
+ // Load the literals array of the function.
+ __ movq(literals.reg(),
+ FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+ // Load the literal at the ast saved index.
+ Result boilerplate = allocator_->Allocate();
+ ASSERT(boilerplate.is_valid());
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+ __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+ // Check whether we need to materialize the RegExp object. If so,
+ // jump to the deferred code passing the literals array.
+ DeferredRegExpLiteral* deferred =
+ new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
+ __ Cmp(boilerplate.reg(), Factory::undefined_value());
+ deferred->Branch(equal);
+ deferred->BindExit();
+ literals.Unuse();
+
+ // Push the boilerplate object.
+ frame_->Push(&boilerplate);
}
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* a) {
- UNIMPLEMENTED();
+
+// Materialize the object literal 'node' in the literals array
+// 'literals' of the function. Leave the object boilerplate in
+// 'boilerplate'.
+class DeferredObjectLiteral: public DeferredCode {
+ public:
+ DeferredObjectLiteral(Register boilerplate,
+ Register literals,
+ ObjectLiteral* node)
+ : boilerplate_(boilerplate), literals_(literals), node_(node) {
+ set_comment("[ DeferredObjectLiteral");
+ }
+
+ void Generate();
+
+ private:
+ Register boilerplate_;
+ Register literals_;
+ ObjectLiteral* node_;
+};
+
+
+void DeferredObjectLiteral::Generate() {
+ // Since the entry is undefined we call the runtime system to
+ // compute the literal.
+ // Literal array (0).
+ __ push(literals_);
+ // Literal index (1).
+ __ push(Immediate(Smi::FromInt(node_->literal_index())));
+ // Constant properties (2).
+ __ Push(node_->constant_properties());
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
}
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+
+ // Retrieve the literals array and check the allocated entry. Begin
+ // with a writable copy of the function of this activation in a
+ // register.
+ frame_->PushFunction();
+ Result literals = frame_->Pop();
+ literals.ToRegister();
+ frame_->Spill(literals.reg());
+
+ // Load the literals array of the function.
+ __ movq(literals.reg(),
+ FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+ // Load the literal at the ast saved index.
+ Result boilerplate = allocator_->Allocate();
+ ASSERT(boilerplate.is_valid());
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+ __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+ // Check whether we need to materialize the object literal boilerplate.
+ // If so, jump to the deferred code passing the literals array.
+ DeferredObjectLiteral* deferred =
+ new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
+ __ Cmp(boilerplate.reg(), Factory::undefined_value());
+ deferred->Branch(equal);
+ deferred->BindExit();
+ literals.Unuse();
+
+ // Push the boilerplate object.
+ frame_->Push(&boilerplate);
+ // Clone the boilerplate object.
+ Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
+ if (node->depth() == 1) {
+ clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ }
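+ // E.g., {a: 1, b: 2} has depth 1 and can be cloned shallowly,
+ // while {a: {b: 2}} nests another literal and needs the deep clone.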
+ Result clone = frame_->CallRuntime(clone_function_id, 1);
+ // Push the newly cloned literal object as the result.
+ frame_->Push(&clone);
+
+ for (int i = 0; i < node->properties()->length(); i++) {
+ ObjectLiteral::Property* property = node->properties()->at(i);
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ break;
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
+ // else fall through.
+ case ObjectLiteral::Property::COMPUTED: {
+ Handle<Object> key(property->key()->handle());
+ if (key->IsSymbol()) {
+ // Duplicate the object as the IC receiver.
+ frame_->Dup();
+ Load(property->value());
+ frame_->Push(key);
+ Result ignored = frame_->CallStoreIC();
+ // Drop the duplicated receiver and ignore the result.
+ frame_->Drop();
+ break;
+ }
+ // Fall through
+ }
+ case ObjectLiteral::Property::PROTOTYPE: {
+ // Duplicate the object as an argument to the runtime call.
+ frame_->Dup();
+ Load(property->key());
+ Load(property->value());
+ Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
+ // Ignore the result.
+ break;
+ }
+ case ObjectLiteral::Property::SETTER: {
+ // Duplicate the object as an argument to the runtime call.
+ frame_->Dup();
+ Load(property->key());
+ frame_->Push(Smi::FromInt(1));
+ Load(property->value());
+ Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+ // Ignore the result.
+ break;
+ }
+ case ObjectLiteral::Property::GETTER: {
+ // Duplicate the object as an argument to the runtime call.
+ frame_->Dup();
+ Load(property->key());
+ frame_->Push(Smi::FromInt(0));
+ Load(property->value());
+ Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+ // Ignore the result.
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ }
}
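+
+// Illustrative note: only properties that are not already part of the
+// cloned boilerplate are set by the loop above. For example, in
+//   var o = { a: 1, b: x };
+// 'a: 1' comes from the boilerplate, while 'b: x' is a COMPUTED property
+// whose value is loaded and stored through the store IC at runtime.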
-void CodeGenerator::VisitAssignment(Assignment* a) {
- UNIMPLEMENTED();
+
+// Materialize the array literal 'node' in the literals array 'literals'
+// of the function. Leave the array boilerplate in 'boilerplate'.
+class DeferredArrayLiteral: public DeferredCode {
+ public:
+ DeferredArrayLiteral(Register boilerplate,
+ Register literals,
+ ArrayLiteral* node)
+ : boilerplate_(boilerplate), literals_(literals), node_(node) {
+ set_comment("[ DeferredArrayLiteral");
+ }
+
+ void Generate();
+
+ private:
+ Register boilerplate_;
+ Register literals_;
+ ArrayLiteral* node_;
+};
+
+
+void DeferredArrayLiteral::Generate() {
+ // Since the entry is undefined we call the runtime system to
+ // compute the literal.
+ // Literal array (0).
+ __ push(literals_);
+ // Literal index (1).
+ __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  // Constant elements (2).
+ __ Push(node_->literals());
+ __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+ if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
}
-void CodeGenerator::VisitThrow(Throw* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+
+ // Retrieve the literals array and check the allocated entry. Begin
+ // with a writable copy of the function of this activation in a
+ // register.
+ frame_->PushFunction();
+ Result literals = frame_->Pop();
+ literals.ToRegister();
+ frame_->Spill(literals.reg());
+
+ // Load the literals array of the function.
+ __ movq(literals.reg(),
+ FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+ // Load the literal at the ast saved index.
+ Result boilerplate = allocator_->Allocate();
+ ASSERT(boilerplate.is_valid());
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+ __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+  // Check whether we need to materialize the array literal boilerplate.
+ // If so, jump to the deferred code passing the literals array.
+ DeferredArrayLiteral* deferred =
+ new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
+ __ Cmp(boilerplate.reg(), Factory::undefined_value());
+ deferred->Branch(equal);
+ deferred->BindExit();
+ literals.Unuse();
+
+ // Push the resulting array literal boilerplate on the stack.
+ frame_->Push(&boilerplate);
+ // Clone the boilerplate object.
+ Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
+ if (node->depth() == 1) {
+ clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ }
+ Result clone = frame_->CallRuntime(clone_function_id, 1);
+ // Push the newly cloned literal object as the result.
+ frame_->Push(&clone);
+
+ // Generate code to set the elements in the array that are not
+ // literals.
+ for (int i = 0; i < node->values()->length(); i++) {
+ Expression* value = node->values()->at(i);
+
+ // If value is a literal the property value is already set in the
+ // boilerplate object.
+ if (value->AsLiteral() != NULL) continue;
+ // If value is a materialized literal the property value is already set
+ // in the boilerplate object if it is simple.
+ if (CompileTimeValue::IsCompileTimeValue(value)) continue;
+
+ // The property must be set by generated code.
+ Load(value);
+
+ // Get the property value off the stack.
+ Result prop_value = frame_->Pop();
+ prop_value.ToRegister();
+
+ // Fetch the array literal while leaving a copy on the stack and
+ // use it to get the elements array.
+ frame_->Dup();
+ Result elements = frame_->Pop();
+ elements.ToRegister();
+ frame_->Spill(elements.reg());
+ // Get the elements FixedArray.
+ __ movq(elements.reg(),
+ FieldOperand(elements.reg(), JSObject::kElementsOffset));
+
+ // Write to the indexed properties array.
+ int offset = i * kPointerSize + FixedArray::kHeaderSize;
+ __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
+
+ // Update the write barrier for the array address.
+ frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
+ }
}
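+
+// Illustrative note: for an array literal such as
+//   var a = [1, x, 3];
+// the elements 1 and 3 come from the cloned boilerplate, while 'x' is
+// loaded and written into the elements FixedArray above. The write
+// barrier records the store for the generational garbage collector.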
-void CodeGenerator::VisitProperty(Property* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+ ASSERT(!in_spilled_code());
+ // Call runtime routine to allocate the catch extension object and
+ // assign the exception value to the catch variable.
+ Comment cmnt(masm_, "[ CatchExtensionObject");
+ Load(node->key());
+ Load(node->value());
+ Result result =
+ frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+ frame_->Push(&result);
}
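+
+// Illustrative note: for 'try { ... } catch (e) { ... }' the runtime call
+// above builds a one-entry context extension object that maps the name
+// 'e' to the thrown value for the duration of the catch block.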
-void CodeGenerator::VisitCall(Call* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitAssignment(Assignment* node) {
+ Comment cmnt(masm_, "[ Assignment");
+ CodeForStatementPosition(node);
+
+ { Reference target(this, node->target());
+ if (target.is_illegal()) {
+ // Fool the virtual frame into thinking that we left the assignment's
+ // value on the frame.
+ frame_->Push(Smi::FromInt(0));
+ return;
+ }
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+
+ if (node->starts_initialization_block()) {
+ ASSERT(target.type() == Reference::NAMED ||
+ target.type() == Reference::KEYED);
+ // Change to slow case in the beginning of an initialization
+ // block to avoid the quadratic behavior of repeatedly adding
+ // fast properties.
+
+ // The receiver is the argument to the runtime call. It is the
+ // first value pushed when the reference was loaded to the
+ // frame.
+ // TODO(X64): Enable this and the switch back to fast, once they work.
+ // frame_->PushElementAt(target.size() - 1);
+ // Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+ if (node->op() == Token::ASSIGN ||
+ node->op() == Token::INIT_VAR ||
+ node->op() == Token::INIT_CONST) {
+ Load(node->value());
+
+ } else {
+ // Literal* literal = node->value()->AsLiteral();
+ bool overwrite_value =
+ (node->value()->AsBinaryOperation() != NULL &&
+ node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ // Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
+      // There are two cases, easy to test for, where the target is not
+      // read in the right hand side: the right hand side is a literal,
+      // or the right hand side is a different variable. TakeValue invalidates
+ // the target, with an implicit promise that it will be written to again
+ // before it is read.
+ // TODO(X64): Implement TakeValue optimization. Check issue 150016.
+ if (false) {
+ // if (literal != NULL || (right_var != NULL && right_var != var)) {
+ // target.TakeValue(NOT_INSIDE_TYPEOF);
+ } else {
+ target.GetValue(NOT_INSIDE_TYPEOF);
+ }
+ Load(node->value());
+ GenericBinaryOperation(node->binary_op(),
+ node->type(),
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ }
+
+ if (var != NULL &&
+ var->mode() == Variable::CONST &&
+ node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+ // Assignment ignored - leave the value on the stack.
+ } else {
+ CodeForSourcePosition(node->position());
+ if (node->op() == Token::INIT_CONST) {
+ // Dynamic constant initializations must use the function context
+ // and initialize the actual constant declared. Dynamic variable
+ // initializations are simply assignments and use SetValue.
+ target.SetValue(CONST_INIT);
+ } else {
+ target.SetValue(NOT_CONST_INIT);
+ }
+ if (node->ends_initialization_block()) {
+ ASSERT(target.type() == Reference::NAMED ||
+ target.type() == Reference::KEYED);
+ // End of initialization block. Revert to fast case. The
+ // argument to the runtime call is the receiver, which is the
+ // first value pushed as part of the reference, which is below
+ // the lhs value.
+ // TODO(X64): Enable this once ToFastProperties works.
+ // frame_->PushElementAt(target.size());
+ // Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+ }
+ }
+ }
}
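+
+// Illustrative note: for a compound assignment such as
+//   a += b;
+// the code above loads the current value of the target, loads 'b',
+// combines them with GenericBinaryOperation, and stores the result back
+// through the reference, leaving the assigned value on the frame.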
-void CodeGenerator::VisitCallEval(CallEval* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitThrow(Throw* node) {
+ Comment cmnt(masm_, "[ Throw");
+ CodeForStatementPosition(node);
+
+ Load(node->exception());
+ Result result = frame_->CallRuntime(Runtime::kThrow, 1);
+ frame_->Push(&result);
}
-void CodeGenerator::VisitCallNew(CallNew* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitProperty(Property* node) {
+ Comment cmnt(masm_, "[ Property");
+ Reference property(this, node);
+ property.GetValue(typeof_state());
}
-void CodeGenerator::VisitCallRuntime(CallRuntime* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitCall(Call* node) {
+ Comment cmnt(masm_, "[ Call");
+
+ ZoneList<Expression*>* args = node->arguments();
+
+ CodeForStatementPosition(node);
+
+ // Check if the function is a variable or a property.
+ Expression* function = node->expression();
+ Variable* var = function->AsVariableProxy()->AsVariable();
+ Property* property = function->AsProperty();
+
+ // ------------------------------------------------------------------------
+ // Fast-case: Use inline caching.
+ // ---
+ // According to ECMA-262, section 11.2.3, page 44, the function to call
+ // must be resolved after the arguments have been evaluated. The IC code
+ // automatically handles this by loading the arguments before the function
+ // is resolved in cache misses (this also holds for megamorphic calls).
+ // ------------------------------------------------------------------------
+
+ if (var != NULL && !var->is_this() && var->is_global()) {
+ // ----------------------------------
+ // JavaScript example: 'foo(1, 2, 3)' // foo is global
+ // ----------------------------------
+
+ // Push the name of the function and the receiver onto the stack.
+ frame_->Push(var->name());
+
+ // Pass the global object as the receiver and let the IC stub
+ // patch the stack to use the global proxy as 'this' in the
+ // invoked function.
+ LoadGlobal();
+
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Call the IC initialization code.
+ CodeForSourcePosition(node->position());
+ Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
+ arg_count,
+ loop_nesting());
+ frame_->RestoreContextRegister();
+ // Replace the function on the stack with the result.
+ frame_->SetElementAt(0, &result);
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ // ----------------------------------
+ // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
+ // ----------------------------------
+
+ // Load the function from the context. Sync the frame so we can
+ // push the arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(rsi);
+ frame_->EmitPush(var->name());
+ frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+ // The runtime call returns a pair of values in rax and rdx. The
+ // looked-up function is in rax and the receiver is in rdx. These
+ // register references are not ref counted here. We spill them
+ // eagerly since they are arguments to an inevitable call (and are
+ // not sharable by the arguments).
+ ASSERT(!allocator()->is_used(rax));
+ frame_->EmitPush(rax);
+
+ // Load the receiver.
+ ASSERT(!allocator()->is_used(rdx));
+ frame_->EmitPush(rdx);
+
+ // Call the function.
+ CallWithArguments(args, node->position());
+ } else if (property != NULL) {
+ // Check if the key is a literal string.
+ Literal* literal = property->key()->AsLiteral();
+
+ if (literal != NULL && literal->handle()->IsSymbol()) {
+ // ------------------------------------------------------------------
+ // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
+ // ------------------------------------------------------------------
+
+ // TODO(X64): Consider optimizing Function.prototype.apply calls
+ // with arguments object. Requires lazy arguments allocation;
+ // see http://codereview.chromium.org/147075.
+
+ // Push the name of the function and the receiver onto the stack.
+ frame_->Push(literal->handle());
+ Load(property->obj());
+
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Call the IC initialization code.
+ CodeForSourcePosition(node->position());
+ Result result =
+ frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count, loop_nesting());
+ frame_->RestoreContextRegister();
+ // Replace the function on the stack with the result.
+ frame_->SetElementAt(0, &result);
+
+ } else {
+ // -------------------------------------------
+ // JavaScript example: 'array[index](1, 2, 3)'
+ // -------------------------------------------
+
+ // Load the function to call from the property through a reference.
+ Reference ref(this, property);
+ ref.GetValue(NOT_INSIDE_TYPEOF);
+
+ // Pass receiver to called function.
+ if (property->is_synthetic()) {
+ // Use global object as receiver.
+ LoadGlobalReceiver();
+ } else {
+ // The reference's size is non-negative.
+ frame_->PushElementAt(ref.size());
+ }
+
+ // Call the function.
+ CallWithArguments(args, node->position());
+ }
+ } else {
+ // ----------------------------------
+ // JavaScript example: 'foo(1, 2, 3)' // foo is not global
+ // ----------------------------------
+
+ // Load the function.
+ Load(function);
+
+ // Pass the global proxy as the receiver.
+ LoadGlobalReceiver();
+
+ // Call the function.
+ CallWithArguments(args, node->position());
+ }
}
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* a) {
- UNIMPLEMENTED();
+
+void CodeGenerator::VisitCallEval(CallEval* node) {
+ Comment cmnt(masm_, "[ CallEval");
+
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
+ // the function we need to call and the receiver of the call.
+ // Then we call the resolved function using the given arguments.
+
+ ZoneList<Expression*>* args = node->arguments();
+ Expression* function = node->expression();
+
+ CodeForStatementPosition(node);
+
+ // Prepare the stack for the call to the resolved function.
+ Load(function);
+
+ // Allocate a frame slot for the receiver.
+ frame_->Push(Factory::undefined_value());
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Prepare the stack for the call to ResolvePossiblyDirectEval.
+ frame_->PushElementAt(arg_count + 1);
+ if (arg_count > 0) {
+ frame_->PushElementAt(arg_count);
+ } else {
+ frame_->Push(Factory::undefined_value());
+ }
+
+ // Resolve the call.
+ Result result =
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+ // Touch up the stack with the right values for the function and the
+ // receiver. Use a scratch register to avoid destroying the result.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+  __ movq(scratch.reg(),
+          FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
+ frame_->SetElementAt(arg_count + 1, &scratch);
+
+ // We can reuse the result register now.
+ frame_->Spill(result.reg());
+  __ movq(result.reg(),
+          FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
+ frame_->SetElementAt(arg_count, &result);
+
+ // Call the function.
+ CodeForSourcePosition(node->position());
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
+ result = frame_->CallStub(&call_function, arg_count + 1);
+
+ // Restore the context and overwrite the function on the stack with
+ // the result.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &result);
+}
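+
+// Illustrative sketch of the frame set up above for 'eval(x, y)':
+//   function, receiver (undefined), x, y   <- the call frame proper
+//   function, x                            <- arguments to the resolver
+// %ResolvePossiblyDirectEval returns a (function, receiver) pair that is
+// written back into the two corresponding slots of the call frame proper
+// before the CallFunctionStub is invoked.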
+
+
+void CodeGenerator::VisitCallNew(CallNew* node) {
+ Comment cmnt(masm_, "[ CallNew");
+ CodeForStatementPosition(node);
+
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments. This is different from ordinary calls, where the
+ // actual function to call is resolved after the arguments have been
+ // evaluated.
+
+ // Compute function to call and use the global object as the
+ // receiver. There is no need to use the global proxy here because
+ // it will always be replaced with a newly allocated object.
+ Load(node->expression());
+ LoadGlobal();
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = node->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ CodeForSourcePosition(node->position());
+ Result result = frame_->CallConstructor(arg_count);
+ // Replace the function on the stack with the result.
+ frame_->SetElementAt(0, &result);
+}
+
+
+void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+ if (CheckForInlineRuntimeCall(node)) {
+ return;
+ }
+
+ ZoneList<Expression*>* args = node->arguments();
+ Comment cmnt(masm_, "[ CallRuntime");
+ Runtime::Function* function = node->function();
+
+ if (function == NULL) {
+ // Prepare stack for calling JS runtime function.
+ frame_->Push(node->name());
+ // Push the builtins object found in the current global object.
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(), GlobalObject());
+ __ movq(temp.reg(),
+ FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
+ frame_->Push(&temp);
+ }
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ if (function == NULL) {
+ // Call the JS runtime function.
+ Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
+ arg_count,
+ loop_nesting_);
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &answer);
+ } else {
+ // Call the C runtime function.
+ Result answer = frame_->CallRuntime(function, arg_count);
+ frame_->Push(&answer);
+ }
+}
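+
+// Illustrative note: a call written with natives syntax such as
+//   %ClassOf(x)
+// resolves to a C runtime function and takes the CallRuntime path above,
+// while a '%_'-prefixed call such as '%_IsSmi(x)' is expanded inline by
+// CheckForInlineRuntimeCall (see GenerateIsSmi below), and a name with no
+// C counterpart is fetched from the builtins object and called through
+// the call IC.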
+
+
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+ // Note that because of NOT and an optimization in comparison of a typeof
+ // expression to a literal string, this function can fail to leave a value
+ // on top of the frame or in the cc register.
+ Comment cmnt(masm_, "[ UnaryOperation");
+
+ Token::Value op = node->op();
+
+ if (op == Token::NOT) {
+ // Swap the true and false targets but keep the same actual label
+ // as the fall through.
+ destination()->Invert();
+ LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
+ // Swap the labels back.
+ destination()->Invert();
+
+ } else if (op == Token::DELETE) {
+ Property* property = node->expression()->AsProperty();
+ if (property != NULL) {
+ Load(property->obj());
+ Load(property->key());
+      Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+                                            CALL_FUNCTION, 2);
+ frame_->Push(&answer);
+ return;
+ }
+
+ Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
+ if (variable != NULL) {
+ Slot* slot = variable->slot();
+ if (variable->is_global()) {
+ LoadGlobal();
+ frame_->Push(variable->name());
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+ CALL_FUNCTION, 2);
+ frame_->Push(&answer);
+ return;
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ // Call the runtime to look up the context holding the named
+ // variable. Sync the virtual frame eagerly so we can push the
+ // arguments directly into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(rsi);
+ frame_->EmitPush(variable->name());
+ Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
+ ASSERT(context.is_register());
+ frame_->EmitPush(context.reg());
+ context.Unuse();
+ frame_->EmitPush(variable->name());
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+ CALL_FUNCTION, 2);
+ frame_->Push(&answer);
+ return;
+ }
+
+ // Default: Result of deleting non-global, not dynamically
+ // introduced variables is false.
+ frame_->Push(Factory::false_value());
+
+ } else {
+ // Default: Result of deleting expressions is true.
+ Load(node->expression()); // may have side-effects
+ frame_->SetElementAt(0, Factory::true_value());
+ }
+
+ } else if (op == Token::TYPEOF) {
+ // Special case for loading the typeof expression; see comment on
+ // LoadTypeofExpression().
+ LoadTypeofExpression(node->expression());
+ Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
+ frame_->Push(&answer);
+
+ } else if (op == Token::VOID) {
+ Expression* expression = node->expression();
+ if (expression && expression->AsLiteral() && (
+ expression->AsLiteral()->IsTrue() ||
+ expression->AsLiteral()->IsFalse() ||
+ expression->AsLiteral()->handle()->IsNumber() ||
+ expression->AsLiteral()->handle()->IsString() ||
+ expression->AsLiteral()->handle()->IsJSRegExp() ||
+ expression->AsLiteral()->IsNull())) {
+ // Omit evaluating the value of the primitive literal.
+ // It will be discarded anyway, and can have no side effect.
+ frame_->Push(Factory::undefined_value());
+ } else {
+ Load(node->expression());
+ frame_->SetElementAt(0, Factory::undefined_value());
+ }
+
+ } else {
+ Load(node->expression());
+ switch (op) {
+ case Token::NOT:
+ case Token::DELETE:
+ case Token::TYPEOF:
+ UNREACHABLE(); // handled above
+ break;
+
+ case Token::SUB: {
+ bool overwrite =
+ (node->AsBinaryOperation() != NULL &&
+ node->AsBinaryOperation()->ResultOverwriteAllowed());
+ UnarySubStub stub(overwrite);
+ // TODO(1222589): remove dependency of TOS being cached inside stub
+ Result operand = frame_->Pop();
+ Result answer = frame_->CallStub(&stub, &operand);
+ frame_->Push(&answer);
+ break;
+ }
+
+ case Token::BIT_NOT: {
+ // Smi check.
+ JumpTarget smi_label;
+ JumpTarget continue_label;
+ Result operand = frame_->Pop();
+ operand.ToRegister();
+ __ testl(operand.reg(), Immediate(kSmiTagMask));
+ smi_label.Branch(zero, &operand);
+
+ frame_->Push(&operand); // undo popping of TOS
+ Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
+ CALL_FUNCTION, 1);
+ continue_label.Jump(&answer);
+ smi_label.Bind(&answer);
+ answer.ToRegister();
+ frame_->Spill(answer.reg());
+ __ not_(answer.reg());
+ // Remove inverted smi-tag. The mask is sign-extended to 64 bits.
+ __ xor_(answer.reg(), Immediate(kSmiTagMask));
+ continue_label.Bind(&answer);
+ frame_->Push(&answer);
+ break;
+ }
+
+ case Token::ADD: {
+ // Smi check.
+ JumpTarget continue_label;
+ Result operand = frame_->Pop();
+ operand.ToRegister();
+ __ testl(operand.reg(), Immediate(kSmiTagMask));
+ continue_label.Branch(zero, &operand, taken);
+
+ frame_->Push(&operand);
+ Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
+ CALL_FUNCTION, 1);
+
+ continue_label.Bind(&answer);
+ frame_->Push(&answer);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+ }
+}
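+
+// Illustrative note on the BIT_NOT smi fast path above: with a zero smi
+// tag in the low bit, a smi encodes the value v as (v << 1). Applying
+// not_ yields ~(v << 1), whose tag bit is 1; xor-ing with kSmiTagMask
+// clears that bit and produces (~v << 1), the smi encoding of ~v.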
+
+
+// The value in dst was optimistically incremented or decremented. The
+// result overflowed or was not smi tagged. Undo the operation, call
+// into the runtime to convert the argument to a number, and call the
+// specialized add or subtract stub. The result is left in dst.
+class DeferredPrefixCountOperation: public DeferredCode {
+ public:
+ DeferredPrefixCountOperation(Register dst, bool is_increment)
+ : dst_(dst), is_increment_(is_increment) {
+    set_comment("[ DeferredPrefixCountOperation");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ bool is_increment_;
+};
+
+
+void DeferredPrefixCountOperation::Generate() {
+ // Undo the optimistic smi operation.
+ if (is_increment_) {
+ __ subq(dst_, Immediate(Smi::FromInt(1)));
+ } else {
+ __ addq(dst_, Immediate(Smi::FromInt(1)));
+ }
+ __ push(dst_);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ push(rax);
+ __ push(Immediate(Smi::FromInt(1)));
+ if (is_increment_) {
+ __ CallRuntime(Runtime::kNumberAdd, 2);
+ } else {
+ __ CallRuntime(Runtime::kNumberSub, 2);
+ }
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+// The value in dst was optimistically incremented or decremented. The
+// result overflowed or was not smi tagged. Undo the operation and call
+// into the runtime to convert the argument to a number. Update the
+// original value in old. Call the specialized add or subtract stub.
+// The result is left in dst.
+class DeferredPostfixCountOperation: public DeferredCode {
+ public:
+ DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
+ : dst_(dst), old_(old), is_increment_(is_increment) {
+    set_comment("[ DeferredPostfixCountOperation");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Register old_;
+ bool is_increment_;
+};
+
+
+void DeferredPostfixCountOperation::Generate() {
+ // Undo the optimistic smi operation.
+ if (is_increment_) {
+ __ subq(dst_, Immediate(Smi::FromInt(1)));
+ } else {
+ __ addq(dst_, Immediate(Smi::FromInt(1)));
+ }
+ __ push(dst_);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+
+ // Save the result of ToNumber to use as the old value.
+ __ push(rax);
+
+ // Call the runtime for the addition or subtraction.
+ __ push(rax);
+ __ push(Immediate(Smi::FromInt(1)));
+ if (is_increment_) {
+ __ CallRuntime(Runtime::kNumberAdd, 2);
+ } else {
+ __ CallRuntime(Runtime::kNumberSub, 2);
+ }
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+ __ pop(old_);
+}
+
+
+void CodeGenerator::VisitCountOperation(CountOperation* node) {
+ Comment cmnt(masm_, "[ CountOperation");
+
+ bool is_postfix = node->is_postfix();
+ bool is_increment = node->op() == Token::INC;
+
+ Variable* var = node->expression()->AsVariableProxy()->AsVariable();
+ bool is_const = (var != NULL && var->mode() == Variable::CONST);
+
+ // Postfix operations need a stack slot under the reference to hold
+ // the old value while the new value is being stored. This is so that
+ // in the case that storing the new value requires a call, the old
+ // value will be in the frame to be spilled.
+ if (is_postfix) frame_->Push(Smi::FromInt(0));
+
+ { Reference target(this, node->expression());
+ if (target.is_illegal()) {
+ // Spoof the virtual frame to have the expected height (one higher
+ // than on entry).
+ if (!is_postfix) frame_->Push(Smi::FromInt(0));
+ return;
+ }
+ target.TakeValue(NOT_INSIDE_TYPEOF);
+
+ Result new_value = frame_->Pop();
+ new_value.ToRegister();
+
+ Result old_value; // Only allocated in the postfix case.
+ if (is_postfix) {
+ // Allocate a temporary to preserve the old value.
+ old_value = allocator_->Allocate();
+ ASSERT(old_value.is_valid());
+ __ movq(old_value.reg(), new_value.reg());
+ }
+ // Ensure the new value is writable.
+ frame_->Spill(new_value.reg());
+
+    // In order to combine the overflow and the smi tag check after the
+    // count operation, we need a register for the setcc below; on x64
+    // the scratch register is used for this, so no byte register has to
+    // be allocated. The temporary is allocated without spilling and is
+    // otherwise unused.
+    //
+    // The scratch register is cleared before performing the count
+    // operation, since clearing a register using xor also clears the
+    // overflow flag.
+    Result tmp = allocator_->AllocateWithoutSpilling();
+
+    // Clear the scratch register to prepare it for the setcc after the
+    // count operation below.
+    __ xor_(kScratchRegister, kScratchRegister);
+
+ DeferredCode* deferred = NULL;
+ if (is_postfix) {
+ deferred = new DeferredPostfixCountOperation(new_value.reg(),
+ old_value.reg(),
+ is_increment);
+ } else {
+ deferred = new DeferredPrefixCountOperation(new_value.reg(),
+ is_increment);
+ }
+
+ if (is_increment) {
+ __ addq(new_value.reg(), Immediate(Smi::FromInt(1)));
+ } else {
+ __ subq(new_value.reg(), Immediate(Smi::FromInt(1)));
+ }
+
+ // If the count operation didn't overflow and the result is a valid
+ // smi, we're done. Otherwise, we jump to the deferred slow-case
+ // code.
+
+ // We combine the overflow and the smi tag check.
+ __ setcc(overflow, kScratchRegister);
+ __ or_(kScratchRegister, new_value.reg());
+ __ testl(kScratchRegister, Immediate(kSmiTagMask));
+ tmp.Unuse();
+ deferred->Branch(not_zero);
+
+ deferred->BindExit();
+
+ // Postfix: store the old value in the allocated slot under the
+ // reference.
+ if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
+
+ frame_->Push(&new_value);
+ // Non-constant: update the reference.
+ if (!is_const) target.SetValue(NOT_CONST_INIT);
+ }
+
+ // Postfix: drop the new value and use the old.
+ if (is_postfix) frame_->Drop();
+}
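+
+// Illustrative note: the combined check above ors the setcc(overflow)
+// result into the new value and tests the smi tag mask, so the deferred
+// code is entered if the optimistic add or subtract either overflowed or
+// produced a non-smi. For a postfix use such as
+//   a = b++;
+// the extra frame slot pushed before the reference holds the old value,
+// which is the value of the postfix expression.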
+
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+ // TODO(X64): This code was copied verbatim from codegen-ia32.
+ // Either find a reason to change it or move it to a shared location.
+
+ // Note that due to an optimization in comparison operations (typeof
+ // compared to a string literal), we can evaluate a binary expression such
+ // as AND or OR and not leave a value on the frame or in the cc register.
+ Comment cmnt(masm_, "[ BinaryOperation");
+ Token::Value op = node->op();
+
+ // According to ECMA-262 section 11.11, page 58, the binary logical
+ // operators must yield the result of one of the two expressions
+ // before any ToBoolean() conversions. This means that the value
+ // produced by a && or || operator is not necessarily a boolean.
+
+ // NOTE: If the left hand side produces a materialized value (not
+ // control flow), we force the right hand side to do the same. This
+ // is necessary because we assume that if we get control flow on the
+ // last path out of an expression we got it on all paths.
+ if (op == Token::AND) {
+ JumpTarget is_true;
+ ControlDestination dest(&is_true, destination()->false_target(), true);
+ LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+
+ if (dest.false_was_fall_through()) {
+ // The current false target was used as the fall-through. If
+ // there are no dangling jumps to is_true then the left
+ // subexpression was unconditionally false. Otherwise we have
+ // paths where we do have to evaluate the right subexpression.
+ if (is_true.is_linked()) {
+ // We need to compile the right subexpression. If the jump to
+ // the current false target was a forward jump then we have a
+ // valid frame, we have just bound the false target, and we
+ // have to jump around the code for the right subexpression.
+ if (has_valid_frame()) {
+ destination()->false_target()->Unuse();
+ destination()->false_target()->Jump();
+ }
+ is_true.Bind();
+ // The left subexpression compiled to control flow, so the
+ // right one is free to do so as well.
+ LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ } else {
+ // We have actually just jumped to or bound the current false
+ // target but the current control destination is not marked as
+ // used.
+ destination()->Use(false);
+ }
+
+ } else if (dest.is_used()) {
+ // The left subexpression compiled to control flow (and is_true
+ // was just bound), so the right is free to do so as well.
+ LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+
+ } else {
+ // We have a materialized value on the frame, so we exit with
+ // one on all paths. There are possibly also jumps to is_true
+ // from nested subexpressions.
+ JumpTarget pop_and_continue;
+ JumpTarget exit;
+
+ // Avoid popping the result if it converts to 'false' using the
+ // standard ToBoolean() conversion as described in ECMA-262,
+ // section 9.2, page 30.
+ //
+ // Duplicate the TOS value. The duplicate will be popped by
+ // ToBoolean.
+ frame_->Dup();
+ ControlDestination dest(&pop_and_continue, &exit, true);
+ ToBoolean(&dest);
+
+ // Pop the result of evaluating the first part.
+ frame_->Drop();
+
+ // Compile right side expression.
+ is_true.Bind();
+ Load(node->right());
+
+ // Exit (always with a materialized value).
+ exit.Bind();
+ }
+
+ } else if (op == Token::OR) {
+ JumpTarget is_false;
+ ControlDestination dest(destination()->true_target(), &is_false, false);
+ LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+
+ if (dest.true_was_fall_through()) {
+ // The current true target was used as the fall-through. If
+ // there are no dangling jumps to is_false then the left
+ // subexpression was unconditionally true. Otherwise we have
+ // paths where we do have to evaluate the right subexpression.
+ if (is_false.is_linked()) {
+ // We need to compile the right subexpression. If the jump to
+ // the current true target was a forward jump then we have a
+ // valid frame, we have just bound the true target, and we
+ // have to jump around the code for the right subexpression.
+ if (has_valid_frame()) {
+ destination()->true_target()->Unuse();
+ destination()->true_target()->Jump();
+ }
+ is_false.Bind();
+ // The left subexpression compiled to control flow, so the
+ // right one is free to do so as well.
+ LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ } else {
+ // We have just jumped to or bound the current true target but
+ // the current control destination is not marked as used.
+ destination()->Use(true);
+ }
+
+ } else if (dest.is_used()) {
+ // The left subexpression compiled to control flow (and is_false
+ // was just bound), so the right is free to do so as well.
+ LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+
+ } else {
+ // We have a materialized value on the frame, so we exit with
+ // one on all paths. There are possibly also jumps to is_false
+ // from nested subexpressions.
+ JumpTarget pop_and_continue;
+ JumpTarget exit;
+
+ // Avoid popping the result if it converts to 'true' using the
+ // standard ToBoolean() conversion as described in ECMA-262,
+ // section 9.2, page 30.
+ //
+ // Duplicate the TOS value. The duplicate will be popped by
+ // ToBoolean.
+ frame_->Dup();
+ ControlDestination dest(&exit, &pop_and_continue, false);
+ ToBoolean(&dest);
+
+ // Pop the result of evaluating the first part.
+ frame_->Drop();
+
+ // Compile right side expression.
+ is_false.Bind();
+ Load(node->right());
+
+ // Exit (always with a materialized value).
+ exit.Bind();
+ }
+
+ } else {
+ // NOTE: The code below assumes that the slow cases (calls to runtime)
+ // never return a constant/immutable object.
+ OverwriteMode overwrite_mode = NO_OVERWRITE;
+ if (node->left()->AsBinaryOperation() != NULL &&
+ node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ overwrite_mode = OVERWRITE_LEFT;
+ } else if (node->right()->AsBinaryOperation() != NULL &&
+ node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ overwrite_mode = OVERWRITE_RIGHT;
+ }
+
+ Load(node->left());
+ Load(node->right());
+ GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
+ }
+}
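+
+// Illustrative note: as required by ECMA-262 section 11.11, '&&' and
+// '||' yield one of their operand values rather than a boolean. For
+// example,
+//   var x = 0 || "default";
+// evaluates to "default", which is why the code above keeps the
+// materialized operand value instead of its ToBoolean() result.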
+
+
+void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+ Comment cmnt(masm_, "[ CompareOperation");
+
+ // Get the expressions from the node.
+ Expression* left = node->left();
+ Expression* right = node->right();
+ Token::Value op = node->op();
+ // To make typeof testing for natives implemented in JavaScript really
+ // efficient, we generate special code for expressions of the form:
+ // 'typeof <expression> == <string>'.
+ UnaryOperation* operation = left->AsUnaryOperation();
+ if ((op == Token::EQ || op == Token::EQ_STRICT) &&
+ (operation != NULL && operation->op() == Token::TYPEOF) &&
+ (right->AsLiteral() != NULL &&
+ right->AsLiteral()->handle()->IsString())) {
+ Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
+
+ // Load the operand and move it to a register.
+ LoadTypeofExpression(operation->expression());
+ Result answer = frame_->Pop();
+ answer.ToRegister();
+
+ if (check->Equals(Heap::number_symbol())) {
+ __ testl(answer.reg(), Immediate(kSmiTagMask));
+ destination()->true_target()->Branch(zero);
+ frame_->Spill(answer.reg());
+ __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ Cmp(answer.reg(), Factory::heap_number_map());
+ answer.Unuse();
+ destination()->Split(equal);
+
+ } else if (check->Equals(Heap::string_symbol())) {
+ __ testl(answer.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+
+ // It can be an undetectable string object.
+ __ movq(kScratchRegister,
+ FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
+ answer.Unuse();
+ destination()->Split(below); // Unsigned byte comparison needed.
+
+ } else if (check->Equals(Heap::boolean_symbol())) {
+ __ Cmp(answer.reg(), Factory::true_value());
+ destination()->true_target()->Branch(equal);
+ __ Cmp(answer.reg(), Factory::false_value());
+ answer.Unuse();
+ destination()->Split(equal);
+
+ } else if (check->Equals(Heap::undefined_symbol())) {
+ __ Cmp(answer.reg(), Factory::undefined_value());
+ destination()->true_target()->Branch(equal);
+
+ __ testl(answer.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+
+ // It can be an undetectable object.
+ __ movq(kScratchRegister,
+ FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ answer.Unuse();
+ destination()->Split(not_zero);
+
+ } else if (check->Equals(Heap::function_symbol())) {
+ __ testl(answer.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+ frame_->Spill(answer.reg());
+ __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
+ answer.Unuse();
+ destination()->Split(equal);
+
+ } else if (check->Equals(Heap::object_symbol())) {
+ __ testl(answer.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+ __ Cmp(answer.reg(), Factory::null_value());
+ destination()->true_target()->Branch(equal);
+
+ // It can be an undetectable object.
+ __ movq(kScratchRegister,
+ FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ __ movb(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
+ __ cmpb(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
+ destination()->false_target()->Branch(below);
+ __ cmpb(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
+ answer.Unuse();
+ destination()->Split(below_equal);
+ } else {
+ // Uncommon case: typeof testing against a string literal that is
+ // never returned from the typeof operator.
+ answer.Unuse();
+ destination()->Goto(false);
+ }
+ return;
+ }
+
+ Condition cc = no_condition;
+ bool strict = false;
+ switch (op) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through
+ case Token::EQ:
+ cc = equal;
+ break;
+ case Token::LT:
+ cc = less;
+ break;
+ case Token::GT:
+ cc = greater;
+ break;
+ case Token::LTE:
+ cc = less_equal;
+ break;
+ case Token::GTE:
+ cc = greater_equal;
+ break;
+ case Token::IN: {
+ Load(left);
+ Load(right);
+ Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
+ frame_->Push(&answer); // push the result
+ return;
+ }
+ case Token::INSTANCEOF: {
+ Load(left);
+ Load(right);
+ InstanceofStub stub;
+ Result answer = frame_->CallStub(&stub, 2);
+ answer.ToRegister();
+ __ testq(answer.reg(), answer.reg());
+ answer.Unuse();
+ destination()->Split(zero);
+ return;
+ }
+ default:
+ UNREACHABLE();
+ }
+ Load(left);
+ Load(right);
+ Comparison(cc, strict, destination());
+}
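+
+// Illustrative note: the typeof special case above means that, e.g.,
+//   typeof x == 'number'
+// compiles to a direct smi and heap-number map check on x and never
+// materializes the string result of the typeof operator.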
+
+
+void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+ frame_->PushFunction();
+}
+
+
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // ArgumentsAccessStub expects the key in rdx and the formal
+ // parameter count in rax.
+ Load(args->at(0));
+ Result key = frame_->Pop();
+ // Explicitly create a constant result.
+ Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+ // Call the shared stub to get to arguments[key].
+ ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ Result result = frame_->CallStub(&stub, &key, &count);
+ frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ __ testl(value.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(equal);
+ // It is a heap object - get map.
+ // Check if the object is a JS array or not.
+ __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
+ value.Unuse();
+ destination()->Split(equal);
+}
+
+
+void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
+ // TODO(X64): Optimize this like it's done on IA-32.
+ ASSERT(args->length() == 0);
+ Result answer = frame_->CallRuntime(Runtime::kIsConstructCall, 0);
+ frame_->Push(&answer);
+}
+
+
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+ // ArgumentsAccessStub takes the parameter count as an input argument
+  // in register rax. Create a constant result for it.
+ Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+ // Call the shared stub to get to the arguments.length.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
+ Result result = frame_->CallStub(&stub, &count);
+ frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* a) {
+ // TODO(X64): Implement this function.
+ // Ignore arguments and return undefined, to signal failure.
+ frame_->Push(Factory::undefined_value());
+}
+
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ __ testl(value.reg(),
+ Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
+ value.Unuse();
+ destination()->Split(zero);
+}
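+
+// Illustrative note: the combined mask above tests the smi tag bit and
+// bit 31 in a single instruction, so the check succeeds only for a smi
+// whose payload is non-negative.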
+
+
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ __ testl(value.reg(), Immediate(kSmiTagMask));
+ value.Unuse();
+ destination()->Split(zero);
}
-void CodeGenerator::VisitCountOperation(CountOperation* a) {
+
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
+ // Conditionally generate a log call.
+ // Args:
+ // 0 (literal string): The type of logging (corresponds to the flags).
+ // This is used to determine whether or not to generate the log call.
+ // 1 (string): Format string. Access the string at argument index 2
+ // with '%2s' (see Logger::LogRuntime for all the formats).
+ // 2 (array): Arguments to the format string.
+ ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (ShouldGenerateLog(args->at(0))) {
+ Load(args->at(1));
+ Load(args->at(2));
+ frame_->CallRuntime(Runtime::kLog, 2);
+ }
+#endif
+ // Finally, we're expected to leave a value on the top of the stack.
+ frame_->Push(Factory::undefined_value());
+}
+
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ Load(args->at(0));
+ Load(args->at(1));
+ Result right = frame_->Pop();
+ Result left = frame_->Pop();
+ right.ToRegister();
+ left.ToRegister();
+ __ cmpq(right.reg(), left.reg());
+ right.Unuse();
+ left.Unuse();
+ destination()->Split(equal);
+}
+
+
+void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* a) {
+void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
UNIMPLEMENTED();
}
-void CodeGenerator::VisitCompareOperation(CompareOperation* a) {
+
+void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
+ // TODO(X64): Optimize this like it's done on IA-32.
+ ASSERT(args->length() == 1);
+ Load(args->at(0)); // Load the object.
+ Result result = frame_->CallRuntime(Runtime::kClassOf, 1);
+ frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+ JumpTarget leave;
+ Load(args->at(0)); // Load the object.
+ Load(args->at(1)); // Load the value.
+ Result value = frame_->Pop();
+ Result object = frame_->Pop();
+ value.ToRegister();
+ object.ToRegister();
+
+ // if (object->IsSmi()) return value.
+ __ testl(object.reg(), Immediate(kSmiTagMask));
+ leave.Branch(zero, &value);
+
+ // It is a heap object - get its map.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ // if (!object->IsJSValue()) return value.
+ __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
+ leave.Branch(not_equal, &value);
+
+ // Store the value.
+ __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ Result duplicate_value = allocator_->Allocate();
+ ASSERT(duplicate_value.is_valid());
+ __ movq(duplicate_value.reg(), value.reg());
+ // The object register is also overwritten by the write barrier and
+ // possibly aliased in the frame.
+ frame_->Spill(object.reg());
+ __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
+ scratch.reg());
+ object.Unuse();
+ scratch.Unuse();
+ duplicate_value.Unuse();
+
+ // Leave.
+ leave.Bind(&value);
+ frame_->Push(&value);
+}
+
+
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ JumpTarget leave;
+ Load(args->at(0)); // Load the object.
+ frame_->Dup();
+ Result object = frame_->Pop();
+ object.ToRegister();
+ ASSERT(object.is_valid());
+ // if (object->IsSmi()) return object.
+ __ testl(object.reg(), Immediate(kSmiTagMask));
+ leave.Branch(zero);
+ // It is a heap object - get map.
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ // if (!object->IsJSValue()) return object.
+ __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
+ leave.Branch(not_equal);
+ __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
+ object.Unuse();
+ frame_->SetElementAt(0, &temp);
+ leave.Bind();
+}
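+
+// Illustrative note: the generator above unwraps JSValue wrapper
+// objects, mapping e.g. 'new Number(42)' to 42 while leaving smis and
+// non-wrapper objects unchanged.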
+
+
+// -----------------------------------------------------------------------------
+// CodeGenerator implementation of Expressions
+
+void CodeGenerator::LoadAndSpill(Expression* expression,
+ TypeofState typeof_state) {
+ // TODO(x64): No architecture specific code. Move to shared location.
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ Load(expression, typeof_state);
+ frame_->SpillAll();
+ set_in_spilled_code(true);
+}
+
+
+void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ ASSERT(!in_spilled_code());
+ JumpTarget true_target;
+ JumpTarget false_target;
+ ControlDestination dest(&true_target, &false_target, true);
+ LoadCondition(x, typeof_state, &dest, false);
+
+ if (dest.false_was_fall_through()) {
+ // The false target was just bound.
+ JumpTarget loaded;
+ frame_->Push(Factory::false_value());
+ // There may be dangling jumps to the true target.
+ if (true_target.is_linked()) {
+ loaded.Jump();
+ true_target.Bind();
+ frame_->Push(Factory::true_value());
+ loaded.Bind();
+ }
+
+ } else if (dest.is_used()) {
+ // There is true, and possibly false, control flow (with true as
+ // the fall through).
+ JumpTarget loaded;
+ frame_->Push(Factory::true_value());
+ if (false_target.is_linked()) {
+ loaded.Jump();
+ false_target.Bind();
+ frame_->Push(Factory::false_value());
+ loaded.Bind();
+ }
+
+ } else {
+ // We have a valid value on top of the frame, but we still may
+ // have dangling jumps to the true and false targets from nested
+ // subexpressions (eg, the left subexpressions of the
+ // short-circuited boolean operators).
+ ASSERT(has_valid_frame());
+ if (true_target.is_linked() || false_target.is_linked()) {
+ JumpTarget loaded;
+ loaded.Jump(); // Don't lose the current TOS.
+ if (true_target.is_linked()) {
+ true_target.Bind();
+ frame_->Push(Factory::true_value());
+ if (false_target.is_linked()) {
+ loaded.Jump();
+ }
+ }
+ if (false_target.is_linked()) {
+ false_target.Bind();
+ frame_->Push(Factory::false_value());
+ }
+ loaded.Bind();
+ }
+ }
+
+ ASSERT(has_valid_frame());
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+// Emit code to load the value of an expression to the top of the
+// frame. If the expression is boolean-valued it may be compiled (or
+// partially compiled) into control flow to the control destination.
+// If force_control is true, control flow is forced.
+void CodeGenerator::LoadCondition(Expression* x,
+ TypeofState typeof_state,
+ ControlDestination* dest,
+ bool force_control) {
+ ASSERT(!in_spilled_code());
+ int original_height = frame_->height();
+
+ { CodeGenState new_state(this, typeof_state, dest);
+ Visit(x);
+
+ // If we hit a stack overflow, we may not have actually visited
+ // the expression. In that case, we ensure that we have a
+ // valid-looking frame state because we will continue to generate
+ // code as we unwind the C++ stack.
+ //
+ // It's possible to have both a stack overflow and a valid frame
+ // state (eg, a subexpression overflowed, visiting it returned
+ // with a dummied frame state, and visiting this expression
+ // returned with a normal-looking state).
+ if (HasStackOverflow() &&
+ !dest->is_used() &&
+ frame_->height() == original_height) {
+ dest->Goto(true);
+ }
+ }
+
+ if (force_control && !dest->is_used()) {
+ // Convert the TOS value into flow to the control destination.
+ // TODO(X64): Make control flow to control destinations work.
+ ToBoolean(dest);
+ }
+
+ ASSERT(!(force_control && !dest->is_used()));
+ ASSERT(dest->is_used() || frame_->height() == original_height + 1);
+}
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+ ToBooleanStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return 0; }
+};
+
+
+// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
+// convert it to a boolean in the condition code register or jump to
+// 'false_target'/'true_target' as appropriate.
+void CodeGenerator::ToBoolean(ControlDestination* dest) {
+ Comment cmnt(masm_, "[ ToBoolean");
+
+ // The value to convert should be popped from the frame.
+ Result value = frame_->Pop();
+ value.ToRegister();
+ // Fast case checks.
+
+ // 'false' => false.
+ __ Cmp(value.reg(), Factory::false_value());
+ dest->false_target()->Branch(equal);
+
+ // 'true' => true.
+ __ Cmp(value.reg(), Factory::true_value());
+ dest->true_target()->Branch(equal);
+
+ // 'undefined' => false.
+ __ Cmp(value.reg(), Factory::undefined_value());
+ dest->false_target()->Branch(equal);
+
+ // Smi => false iff zero.
+ ASSERT(kSmiTag == 0);
+ __ testq(value.reg(), value.reg());
+ dest->false_target()->Branch(zero);
+ __ testl(value.reg(), Immediate(kSmiTagMask));
+ dest->true_target()->Branch(zero);
+
+ // Call the stub for all other cases.
+ frame_->Push(&value); // Undo the Pop() from above.
+ ToBooleanStub stub;
+ Result temp = frame_->CallStub(&stub, 1);
+ // Convert the result to a condition code.
+ __ testq(temp.reg(), temp.reg());
+ temp.Unuse();
+ dest->Split(not_equal);
+}
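+
+// Illustrative note: the fast checks above handle the common ToBoolean
+// cases directly, e.g. 'if (0)' and 'if (undefined)' branch to the
+// false target without calling the stub, and any non-zero smi branches
+// to the true target.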
+
+
+void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
UNIMPLEMENTED();
+ // TODO(X64): Implement security policy for loads of smis.
+}
+
+
+bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
+ return false;
+}
+
+// -----------------------------------------------------------------------------
+// CodeGenerator implementation of variables, lookups, and stores.
+
+Reference::Reference(CodeGenerator* cgen, Expression* expression)
+ : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+ cgen->LoadReference(this);
+}
+
+
+Reference::~Reference() {
+ cgen_->UnloadReference(this);
}
-void CodeGenerator::VisitThisFunction(ThisFunction* a) {
+
+void CodeGenerator::LoadReference(Reference* ref) {
+ // References are loaded from both spilled and unspilled code. Set the
+ // state to unspilled to allow that (and explicitly spill after
+ // construction at the construction sites).
+ bool was_in_spilled_code = in_spilled_code_;
+ in_spilled_code_ = false;
+
+ Comment cmnt(masm_, "[ LoadReference");
+ Expression* e = ref->expression();
+ Property* property = e->AsProperty();
+ Variable* var = e->AsVariableProxy()->AsVariable();
+
+ if (property != NULL) {
+ // The expression is either a property or a variable proxy that rewrites
+ // to a property.
+ Load(property->obj());
+ // We use a named reference if the key is a literal symbol, unless it is
+ // a string that can be legally parsed as an integer. This is because
+ // otherwise we will not get into the slow case code that handles [] on
+ // String objects.
+ Literal* literal = property->key()->AsLiteral();
+ uint32_t dummy;
+ if (literal != NULL &&
+ literal->handle()->IsSymbol() &&
+ !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+ ref->set_type(Reference::NAMED);
+ } else {
+ Load(property->key());
+ ref->set_type(Reference::KEYED);
+ }
+ } else if (var != NULL) {
+ // The expression is a variable proxy that does not rewrite to a
+ // property. Global variables are treated as named property references.
+ if (var->is_global()) {
+ LoadGlobal();
+ ref->set_type(Reference::NAMED);
+ } else {
+ ASSERT(var->slot() != NULL);
+ ref->set_type(Reference::SLOT);
+ }
+ } else {
+ // Anything else is a runtime error.
+ Load(e);
+ // frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
+
+ in_spilled_code_ = was_in_spilled_code;
+}
+
+
+void CodeGenerator::UnloadReference(Reference* ref) {
+ // Pop a reference from the stack while preserving TOS.
+ Comment cmnt(masm_, "[ UnloadReference");
+ frame_->Nip(ref->size());
+}
+
+
+Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
+ // Currently, this assertion will fail if we try to assign to
+ // a constant variable that is constant because it is read-only
+ // (such as the variable referring to a named function expression).
+ // We need to implement assignments to read-only variables.
+ // Ideally, we should do this during AST generation (by converting
+ // such assignments into expression statements); however, in general
+ // we may not be able to make the decision until past AST generation,
+ // that is when the entire program is known.
+ ASSERT(slot != NULL);
+ int index = slot->index();
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ return frame_->ParameterAt(index);
+
+ case Slot::LOCAL:
+ return frame_->LocalAt(index);
+
+ case Slot::CONTEXT: {
+ // Follow the context chain if necessary.
+ ASSERT(!tmp.is(rsi)); // do not overwrite context register
+ Register context = rsi;
+ int chain_length = scope()->ContextChainLength(slot->var()->scope());
+ for (int i = 0; i < chain_length; i++) {
+ // Load the closure.
+ // (All contexts, even 'with' contexts, have a closure,
+ // and it is the same for all contexts inside a function.
+ // There is no need to go to the function context first.)
+ __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+ // Load the function context (which is the incoming, outer context).
+ __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
+ context = tmp;
+ }
+ // We may have a 'with' context now. Get the function context.
+ // (In fact this mov may never be needed, since the scope analysis
+ // may not permit a direct context access in this case and thus we are
+ // always at a function context. However it is safe to dereference be-
+ // cause the function context of a function context is itself. Before
+ // deleting this mov we should try to create a counter-example first,
+ // though...)
+ __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
+ return ContextOperand(tmp, index);
+ }
+
+ default:
+ UNREACHABLE();
+ return Operand(rsp, 0);
+ }
+}
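
The CONTEXT case walks ContextChainLength links; each hop loads the current context's closure and then that closure's context, which is the next outer function context. A sketch of the same walk over plain structs (types are illustrative, not V8's object layout):

    struct Closure;
    struct Context {
      Closure* closure;  // ContextOperand(ctx, Context::CLOSURE_INDEX)
    };
    struct Closure {
      Context* context;  // FieldOperand(fn, JSFunction::kContextOffset)
    };

    // One hop per iteration, exactly like the movq pair in the loop above.
    Context* WalkContextChain(Context* context, int chain_length) {
      for (int i = 0; i < chain_length; i++) {
        context = context->closure->context;
      }
      return context;
    }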
+
+
+Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
+ Result tmp,
+ JumpTarget* slow) {
UNIMPLEMENTED();
+ return Operand(rsp, 0);
+}
+
+
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->is_dynamic());
+
+ JumpTarget slow;
+ JumpTarget done;
+ Result value;
+
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+ value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
+ // If there was no control flow to slow, we can exit early.
+ if (!slow.is_linked()) {
+ frame_->Push(&value);
+ return;
+ }
+
+ done.Jump(&value);
+
+ } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+ // Only generate the fast case for locals that rewrite to slots.
+ // This rules out argument loads.
+ if (potential_slot != NULL) {
+ // Allocate a fresh register to use as a temp in
+ // ContextSlotOperandCheckExtensions and to hold the result
+ // value.
+ value = allocator_->Allocate();
+ ASSERT(value.is_valid());
+ __ movq(value.reg(),
+ ContextSlotOperandCheckExtensions(potential_slot,
+ value,
+ &slow));
+ if (potential_slot->var()->mode() == Variable::CONST) {
+ __ Cmp(value.reg(), Factory::the_hole_value());
+ done.Branch(not_equal, &value);
+ __ movq(value.reg(), Factory::undefined_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ }
+ // There is always control flow to slow from
+ // ContextSlotOperandCheckExtensions so we have to jump around
+ // it.
+ done.Jump(&value);
+ }
+ }
+
+ slow.Bind();
+ // A runtime call is inevitable. We eagerly sync frame elements
+ // to memory so that we can push the arguments directly into place
+ // on top of the frame.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(rsi);
+ __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
+ frame_->EmitPush(kScratchRegister);
+ if (typeof_state == INSIDE_TYPEOF) {
+ value =
+ frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ } else {
+ value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+ }
+
+ done.Bind(&value);
+ frame_->Push(&value);
+
+ } else if (slot->var()->mode() == Variable::CONST) {
+ // Const slots may contain 'the hole' value (the constant hasn't been
+ // initialized yet) which needs to be converted into the 'undefined'
+ // value.
+ //
+ // We currently spill the virtual frame because constants use the
+ // potentially unsafe direct-frame access of SlotOperand.
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Load const");
+ JumpTarget exit;
+ __ movq(rcx, SlotOperand(slot, rcx));
+ __ Cmp(rcx, Factory::the_hole_value());
+ exit.Branch(not_equal);
+ __ movq(rcx, Factory::undefined_value(), RelocInfo::EMBEDDED_OBJECT);
+ exit.Bind();
+ frame_->EmitPush(rcx);
+
+ } else if (slot->type() == Slot::PARAMETER) {
+ frame_->PushParameterAt(slot->index());
+
+ } else if (slot->type() == Slot::LOCAL) {
+ frame_->PushLocalAt(slot->index());
+
+ } else {
+ // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
+ // here.
+ //
+ // The use of SlotOperand below is safe for an unspilled frame
+ // because it will always be a context slot.
+ ASSERT(slot->type() == Slot::CONTEXT);
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
+ frame_->Push(&temp);
+ }
+}
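
The CONST branch above encodes a simple rule: a const slot holds the 'hole' sentinel until its initialization runs, and any load must convert that hole into undefined rather than expose it. A small sketch of the rule, with stand-in value kinds:

    enum ValueKind { HOLE, UNDEFINED, OTHER };
    struct Value { ValueKind kind; };

    // What the cmp/branch pair around Factory::the_hole_value() computes.
    Value LoadConstSlot(Value slot_contents) {
      if (slot_contents.kind == HOLE) {
        return Value{UNDEFINED};  // uninitialized const reads as undefined
      }
      return slot_contents;
    }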
+
+
+void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+ TypeofState state) {
+ LoadFromSlot(slot, state);
+
+ // Bail out quickly if we're not using lazy arguments allocation.
+ if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
+
+ // ... or if the slot isn't a non-parameter arguments slot.
+ if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
+
+ // Pop the loaded value from the stack.
+ Result value = frame_->Pop();
+
+ // If the loaded value is a constant, we know whether the arguments
+ // object has been lazily allocated yet.
+ if (value.is_constant()) {
+ if (value.handle()->IsTheHole()) {
+ Result arguments = StoreArgumentsObject(false);
+ frame_->Push(&arguments);
+ } else {
+ frame_->Push(&value);
+ }
+ return;
+ }
+
+ // The loaded value is in a register. If it is the sentinel that
+ // indicates that we haven't allocated the arguments object yet, we
+ // need to do so now.
+ JumpTarget exit;
+ __ Cmp(value.reg(), Factory::the_hole_value());
+ frame_->Push(&value);
+ exit.Branch(not_equal);
+ Result arguments = StoreArgumentsObject(false);
+ frame_->SetElementAt(0, &arguments);
+ exit.Bind();
+}
+
+
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+ // TODO(X64): Enable more types of slot.
+
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->is_dynamic());
+
+ // For now, just do a runtime call. Since the call is inevitable,
+ // we eagerly sync the virtual frame so we can directly push the
+ // arguments into place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ frame_->EmitPush(rsi);
+ frame_->EmitPush(slot->var()->name());
+
+ Result value;
+ if (init_state == CONST_INIT) {
+ // Same as the case for a normal store, but ignores attribute
+ // (e.g. READ_ONLY) of context slot so that we can initialize const
+ // properties (introduced via eval("const foo = (some expr);")). Also,
+ // uses the current function context instead of the top context.
+ //
+ // Note that we must declare the foo upon entry of eval(), via a
+ // context slot declaration, but we cannot initialize it at the same
+ // time, because the const declaration may be at the end of the eval
+ // code (sigh...) and the const variable may have been used before
+ // (where its value is 'undefined'). Thus, we can only do the
+ // initialization when we actually encounter the expression and when
+ // the expression operands are defined and valid, and thus we need the
+ // split into 2 operations: declaration of the context slot followed
+ // by initialization.
+ value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+ }
+ // Storing a variable must keep the (new) value on the expression
+ // stack. This is necessary for compiling chained assignment
+ // expressions.
+ frame_->Push(&value);
+ } else {
+ ASSERT(!slot->var()->is_dynamic());
+
+ JumpTarget exit;
+ if (init_state == CONST_INIT) {
+ ASSERT(slot->var()->mode() == Variable::CONST);
+ // Only the first const initialization must be executed (the slot
+ // still contains 'the hole' value). When the assignment is executed,
+ // the code is identical to a normal store (see below).
+ //
+ // We spill the frame in the code below because the direct-frame
+ // access of SlotOperand is potentially unsafe with an unspilled
+ // frame.
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Init const");
+ __ movq(rcx, SlotOperand(slot, rcx));
+ __ Cmp(rcx, Factory::the_hole_value());
+ exit.Branch(not_equal);
+ }
+
+ // We must execute the store. Storing a variable must keep the (new)
+ // value on the stack. This is necessary for compiling assignment
+ // expressions.
+ //
+ // Note: We will reach here even with slot->var()->mode() ==
+ // Variable::CONST because of const declarations which will initialize
+ // consts to 'the hole' value and by doing so, end up calling this code.
+ if (slot->type() == Slot::PARAMETER) {
+ frame_->StoreToParameterAt(slot->index());
+ } else if (slot->type() == Slot::LOCAL) {
+ frame_->StoreToLocalAt(slot->index());
+ } else {
+ // The other slot types (LOOKUP and GLOBAL) cannot reach here.
+ //
+ // The use of SlotOperand below is safe for an unspilled frame
+ // because the slot is a context slot.
+ ASSERT(slot->type() == Slot::CONTEXT);
+ frame_->Dup();
+ Result value = frame_->Pop();
+ value.ToRegister();
+ Result start = allocator_->Allocate();
+ ASSERT(start.is_valid());
+ __ movq(SlotOperand(slot, start.reg()), value.reg());
+ // RecordWrite may destroy the value registers.
+ //
+ // TODO(204): Avoid actually spilling when the value is not
+ // needed (probably the common case).
+ frame_->Spill(value.reg());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
+ // The results start, value, and temp are unused by going out of
+ // scope.
+ }
+
+ exit.Bind();
+ }
+}
+
+
+Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
+ Slot* slot,
+ TypeofState typeof_state,
+ JumpTarget* slow) {
+ // Check that no extension objects have been created by calls to
+ // eval from the current scope to the global scope.
+ Register context = rsi;
+ Result tmp = allocator_->Allocate();
+ ASSERT(tmp.is_valid()); // All non-reserved registers were available.
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_eval()) {
+ // Check that extension is NULL.
+ __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ Immediate(0));
+ slow->Branch(not_equal, not_taken);
+ }
+ // Load next context in chain.
+ __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
+ __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+ context = tmp.reg();
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions. If we have reached an eval scope, we check
+ // all extensions from this point.
+ if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s->is_eval_scope()) {
+ // Loop up the context chain. There is no frame effect so it is
+ // safe to use raw labels here.
+ Label next, fast;
+ if (!context.is(tmp.reg())) {
+ __ movq(tmp.reg(), context);
+ }
+ // Load map for comparison into register, outside loop.
+ __ Move(kScratchRegister, Factory::global_context_map());
+ __ bind(&next);
+ // Terminate at global context.
+ __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
+ __ j(equal, &fast);
+ // Check that extension is NULL.
+ __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
+ slow->Branch(not_equal);
+ // Load next context in chain.
+ __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
+ __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+ __ jmp(&next);
+ __ bind(&fast);
+ }
+ tmp.Unuse();
+
+ // All extension objects were empty and it is safe to use a global
+ // load IC call.
+ LoadGlobal();
+ frame_->Push(slot->var()->name());
+ RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
+ ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ Result answer = frame_->CallLoadIC(mode);
+ // A test rax instruction following the call signals that the inobject
+ // property case was inlined. Ensure that there is not a test rax
+ // instruction here.
+ __ nop();
+ // Discard the global object. The result is in answer.
+ frame_->Drop();
+ return answer;
+}
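
The scope walk above decides whether a fast global load is safe: every heap-allocated scope that calls eval must have a context whose extension object is NULL, since a non-NULL extension could hold an eval-introduced binding that shadows the global. A simplified sketch of that decision (it folds the runtime eval-scope loop into the same walk, so it is an approximation of the emitted code, not a transcription):

    #include <cstddef>

    struct ScopeInfo {
      bool has_heap_slots;
      bool calls_eval;
      bool outer_calls_eval;
      bool is_eval_scope;
      const void* context_extension;  // NULL means no eval bindings
      ScopeInfo* outer;
    };

    // Returns true when the global load IC may be used directly.
    bool FastGlobalLoadIsSafe(ScopeInfo* s) {
      while (s != NULL) {
        if (s->has_heap_slots && s->calls_eval &&
            s->context_extension != NULL) {
          return false;  // slow path: eval may have shadowed the name
        }
        if (!s->outer_calls_eval || s->is_eval_scope) break;
        s = s->outer;
      }
      return true;
    }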
+
+
+void CodeGenerator::LoadGlobal() {
+ if (in_spilled_code()) {
+ frame_->EmitPush(GlobalObject());
+ } else {
+ Result temp = allocator_->Allocate();
+ __ movq(temp.reg(), GlobalObject());
+ frame_->Push(&temp);
+ }
+}
+
+
+void CodeGenerator::LoadGlobalReceiver() {
+ Result temp = allocator_->Allocate();
+ Register reg = temp.reg();
+ __ movq(reg, GlobalObject());
+ __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
+ frame_->Push(&temp);
+}
+
+
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
+ if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+ ASSERT(scope_->arguments_shadow() != NULL);
+ // We don't want to do lazy arguments allocation for functions that
+ // have heap-allocated contexts, because it interferes with the
+ // uninitialized const tracking in the context objects.
+ return (scope_->num_heap_slots() > 0)
+ ? EAGER_ARGUMENTS_ALLOCATION
+ : LAZY_ARGUMENTS_ALLOCATION;
+}
+
+
+Result CodeGenerator::StoreArgumentsObject(bool initial) {
+ ArgumentsAllocationMode mode = ArgumentsMode();
+ ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
+
+ Comment cmnt(masm_, "[ store arguments object");
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
+ // When using lazy arguments allocation, we store the hole value
+ // as a sentinel indicating that the arguments object hasn't been
+ // allocated yet.
+ frame_->Push(Factory::the_hole_value());
+ } else {
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ frame_->PushFunction();
+ frame_->PushReceiverSlotAddress();
+ frame_->Push(Smi::FromInt(scope_->num_parameters()));
+ Result result = frame_->CallStub(&stub, 3);
+ frame_->Push(&result);
+ }
+
+ { Reference shadow_ref(this, scope_->arguments_shadow());
+ Reference arguments_ref(this, scope_->arguments());
+ ASSERT(shadow_ref.is_slot() && arguments_ref.is_slot());
+ // Here we rely on the convenient property that references to slots
+ // take up zero space in the frame (i.e., it doesn't matter that the
+ // stored value is actually below the reference on the frame).
+ JumpTarget done;
+ bool skip_arguments = false;
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
+ // We have to skip storing into the arguments slot if it has
+ // already been written to. This can happen if a function
+ // has a local variable named 'arguments'.
+ LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ Result arguments = frame_->Pop();
+ if (arguments.is_constant()) {
+ // We have to skip updating the arguments object if it has
+ // been assigned a proper value.
+ skip_arguments = !arguments.handle()->IsTheHole();
+ } else {
+ __ Cmp(arguments.reg(), Factory::the_hole_value());
+ arguments.Unuse();
+ done.Branch(not_equal);
+ }
+ }
+ if (!skip_arguments) {
+ arguments_ref.SetValue(NOT_CONST_INIT);
+ if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+ }
+ shadow_ref.SetValue(NOT_CONST_INIT);
+ }
+ return frame_->Pop();
+}
+
+
+// TODO(1241834): Get rid of this function in favor of just using Load, now
+// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
+// variables w/o reference errors elsewhere.
+void CodeGenerator::LoadTypeofExpression(Expression* x) {
+ Variable* variable = x->AsVariableProxy()->AsVariable();
+ if (variable != NULL && !variable->is_this() && variable->is_global()) {
+ // NOTE: This is somewhat nasty. We force the compiler to load
+ // the variable as if through '<global>.<variable>' to make sure we
+ // do not get reference errors.
+ Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
+ Literal key(variable->name());
+ // TODO(1241834): Fetch the position from the variable instead of using
+ // no position.
+ Property property(&global, &key, RelocInfo::kNoPosition);
+ Load(&property);
+ } else {
+ Load(x, INSIDE_TYPEOF);
+ }
+}
+
+
+void CodeGenerator::Comparison(Condition cc,
+ bool strict,
+ ControlDestination* dest) {
+ // Strict only makes sense for equality comparisons.
+ ASSERT(!strict || cc == equal);
+
+ Result left_side;
+ Result right_side;
+ // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
+ if (cc == greater || cc == less_equal) {
+ cc = ReverseCondition(cc);
+ left_side = frame_->Pop();
+ right_side = frame_->Pop();
+ } else {
+ right_side = frame_->Pop();
+ left_side = frame_->Pop();
+ }
+ ASSERT(cc == less || cc == equal || cc == greater_equal);
+
+ // If either side is a constant smi, optimize the comparison.
+ bool left_side_constant_smi =
+ left_side.is_constant() && left_side.handle()->IsSmi();
+ bool right_side_constant_smi =
+ right_side.is_constant() && right_side.handle()->IsSmi();
+ bool left_side_constant_null =
+ left_side.is_constant() && left_side.handle()->IsNull();
+ bool right_side_constant_null =
+ right_side.is_constant() && right_side.handle()->IsNull();
+
+ if (left_side_constant_smi || right_side_constant_smi) {
+ if (left_side_constant_smi && right_side_constant_smi) {
+ // Trivial case, comparing two constants.
+ int left_value = Smi::cast(*left_side.handle())->value();
+ int right_value = Smi::cast(*right_side.handle())->value();
+ switch (cc) {
+ case less:
+ dest->Goto(left_value < right_value);
+ break;
+ case equal:
+ dest->Goto(left_value == right_value);
+ break;
+ case greater_equal:
+ dest->Goto(left_value >= right_value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else { // Only one side is a constant Smi.
+ // If left side is a constant Smi, reverse the operands.
+ // Since one side is a constant Smi, conversion order does not matter.
+ if (left_side_constant_smi) {
+ Result temp = left_side;
+ left_side = right_side;
+ right_side = temp;
+ cc = ReverseCondition(cc);
+ // This may reintroduce greater or less_equal as the value of cc.
+ // CompareStub and the inline code both support all values of cc.
+ }
+ // Implement comparison against a constant Smi, inlining the case
+ // where both sides are Smis.
+ left_side.ToRegister();
+
+ // Here we split control flow to the stub call and inlined cases
+ // before finally splitting it to the control destination. We use
+ // a jump target and branching to duplicate the virtual frame at
+ // the first split. We manually handle the off-frame references
+ // by reconstituting them on the non-fall-through path.
+ JumpTarget is_smi;
+ Register left_reg = left_side.reg();
+ Handle<Object> right_val = right_side.handle();
+ __ testl(left_side.reg(), Immediate(kSmiTagMask));
+ is_smi.Branch(zero, taken);
+
+ // Setup and call the compare stub.
+ CompareStub stub(cc, strict);
+ Result result = frame_->CallStub(&stub, &left_side, &right_side);
+ result.ToRegister();
+ __ testq(result.reg(), result.reg());
+ result.Unuse();
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+
+ is_smi.Bind();
+ left_side = Result(left_reg);
+ right_side = Result(right_val);
+ // Test smi equality and comparison by signed int comparison.
+ if (IsUnsafeSmi(right_side.handle())) {
+ right_side.ToRegister();
+ __ cmpq(left_side.reg(), right_side.reg());
+ } else {
+ __ Cmp(left_side.reg(), right_side.handle());
+ }
+ left_side.Unuse();
+ right_side.Unuse();
+ dest->Split(cc);
+ }
+ } else if (cc == equal &&
+ (left_side_constant_null || right_side_constant_null)) {
+ // To make null checks efficient, we check if either the left side or
+ // the right side is the constant 'null'.
+ // If so, we optimize the code by inlining a null check instead of
+ // calling the (very) general runtime routine for checking equality.
+ Result operand = left_side_constant_null ? right_side : left_side;
+ right_side.Unuse();
+ left_side.Unuse();
+ operand.ToRegister();
+ __ Cmp(operand.reg(), Factory::null_value());
+ if (strict) {
+ operand.Unuse();
+ dest->Split(equal);
+ } else {
+ // The 'null' value is only equal to 'undefined' if using non-strict
+ // comparisons.
+ dest->true_target()->Branch(equal);
+ __ Cmp(operand.reg(), Factory::undefined_value());
+ dest->true_target()->Branch(equal);
+ __ testl(operand.reg(), Immediate(kSmiTagMask));
+ dest->false_target()->Branch(equal);
+
+ // It can be an undetectable object.
+ // Use a scratch register in preference to spilling operand.reg().
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(),
+ FieldOperand(operand.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ temp.Unuse();
+ operand.Unuse();
+ dest->Split(not_zero);
+ }
+ } else { // Neither side is a constant Smi or null.
+ // If either side is a non-smi constant, skip the smi check.
+ bool known_non_smi =
+ (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
+ (right_side.is_constant() && !right_side.handle()->IsSmi());
+ left_side.ToRegister();
+ right_side.ToRegister();
+
+ if (known_non_smi) {
+ // When non-smi, call out to the compare stub.
+ CompareStub stub(cc, strict);
+ Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+ __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
+ answer.Unuse();
+ dest->Split(cc);
+ } else {
+ // Here we split control flow to the stub call and inlined cases
+ // before finally splitting it to the control destination. We use
+ // a jump target and branching to duplicate the virtual frame at
+ // the first split. We manually handle the off-frame references
+ // by reconstituting them on the non-fall-through path.
+ JumpTarget is_smi;
+ Register left_reg = left_side.reg();
+ Register right_reg = right_side.reg();
+
+ __ movq(kScratchRegister, left_side.reg());
+ __ or_(kScratchRegister, right_side.reg());
+ __ testl(kScratchRegister, Immediate(kSmiTagMask));
+ is_smi.Branch(zero, taken);
+ // When non-smi, call out to the compare stub.
+ CompareStub stub(cc, strict);
+ Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+ if (cc == equal) {
+ __ testq(answer.reg(), answer.reg());
+ } else {
+ __ cmpq(answer.reg(), Immediate(0));
+ }
+ answer.Unuse();
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+
+ is_smi.Bind();
+ left_side = Result(left_reg);
+ right_side = Result(right_reg);
+ __ cmpq(left_side.reg(), right_side.reg());
+ right_side.Unuse();
+ left_side.Unuse();
+ dest->Split(cc);
+ }
+ }
+}
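
The reversal at the top of Comparison preserves ECMA-262's left-to-right conversion order: a > b is evaluated as b < a and a <= b as b >= a, with the operands popped in swapped order so the left expression is still converted first. A sketch of the condition mapping:

    enum Cond { LESS, EQUAL, GREATER_EQUAL, GREATER, LESS_EQUAL };

    // Illustrative equivalent of ReverseCondition for the cases used above.
    Cond Reverse(Cond cc) {
      switch (cc) {
        case GREATER:       return LESS;           // a > b   ==  b < a
        case LESS_EQUAL:    return GREATER_EQUAL;  // a <= b  ==  b >= a
        case LESS:          return GREATER;
        case GREATER_EQUAL: return LESS_EQUAL;
        default:            return cc;             // EQUAL is symmetric
      }
    }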
+
+
+// Flag that indicates whether the code that handles smi arguments
+// should be placed in the stub or inlined at the call site.
+enum GenericBinaryFlags {
+ SMI_CODE_IN_STUB,
+ SMI_CODE_INLINED
+};
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+ // Code pattern for loading a floating point value. Input value must
+ // be either a smi or a heap number object (fp value). Requirements:
+ // operand in src register. Returns operand as floating point number
+ // in the XMM register dst.
+ static void LoadFloatOperand(MacroAssembler* masm,
+ Register src,
+ XMMRegister dst);
+ // Code pattern for loading floating point values. Input values must
+ // be either smi or heap number objects (fp values). Requirements:
+ // operand_1 on TOS+1, operand_2 on TOS+2. Returns operands as
+ // floating point numbers in XMM registers.
+ static void LoadFloatOperands(MacroAssembler* masm,
+ XMMRegister dst1,
+ XMMRegister dst2);
+
+ // Code pattern for loading floating point values onto the fp stack.
+ // Input values must be either smi or heap number objects (fp values).
+ // Requirements:
+ // Register version: operands in registers lhs and rhs.
+ // Stack version: operands on TOS+1 and TOS+2.
+ // Returns operands as floating point numbers on fp stack.
+ static void LoadFloatOperands(MacroAssembler* masm);
+ static void LoadFloatOperands(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
+
+ // Code pattern for loading a floating point value and converting it
+ // to a 32 bit integer. Input value must be either a smi or a heap number
+ // object.
+ // Returns the operand as a 32-bit sign-extended integer in a
+ // general-purpose register.
+ static void LoadInt32Operand(MacroAssembler* masm,
+ const Operand& src,
+ Register dst);
+
+ // Test if operands are smi or number objects (fp). Requirements:
+ // operand_1 in rax, operand_2 in rdx; falls through on float
+ // operands, jumps to the non_float label otherwise.
+ static void CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float);
+ // Allocate a heap number in new space with undefined value.
+ // Returns tagged pointer in result, or jumps to need_gc if new space is full.
+ static void AllocateHeapNumber(MacroAssembler* masm,
+ Label* need_gc,
+ Register scratch,
+ Register result);
+};
+
+
+class GenericBinaryOpStub: public CodeStub {
+ public:
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ GenericBinaryFlags flags)
+ : op_(op), mode_(mode), flags_(flags) {
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ GenericBinaryFlags flags_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+ Token::String(op_),
+ static_cast<int>(mode_),
+ static_cast<int>(flags_));
+ }
+#endif
+
+ // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 13> {};
+ class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | FlagBits::encode(flags_);
+ }
+ void Generate(MacroAssembler* masm);
+};
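
The FOOOOOOOOOOOOOMM layout packs the stub parameters into 16 bits: mode in bits 0-1, token in bits 2-14, and the smi-code flag in bit 15. A sketch of the encoding the BitField helpers perform:

    #include <cassert>
    #include <cstdint>

    // Illustrative equivalents of ModeBits/OpBits/FlagBits::encode.
    static uint16_t EncodeMinorKey(int op, int mode, int flag) {
      assert(mode < 4 && op < (1 << 13) && flag < 2);
      return static_cast<uint16_t>(mode | (op << 2) | (flag << 15));
    }

    static void DecodeMinorKey(uint16_t key, int* op, int* mode, int* flag) {
      *mode = key & 0x3;            // ModeBits: 2 bits at offset 0
      *op   = (key >> 2) & 0x1FFF;  // OpBits: 13 bits at offset 2
      *flag = key >> 15;            // FlagBits: 1 bit at offset 15
    }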
+
+
+class DeferredInlineBinaryOperation: public DeferredCode {
+ public:
+ DeferredInlineBinaryOperation(Token::Value op,
+ Register dst,
+ Register left,
+ Register right,
+ OverwriteMode mode)
+ : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
+ set_comment("[ DeferredInlineBinaryOperation");
+ }
+
+ virtual void Generate();
+
+ private:
+ Token::Value op_;
+ Register dst_;
+ Register left_;
+ Register right_;
+ OverwriteMode mode_;
+};
+
+
+void DeferredInlineBinaryOperation::Generate() {
+ __ push(left_);
+ __ push(right_);
+ GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
+ __ CallStub(&stub);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+void CodeGenerator::GenericBinaryOperation(Token::Value op,
+ SmiAnalysis* type,
+ OverwriteMode overwrite_mode) {
+ Comment cmnt(masm_, "[ BinaryOperation");
+ Comment cmnt_token(masm_, Token::String(op));
+
+ if (op == Token::COMMA) {
+ // Simply discard left value.
+ frame_->Nip(1);
+ return;
+ }
+
+ // Set the flags based on the operation, type and loop nesting level.
+ GenericBinaryFlags flags;
+ switch (op) {
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ // Bit operations always assume they likely operate on Smis. Still only
+ // generate the inline Smi check code if this operation is part of a loop.
+ flags = (loop_nesting() > 0)
+ ? SMI_CODE_INLINED
+ : SMI_CODE_IN_STUB;
+ break;
+
+ default:
+ // By default only inline the Smi check code for likely smis if this
+ // operation is part of a loop.
+ flags = ((loop_nesting() > 0) && type->IsLikelySmi())
+ ? SMI_CODE_INLINED
+ : SMI_CODE_IN_STUB;
+ break;
+ }
+
+ Result right = frame_->Pop();
+ Result left = frame_->Pop();
+
+ if (op == Token::ADD) {
+ bool left_is_string = left.is_constant() && left.handle()->IsString();
+ bool right_is_string = right.is_constant() && right.handle()->IsString();
+ if (left_is_string || right_is_string) {
+ frame_->Push(&left);
+ frame_->Push(&right);
+ Result answer;
+ if (left_is_string) {
+ if (right_is_string) {
+ // TODO(lrn): if both are constant strings
+ // -- do a compile time cons, if allocation during codegen is allowed.
+ answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
+ } else {
+ answer =
+ frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
+ }
+ } else if (right_is_string) {
+ answer =
+ frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
+ }
+ frame_->Push(&answer);
+ return;
+ }
+ // Neither operand is known to be a string.
+ }
+
+ bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
+ bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
+ bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
+ bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
+ bool generate_no_smi_code = false; // No smi code at all, inline or in stub.
+
+ if (left_is_smi && right_is_smi) {
+ // Compute the constant result at compile time, and leave it on the frame.
+ int left_int = Smi::cast(*left.handle())->value();
+ int right_int = Smi::cast(*right.handle())->value();
+ if (FoldConstantSmis(op, left_int, right_int)) return;
+ }
+
+ if (left_is_non_smi || right_is_non_smi) {
+ // Set flag so that we go straight to the slow case, with no smi code.
+ generate_no_smi_code = true;
+ } else if (right_is_smi) {
+ ConstantSmiBinaryOperation(op, &left, right.handle(),
+ type, false, overwrite_mode);
+ return;
+ } else if (left_is_smi) {
+ ConstantSmiBinaryOperation(op, &right, left.handle(),
+ type, true, overwrite_mode);
+ return;
+ }
+
+ if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
+ LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ } else {
+ frame_->Push(&left);
+ frame_->Push(&right);
+ // If we know the arguments aren't smis, use the binary operation stub
+ // that does not check for the fast smi case; the SMI_CODE_INLINED
+ // flag also selects that stub when no smi code should be generated.
+ if (generate_no_smi_code) {
+ flags = SMI_CODE_INLINED;
+ }
+ GenericBinaryOpStub stub(op, overwrite_mode, flags);
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
+ }
+}
+
+
+// Emit a LoadIC call to get the value from receiver and leave it in
+// dst. The receiver register is restored after the call.
+class DeferredReferenceGetNamedValue: public DeferredCode {
+ public:
+ DeferredReferenceGetNamedValue(Register dst,
+ Register receiver,
+ Handle<String> name)
+ : dst_(dst), receiver_(receiver), name_(name) {
+ set_comment("[ DeferredReferenceGetNamedValue");
+ }
+
+ virtual void Generate();
+
+ Label* patch_site() { return &patch_site_; }
+
+ private:
+ Label patch_site_;
+ Register dst_;
+ Register receiver_;
+ Handle<String> name_;
+};
+
+
+void DeferredReferenceGetNamedValue::Generate() {
+ __ push(receiver_);
+ __ Move(rcx, name_);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // The call must be followed by a test rax instruction to indicate
+ // that the inobject property case was inlined.
+ //
+ // Store the delta to the map check instruction here in the test
+ // instruction. Use masm_-> instead of the __ macro since the
+ // latter can't return a value.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+ // Here we use masm_-> instead of the __ macro because this is the
+ // instruction that gets patched and coverage code gets in the way.
+ masm_->testq(rax, Immediate(-delta_to_patch_site));
+ __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+ __ pop(receiver_);
+}
+
+
+// The result of src + value is in dst. It either overflowed or was not
+// smi tagged. Undo the speculative addition and call the appropriate
+// specialized stub for add. The result is left in dst.
+class DeferredInlineSmiAdd: public DeferredCode {
+ public:
+ DeferredInlineSmiAdd(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiAdd");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiAdd::Generate() {
+ // Undo the optimistic add operation and call the shared stub.
+ __ subq(dst_, Immediate(value_));
+ __ push(dst_);
+ __ push(Immediate(value_));
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+ __ CallStub(&igostub);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+// The result of value + src is in dst. It either overflowed or was not
+// smi tagged. Undo the speculative addition and call the appropriate
+// specialized stub for add. The result is left in dst.
+class DeferredInlineSmiAddReversed: public DeferredCode {
+ public:
+ DeferredInlineSmiAddReversed(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiAddReversed");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiAddReversed::Generate() {
+ // Undo the optimistic add operation and call the shared stub.
+ __ subq(dst_, Immediate(value_));
+ __ push(Immediate(value_));
+ __ push(dst_);
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+ __ CallStub(&igostub);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+// The result of src - value is in dst. It either overflowed or was not
+// smi tagged. Undo the speculative subtraction and call the
+// appropriate specialized stub for subtract. The result is left in
+// dst.
+class DeferredInlineSmiSub: public DeferredCode {
+ public:
+ DeferredInlineSmiSub(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiSub");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiSub::Generate() {
+ // Undo the optimistic sub operation and call the shared stub.
+ __ addq(dst_, Immediate(value_));
+ __ push(dst_);
+ __ push(Immediate(value_));
+ GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
+ __ CallStub(&igostub);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
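
All three deferred classes back out of an optimistic operation: the generated fast path adds or subtracts the tagged words directly and only reaches the deferred code when the result overflowed or lost its smi tag, at which point the operation is undone and the generic stub is called. A sketch of the add case, assuming this port's value << 1 smi tagging and using a GCC/Clang overflow builtin in place of the jo branch:

    #include <cstdint>

    // Returns true on the fast path; false means "take the deferred path
    // with dst unchanged", mirroring the addq/subq undo above. (The real
    // fast path also re-tests the smi tag, because dst may not have held
    // a smi at all; that check is omitted here.)
    static bool OptimisticSmiAdd(int32_t* dst_tagged, int32_t value_tagged) {
      int32_t result;
      if (__builtin_add_overflow(*dst_tagged, value_tagged, &result)) {
        return false;  // overflow: fall back to GenericBinaryOpStub
      }
      *dst_tagged = result;  // sum of two tagged smis is a tagged smi
      return true;
    }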
+
+
+void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> value,
+ SmiAnalysis* type,
+ bool reversed,
+ OverwriteMode overwrite_mode) {
+ // NOTE: This is an attempt to inline (a bit) more of the code for
+ // some possible smi operations (like + and -) when (at least) one
+ // of the operands is a constant smi.
+ // Consumes the argument "operand".
+
+ // TODO(199): Optimize some special cases of operations involving a
+ // smi literal (multiply by 2, shift by 0, etc.).
+ if (IsUnsafeSmi(value)) {
+ Result unsafe_operand(value);
+ if (reversed) {
+ LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+ overwrite_mode);
+ } else {
+ LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+ overwrite_mode);
+ }
+ ASSERT(!operand->is_valid());
+ return;
+ }
+
+ // Get the literal value.
+ Smi* smi_value = Smi::cast(*value);
+
+ switch (op) {
+ case Token::ADD: {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+
+ // Optimistically add. Call the specialized add stub if the
+ // result is not a smi or overflows.
+ DeferredCode* deferred = NULL;
+ if (reversed) {
+ deferred = new DeferredInlineSmiAddReversed(operand->reg(),
+ smi_value,
+ overwrite_mode);
+ } else {
+ deferred = new DeferredInlineSmiAdd(operand->reg(),
+ smi_value,
+ overwrite_mode);
+ }
+ __ movq(kScratchRegister, value, RelocInfo::NONE);
+ __ addl(operand->reg(), kScratchRegister);
+ deferred->Branch(overflow);
+ __ testl(operand->reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ deferred->BindExit();
+ frame_->Push(operand);
+ break;
+ }
+ // TODO(X64): Move other implementations from ia32 to here.
+ default: {
+ Result constant_operand(value);
+ if (reversed) {
+ LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ LikelySmiBinaryOperation(op, operand, &constant_operand,
+ overwrite_mode);
+ }
+ break;
+ }
+ }
+ ASSERT(!operand->is_valid());
+}
+
+
+void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode) {
+ // Special handling of div and mod because they use fixed registers.
+ if (op == Token::DIV || op == Token::MOD) {
+ // We need rax as the quotient register, rdx as the remainder
+ // register, neither left nor right in rax or rdx, and left copied
+ // to rax.
+ Result quotient;
+ Result remainder;
+ bool left_is_in_rax = false;
+ // Step 1: get rax for quotient.
+ if ((left->is_register() && left->reg().is(rax)) ||
+ (right->is_register() && right->reg().is(rax))) {
+ // One or both is in rax. Use a fresh non-rdx register for
+ // them.
+ Result fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ if (fresh.reg().is(rdx)) {
+ remainder = fresh;
+ fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ }
+ if (left->is_register() && left->reg().is(rax)) {
+ quotient = *left;
+ *left = fresh;
+ left_is_in_rax = true;
+ }
+ if (right->is_register() && right->reg().is(rax)) {
+ quotient = *right;
+ *right = fresh;
+ }
+ __ movq(fresh.reg(), rax);
+ } else {
+ // Neither left nor right is in rax.
+ quotient = allocator_->Allocate(rax);
+ }
+ ASSERT(quotient.is_register() && quotient.reg().is(rax));
+ ASSERT(!(left->is_register() && left->reg().is(rax)));
+ ASSERT(!(right->is_register() && right->reg().is(rax)));
+
+ // Step 2: get rdx for remainder if necessary.
+ if (!remainder.is_valid()) {
+ if ((left->is_register() && left->reg().is(rdx)) ||
+ (right->is_register() && right->reg().is(rdx))) {
+ Result fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ if (left->is_register() && left->reg().is(rdx)) {
+ remainder = *left;
+ *left = fresh;
+ }
+ if (right->is_register() && right->reg().is(rdx)) {
+ remainder = *right;
+ *right = fresh;
+ }
+ __ movq(fresh.reg(), rdx);
+ } else {
+ // Neither left nor right is in rdx.
+ remainder = allocator_->Allocate(rdx);
+ }
+ }
+ ASSERT(remainder.is_register() && remainder.reg().is(rdx));
+ ASSERT(!(left->is_register() && left->reg().is(rdx)));
+ ASSERT(!(right->is_register() && right->reg().is(rdx)));
+
+ left->ToRegister();
+ right->ToRegister();
+ frame_->Spill(rax);
+ frame_->Spill(rdx);
+
+ // Check that left and right are smi tagged.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ (op == Token::DIV) ? rax : rdx,
+ left->reg(),
+ right->reg(),
+ overwrite_mode);
+ if (left->reg().is(right->reg())) {
+ __ testl(left->reg(), Immediate(kSmiTagMask));
+ } else {
+ // Use the quotient register as a scratch for the tag check.
+ if (!left_is_in_rax) __ movq(rax, left->reg());
+ left_is_in_rax = false; // About to destroy the value in rax.
+ __ or_(rax, right->reg());
+ ASSERT(kSmiTag == 0); // Adjust test if not the case.
+ __ testl(rax, Immediate(kSmiTagMask));
+ }
+ deferred->Branch(not_zero);
+
+ if (!left_is_in_rax) __ movq(rax, left->reg());
+ // Sign extend rax into rdx:rax.
+ __ cqo();
+ // Check for 0 divisor.
+ __ testq(right->reg(), right->reg());
+ deferred->Branch(zero);
+ // Divide rdx:rax by the right operand.
+ __ idiv(right->reg());
+
+ // Complete the operation.
+ if (op == Token::DIV) {
+ // Check for negative zero result. If result is zero, and divisor
+ // is negative, return a floating point negative zero. The
+ // virtual frame is unchanged in this block, so local control flow
+ // can use a Label rather than a JumpTarget.
+ Label non_zero_result;
+ __ testq(left->reg(), left->reg());
+ __ j(not_zero, &non_zero_result);
+ __ testq(right->reg(), right->reg());
+ deferred->Branch(negative);
+ __ bind(&non_zero_result);
+ // Check for the corner case of dividing the most negative smi by
+ // -1. We cannot use the overflow flag, since it is not set by
+ // idiv instruction.
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ cmpq(rax, Immediate(0x40000000));
+ deferred->Branch(equal);
+ // Check that the remainder is zero.
+ __ testq(rdx, rdx);
+ deferred->Branch(not_zero);
+ // Tag the result and store it in the quotient register.
+ ASSERT(kSmiTagSize == times_2); // adjust code if not the case
+ __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ frame_->Push(&quotient);
+ } else {
+ ASSERT(op == Token::MOD);
+ // Check for a negative zero result. If the result is zero, and
+ // the dividend is negative, return a floating point negative
+ // zero. The frame is unchanged in this block, so local control
+ // flow can use a Label rather than a JumpTarget.
+ Label non_zero_result;
+ __ testq(rdx, rdx);
+ __ j(not_zero, &non_zero_result);
+ __ testq(left->reg(), left->reg());
+ deferred->Branch(negative);
+ __ bind(&non_zero_result);
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ frame_->Push(&remainder);
+ }
+ return;
+ }
+
+ // Special handling of shift operations because they use fixed
+ // registers.
+ if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
+ // Move left out of rcx if necessary.
+ if (left->is_register() && left->reg().is(rcx)) {
+ *left = allocator_->Allocate();
+ ASSERT(left->is_valid());
+ __ movq(left->reg(), rcx);
+ }
+ right->ToRegister(rcx);
+ left->ToRegister();
+ ASSERT(left->is_register() && !left->reg().is(rcx));
+ ASSERT(right->is_register() && right->reg().is(rcx));
+
+ // We will modify right, it must be spilled.
+ frame_->Spill(rcx);
+
+ // Use a fresh answer register to avoid spilling the left operand.
+ Result answer = allocator_->Allocate();
+ ASSERT(answer.is_valid());
+ // Check that both operands are smis using the answer register as a
+ // temporary.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ answer.reg(),
+ left->reg(),
+ rcx,
+ overwrite_mode);
+ __ movq(answer.reg(), left->reg());
+ __ or_(answer.reg(), rcx);
+ __ testl(answer.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+
+ // Untag both operands.
+ __ movq(answer.reg(), left->reg());
+ __ sar(answer.reg(), Immediate(kSmiTagSize));
+ __ sar(rcx, Immediate(kSmiTagSize));
+ // Perform the operation.
+ switch (op) {
+ case Token::SAR:
+ __ sar(answer.reg());
+ // No checks of result necessary
+ break;
+ case Token::SHR: {
+ Label result_ok;
+ __ shr(answer.reg());
+ // Check that the *unsigned* result fits in a smi. Neither of
+ // the two high-order bits can be set:
+ // * 0x80000000: high bit would be lost when smi tagging.
+ // * 0x40000000: this number would convert to negative when smi
+ // tagging.
+ // These two cases can only happen with shifts by 0 or 1 when
+ // handed a valid smi. If the answer cannot be represented by a
+ // smi, restore the left and right arguments, and jump to slow
+ // case. The low bit of the left argument may be lost, but only
+ // in a case where it is dropped anyway.
+ __ testl(answer.reg(), Immediate(0xc0000000));
+ __ j(zero, &result_ok);
+ ASSERT(kSmiTag == 0);
+ __ shl(rcx, Immediate(kSmiTagSize));
+ deferred->Jump();
+ __ bind(&result_ok);
+ break;
+ }
+ case Token::SHL: {
+ Label result_ok;
+ __ shl(answer.reg());
+ // Check that the *signed* result fits in a smi.
+ __ cmpq(answer.reg(), Immediate(0xc0000000));
+ __ j(positive, &result_ok);
+ ASSERT(kSmiTag == 0);
+ __ shl(rcx, Immediate(kSmiTagSize));
+ deferred->Jump();
+ __ bind(&result_ok);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ // Smi-tag the result in answer.
+ ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
+ __ lea(answer.reg(),
+ Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ frame_->Push(&answer);
+ return;
+ }
+
+ // Handle the other binary operations.
+ left->ToRegister();
+ right->ToRegister();
+ // A newly allocated register, answer, holds the result. The
+ // registers containing left and right are not modified so they don't
+ // need to be spilled in the fast case.
+ Result answer = allocator_->Allocate();
+ ASSERT(answer.is_valid());
+
+ // Perform the smi tag check.
+ DeferredInlineBinaryOperation* deferred =
+ new DeferredInlineBinaryOperation(op,
+ answer.reg(),
+ left->reg(),
+ right->reg(),
+ overwrite_mode);
+ if (left->reg().is(right->reg())) {
+ __ testl(left->reg(), Immediate(kSmiTagMask));
+ } else {
+ __ movq(answer.reg(), left->reg());
+ __ or_(answer.reg(), right->reg());
+ ASSERT(kSmiTag == 0); // Adjust test if not the case.
+ __ testl(answer.reg(), Immediate(kSmiTagMask));
+ }
+ deferred->Branch(not_zero);
+ __ movq(answer.reg(), left->reg());
+ switch (op) {
+ case Token::ADD:
+ __ addl(answer.reg(), right->reg()); // Add optimistically.
+ deferred->Branch(overflow);
+ break;
+
+ case Token::SUB:
+ __ subl(answer.reg(), right->reg()); // Subtract optimistically.
+ deferred->Branch(overflow);
+ break;
+
+ case Token::MUL: {
+ // If the smi tag is 0 we can just leave the tag on one operand.
+ ASSERT(kSmiTag == 0); // Adjust code below if not the case.
+ // Remove smi tag from the left operand (but keep sign).
+ // Left-hand operand has been copied into answer.
+ __ sar(answer.reg(), Immediate(kSmiTagSize));
+ // Do multiplication of smis, leaving result in answer.
+ __ imull(answer.reg(), right->reg());
+ // Go slow on overflows.
+ deferred->Branch(overflow);
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case. The frame is unchanged
+ // in this block, so local control flow can use a Label rather
+ // than a JumpTarget.
+ Label non_zero_result;
+ __ testq(answer.reg(), answer.reg());
+ __ j(not_zero, &non_zero_result);
+ __ movq(answer.reg(), left->reg());
+ __ or_(answer.reg(), right->reg());
+ deferred->Branch(negative);
+ __ xor_(answer.reg(), answer.reg()); // Positive 0 is correct.
+ __ bind(&non_zero_result);
+ break;
+ }
+
+ case Token::BIT_OR:
+ __ or_(answer.reg(), right->reg());
+ break;
+
+ case Token::BIT_AND:
+ __ and_(answer.reg(), right->reg());
+ break;
+
+ case Token::BIT_XOR:
+ __ xor_(answer.reg(), right->reg());
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ deferred->BindExit();
+ left->Unuse();
+ right->Unuse();
+ frame_->Push(&answer);
+}
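
Several spots above rely on this port's smi representation: kSmiTag == 0 and kSmiTagSize == 1, so a tagged smi is simply value << 1 and the low bit doubles as the tag test. The closing lea(r, Operand(r, r, times_1, kSmiTag)) computes r + r, which is exactly that retag. A compilable sketch:

    #include <cassert>
    #include <cstdint>

    static int32_t SmiTag(int32_t value)  { return value << 1; }
    static int32_t SmiUntag(int32_t word) { return word >> 1; }   // the sar
    static bool    IsSmi(int32_t word)    { return (word & 1) == 0; }

    int main() {
      assert(IsSmi(SmiTag(21)));
      assert(SmiUntag(SmiTag(21)) == 21);
      assert(SmiTag(21) == 21 + 21);  // what the lea computes
      return 0;
    }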
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+Handle<String> Reference::GetName() {
+ ASSERT(type_ == NAMED);
+ Property* property = expression_->AsProperty();
+ if (property == NULL) {
+ // Global variable reference treated as a named property reference.
+ VariableProxy* proxy = expression_->AsVariableProxy();
+ ASSERT(proxy->AsVariable() != NULL);
+ ASSERT(proxy->AsVariable()->is_global());
+ return proxy->name();
+ } else {
+ Literal* raw_name = property->key()->AsLiteral();
+ ASSERT(raw_name != NULL);
+ return Handle<String>(String::cast(*raw_name->handle()));
+ }
+}
+
+
+void Reference::GetValue(TypeofState typeof_state) {
+ ASSERT(!cgen_->in_spilled_code());
+ ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(!is_illegal());
+ MacroAssembler* masm = cgen_->masm();
+ switch (type_) {
+ case SLOT: {
+ Comment cmnt(masm, "[ Load from Slot");
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ ASSERT(slot != NULL);
+ cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
+ break;
+ }
+
+ case NAMED: {
+ // TODO(1241834): Make sure that it is safe to ignore the
+ // distinction between expressions in a typeof and not in a
+ // typeof. If there is a chance that reference errors can be
+ // thrown below, we must distinguish between the two kinds of
+ // loads (typeof expression loads must not throw a reference
+ // error).
+ Variable* var = expression_->AsVariableProxy()->AsVariable();
+ bool is_global = var != NULL;
+ ASSERT(!is_global || var->is_global());
+
+ // Do not inline the inobject property case for loads from the global
+ // object. Also do not inline for unoptimized code. This saves time
+ // in the code generator. Unoptimized code is toplevel code or code
+ // that is not in a loop.
+ if (is_global ||
+ cgen_->scope()->is_global_scope() ||
+ cgen_->loop_nesting() == 0) {
+ Comment cmnt(masm, "[ Load from named Property");
+ cgen_->frame()->Push(GetName());
+
+ RelocInfo::Mode mode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ Result answer = cgen_->frame()->CallLoadIC(mode);
+ // A test rax instruction following the call signals that the
+ // inobject property case was inlined. Ensure that there is not
+ // a test rax instruction here.
+ __ nop();
+ cgen_->frame()->Push(&answer);
+ } else {
+ // Inline the inobject property case.
+ Comment cmnt(masm, "[ Inlined named property load");
+ Result receiver = cgen_->frame()->Pop();
+ receiver.ToRegister();
+
+ Result value = cgen_->allocator()->Allocate();
+ ASSERT(value.is_valid());
+ DeferredReferenceGetNamedValue* deferred =
+ new DeferredReferenceGetNamedValue(value.reg(),
+ receiver.reg(),
+ GetName());
+
+ // Check that the receiver is a heap object.
+ __ testl(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
+
+ __ bind(deferred->patch_site());
+ // This is the map check instruction that will be patched (so we can't
+ // use the double underscore macro that may insert instructions).
+ // Initially use an invalid map to force a failure.
+ masm->Move(kScratchRegister, Factory::null_value());
+ masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ // This branch is always a forwards branch so it's always a fixed
+ // size which allows the assert below to succeed and patching to work.
+ deferred->Branch(not_equal);
+
+ // The delta from the patch label to the load offset must be
+ // statically known.
+ ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
+ LoadIC::kOffsetToLoadInstruction);
+ // The initial (invalid) offset has to be large enough to force
+ // a 32-bit instruction encoding to allow patching with an
+ // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
+ int offset = kMaxInt;
+ masm->movq(value.reg(), FieldOperand(receiver.reg(), offset));
+
+ __ IncrementCounter(&Counters::named_load_inline, 1);
+ deferred->BindExit();
+ cgen_->frame()->Push(&receiver);
+ cgen_->frame()->Push(&value);
+ }
+ break;
+ }
+
+ case KEYED: {
+ // TODO(1241834): Make sure that it is safe to ignore the
+ // distinction between expressions in a typeof and not in a typeof.
+ Comment cmnt(masm, "[ Load from keyed Property");
+ Variable* var = expression_->AsVariableProxy()->AsVariable();
+ bool is_global = var != NULL;
+ ASSERT(!is_global || var->is_global());
+ // Inline array load code if inside of a loop. We do not know
+ // the receiver map yet, so we initially generate the code with
+ // a check against an invalid map. In the inline cache code, we
+ // patch the map check if appropriate.
+
+ // TODO(x64): Implement inlined loads for keyed properties.
+
+ RelocInfo::Mode mode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed load. The explicit nop instruction is here because
+ // the push that follows might be peep-hole optimized away.
+ __ nop();
+ cgen_->frame()->Push(&answer);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void Reference::TakeValue(TypeofState typeof_state) {
+ // TODO(X64): This function is completely architecture independent. Move
+ // it somewhere shared.
+
+ // For non-constant frame-allocated slots, we invalidate the value in the
+ // slot. For all others, we fall back on GetValue.
+ ASSERT(!cgen_->in_spilled_code());
+ ASSERT(!is_illegal());
+ if (type_ != SLOT) {
+ GetValue(typeof_state);
+ return;
+ }
+
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ ASSERT(slot != NULL);
+ if (slot->type() == Slot::LOOKUP ||
+ slot->type() == Slot::CONTEXT ||
+ slot->var()->mode() == Variable::CONST) {
+ GetValue(typeof_state);
+ return;
+ }
+
+ // Only non-constant, frame-allocated parameters and locals can reach
+ // here.
+ if (slot->type() == Slot::PARAMETER) {
+ cgen_->frame()->TakeParameterAt(slot->index());
+ } else {
+ ASSERT(slot->type() == Slot::LOCAL);
+ cgen_->frame()->TakeLocalAt(slot->index());
+ }
+}
+
+
+void Reference::SetValue(InitState init_state) {
+ ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(!is_illegal());
+ MacroAssembler* masm = cgen_->masm();
+ switch (type_) {
+ case SLOT: {
+ Comment cmnt(masm, "[ Store to Slot");
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ ASSERT(slot != NULL);
+ cgen_->StoreToSlot(slot, init_state);
+ break;
+ }
+
+ case NAMED: {
+ Comment cmnt(masm, "[ Store to named Property");
+ cgen_->frame()->Push(GetName());
+ Result answer = cgen_->frame()->CallStoreIC();
+ cgen_->frame()->Push(&answer);
+ break;
+ }
+
+ case KEYED: {
+ Comment cmnt(masm, "[ Store to keyed Property");
+
+ // TODO(x64): Implement inlined version of keyed stores.
+
+ Result answer = cgen_->frame()->CallKeyedStoreIC();
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed store.
+ __ nop();
+ cgen_->frame()->Push(&answer);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ Label false_result, true_result, not_string;
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+
+ // 'null' => false.
+ __ Cmp(rax, Factory::null_value());
+ __ j(equal, &false_result);
+
+ // Get the map and type of the heap object.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
+
+ // Undetectable => false.
+ __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
+ __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, &false_result);
+
+ // JavaScript object => true.
+ __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ j(above_equal, &true_result);
+
+ // String value => false iff empty.
+ __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
+ __ j(above_equal, &not_string);
+ __ and_(rcx, Immediate(kStringSizeMask));
+ __ cmpq(rcx, Immediate(kShortStringTag));
+ __ j(not_equal, &true_result); // Empty string is always short.
+ __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
+ __ shr(rdx, Immediate(String::kShortLengthShift));
+ __ j(zero, &false_result);
+ __ jmp(&true_result);
+
+ __ bind(&not_string);
+ // HeapNumber => false iff +0, -0, or NaN.
+ __ Cmp(rdx, Factory::heap_number_map());
+ __ j(not_equal, &true_result);
+ // TODO(x64): Don't use fp stack, use MMX registers?
+ __ fldz(); // Load zero onto fp stack
+ // Load heap-number double value onto fp stack
+ __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ fucompp(); // Compare and pop both values.
+ __ movq(kScratchRegister, rax);
+ __ fnstsw_ax(); // Store fp status word in ax, no checking for exceptions.
+ __ testb(rax, Immediate(0x08)); // Test FP condition flag C3.
+ __ movq(rax, kScratchRegister);
+ __ j(zero, &false_result);
+ // Fall through to |true_result|.
+
+ // Return 1/0 for true/false in rax.
+ __ bind(&true_result);
+ __ movq(rax, Immediate(1));
+ __ ret(1 * kPointerSize);
+ __ bind(&false_result);
+ __ xor_(rax, rax);
+ __ ret(1 * kPointerSize);
+}
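
For heap objects the stub implements the ECMA-262 ToBoolean cases: null and undetectable objects are false, JS objects are true, strings are true iff non-empty, and heap numbers are true iff neither +0, -0, nor NaN. A sketch of that decision table (value kinds are stand-ins for the map and instance-type checks):

    #include <cmath>

    enum Kind { NULL_VALUE, UNDETECTABLE, JS_OBJECT, STRING, HEAP_NUMBER };
    struct HeapValue { Kind kind; int string_length; double number; };

    bool ToBooleanSketch(const HeapValue& v) {
      switch (v.kind) {
        case NULL_VALUE:
        case UNDETECTABLE: return false;
        case JS_OBJECT:    return true;
        case STRING:       return v.string_length != 0;
        case HEAP_NUMBER:
          // +0 and -0 both compare equal to 0.0; NaN compares unequal
          // but must still be false, hence the explicit isnan check.
          return v.number != 0.0 && !std::isnan(v.number);
      }
      return false;
    }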
+
+
+bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
+ // TODO(X64): This method is identical to the ia32 version.
+ // Either find a reason to change it, or move it somewhere where it can be
+ // shared. (Notice: It assumes that a Smi can fit in an int).
+
+ Object* answer_object = Heap::undefined_value();
+ switch (op) {
+ case Token::ADD:
+ if (Smi::IsValid(left + right)) {
+ answer_object = Smi::FromInt(left + right);
+ }
+ break;
+ case Token::SUB:
+ if (Smi::IsValid(left - right)) {
+ answer_object = Smi::FromInt(left - right);
+ }
+ break;
+ case Token::MUL: {
+ double answer = static_cast<double>(left) * right;
+ if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
+ // If the product is zero and the non-zero factor is negative,
+ // the spec requires us to return floating point negative zero.
+ if (answer != 0 || (left >= 0 && right >= 0)) {
+ answer_object = Smi::FromInt(static_cast<int>(answer));
+ }
+ }
+ }
+ break;
+ case Token::DIV:
+ case Token::MOD:
+ break;
+ case Token::BIT_OR:
+ answer_object = Smi::FromInt(left | right);
+ break;
+ case Token::BIT_AND:
+ answer_object = Smi::FromInt(left & right);
+ break;
+ case Token::BIT_XOR:
+ answer_object = Smi::FromInt(left ^ right);
+ break;
+
+ case Token::SHL: {
+ int shift_amount = right & 0x1F;
+ if (Smi::IsValid(left << shift_amount)) {
+ answer_object = Smi::FromInt(left << shift_amount);
+ }
+ break;
+ }
+ case Token::SHR: {
+ int shift_amount = right & 0x1F;
+ unsigned int unsigned_left = left;
+ unsigned_left >>= shift_amount;
+ if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
+ answer_object = Smi::FromInt(unsigned_left);
+ }
+ break;
+ }
+ case Token::SAR: {
+ int shift_amount = right & 0x1F;
+ unsigned int unsigned_left = left;
+ if (left < 0) {
+ // Perform arithmetic shift of a negative number by
+ // complementing number, logical shifting, complementing again.
+ unsigned_left = ~unsigned_left;
+ unsigned_left >>= shift_amount;
+ unsigned_left = ~unsigned_left;
+ } else {
+ unsigned_left >>= shift_amount;
+ }
+ ASSERT(Smi::IsValid(unsigned_left)); // Converted to signed.
+ answer_object = Smi::FromInt(unsigned_left); // Converted to signed.
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (answer_object == Heap::undefined_value()) {
+ return false;
+ }
+ frame_->Push(Handle<Object>(answer_object));
+ return true;
+}
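+
+
+// The SAR case above emulates an arithmetic right shift of a negative
+// number using unsigned operations: ~((~x) >> n) == x >> n, because the
+// complement turns the leading one bits into zeros, which the logical
+// shift fills back in after the second complement. A worked example:
+//
+//   int left = -8;                              // bits ...11111000
+//   unsigned u = ~static_cast<unsigned>(left);  // bits ...00000111
+//   u >>= 2;                                    // bits ...00000001
+//   int result = static_cast<int>(~u);          // bits ...11111110 == -2
+//   // -8 >> 2 (arithmetic) is indeed -2.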
+
+
+// End of CodeGenerator implementation.
+
+void UnarySubStub::Generate(MacroAssembler* masm) {
+ Label slow;
+ Label done;
+ Label try_float;
+
+ // Check whether the value is a smi.
+ __ testl(rax, Immediate(kSmiTagMask));
+ // TODO(X64): Add inline code that handles floats, as on ia32 platform.
+ __ j(not_zero, &slow);
+
+ // Enter runtime system if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ __ testq(rax, rax);
+ __ j(zero, &slow);
+
+ // The value of the expression is a smi that is not zero. Try
+ // optimistic subtraction '0 - value'.
+ __ movq(rdx, rax);
+ __ xor_(rax, rax);
+ __ subl(rax, rdx);
+ __ j(no_overflow, &done);
+ // Restore rax and enter runtime system.
+ __ movq(rax, rdx);
+
+ // Enter runtime system.
+ __ bind(&slow);
+ __ pop(rcx); // pop return address
+ __ push(rax);
+ __ push(rcx); // push return address
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+
+ __ bind(&done);
+ __ StubReturn(1);
+}
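+
+
+// The zero check above exists because of negative zero: -(+0) must be
+// -0.0, which no smi can represent, so zero (and the overflowing
+// -Smi::kMinValue case) is handed to the runtime. A sketch of the
+// intended semantics (helper names are illustrative):
+//
+//   if (value == 0) return RuntimeUnaryMinus(value);  // yields heap -0.0
+//   if (WouldOverflow(-value)) return RuntimeUnaryMinus(value);
+//   return Smi::FromInt(-value);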
+
+
+void CompareStub::Generate(MacroAssembler* masm) {
+ Label call_builtin, done;
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ if (cc_ == equal) { // Both strict and non-strict.
+ Label slow; // Fallthrough label.
+ // Equality is almost reflexive (everything but NaN), so start by testing
+ // for "identity and not NaN".
+ {
+ Label not_identical;
+ __ cmpq(rax, rdx);
+ __ j(not_equal, &not_identical);
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+
+ Label return_equal;
+ Label heap_number;
+ // If it's not a heap number, then return equal.
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(equal, &heap_number);
+ __ bind(&return_equal);
+ __ xor_(rax, rax);
+ __ ret(0);
+
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read double representation into rax.
+ __ movq(rbx, 0x7ff0000000000000, RelocInfo::NONE);
+ __ movq(rax, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Test that exponent bits are all set.
+ __ or_(rbx, rax);
+ __ cmpq(rbx, rax);
+ __ j(not_equal, &return_equal);
+      // Shift out the sign and exponent bits, retaining only the mantissa.
+ __ shl(rax, Immediate(12));
+ // If all bits in the mantissa are zero the number is Infinity, and
+ // we return zero. Otherwise it is a NaN, and we return non-zero.
+ // So just return rax.
+ __ ret(0);
+
+ __ bind(&not_identical);
+ }
+
+ // If we're doing a strict equality comparison, we don't have to do
+ // type conversion, so we generate code to do fast comparison for objects
+ // and oddballs. Non-smi numbers and strings still go through the usual
+ // slow-case code.
+ if (strict_) {
+ // If either is a Smi (we know that not both are), then they can only
+ // be equal if the other is a HeapNumber. If so, use the slow case.
+ {
+ Label not_smis;
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ movq(rcx, Immediate(kSmiTagMask));
+ __ and_(rcx, rax);
+ __ testq(rcx, rdx);
+ __ j(not_zero, &not_smis);
+ // One operand is a smi.
+
+ // Check whether the non-smi is a heap number.
+ ASSERT_EQ(1, kSmiTagMask);
+      // rcx still holds rax & kSmiTagMask, which is either zero or one.
+ __ decq(rcx); // If rax is a smi, all 1s, else all 0s.
+ __ movq(rbx, rdx);
+ __ xor_(rbx, rax);
+ __ and_(rbx, rcx); // rbx holds either 0 or rax ^ rdx.
+ __ xor_(rbx, rax);
+ // if rax was smi, rbx is now rdx, else rax.
+
+ // Check if the non-smi operand is a heap number.
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ // If heap number, handle it in the slow case.
+ __ j(equal, &slow);
+      // Return non-equal (rbx is not zero).
+ __ movq(rax, rbx);
+ __ ret(0);
+
+ __ bind(&not_smis);
+ }
+
+    // If either operand is a JSObject or an oddball value, then they are
+    // not equal, since their pointers are different.
+    // There is no test for undetectability in strict equality.
+
+ // If the first object is a JS object, we have done pointer comparison.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ Label first_non_object;
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &first_non_object);
+ // Return non-zero (rax is not zero)
+ Label return_not_equal;
+ ASSERT(kHeapObjectTag != 0);
+ __ bind(&return_not_equal);
+ __ ret(0);
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(rcx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(above_equal, &return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(rcx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ // Fall through to the general case.
+ }
+ __ bind(&slow);
+ }
+
+ // Push arguments below the return address to prepare jump to builtin.
+ __ pop(rcx);
+ __ push(rax);
+ __ push(rdx);
+ __ push(rcx);
+
+ // Inlined floating point compare.
+ // Call builtin if operands are not floating point or smi.
+ Label check_for_symbols;
+  // The operands were pushed on the stack above for the helper functions.
+ FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols);
+ FloatingPointHelper::LoadFloatOperands(masm, rax, rdx);
+ __ FCmp();
+
+ // Jump to builtin for NaN.
+ __ j(parity_even, &call_builtin);
+
+ // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
+ Label below_lbl, above_lbl;
+  // Convert the unsigned condition flags into -1, 0, or 1 in rax.
+ __ j(below, &below_lbl);
+ __ j(above, &above_lbl);
+
+ __ xor_(rax, rax); // equal
+ __ ret(2 * kPointerSize);
+
+ __ bind(&below_lbl);
+ __ movq(rax, Immediate(-1));
+ __ ret(2 * kPointerSize);
+
+ __ bind(&above_lbl);
+ __ movq(rax, Immediate(1));
+ __ ret(2 * kPointerSize); // rax, rdx were pushed
+
+ // Fast negative check for symbol-to-symbol equality.
+ __ bind(&check_for_symbols);
+ if (cc_ == equal) {
+ BranchIfNonSymbol(masm, &call_builtin, rax, kScratchRegister);
+ BranchIfNonSymbol(masm, &call_builtin, rdx, kScratchRegister);
+
+ // We've already checked for object identity, so if both operands
+ // are symbols they aren't equal. Register rax already holds a
+ // non-zero value, which indicates not equal, so just return.
+ __ ret(2 * kPointerSize);
+ }
+
+ __ bind(&call_builtin);
+  // Must swap argument order.
+ __ pop(rcx);
+ __ pop(rdx);
+ __ pop(rax);
+ __ push(rdx);
+ __ push(rax);
+
+  // Figure out which native to call and set up the arguments.
+ Builtins::JavaScript builtin;
+ if (cc_ == equal) {
+ builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ builtin = Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if (cc_ == less || cc_ == less_equal) {
+ ncr = GREATER;
+ } else {
+ ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases
+ ncr = LESS;
+ }
+ __ push(Immediate(Smi::FromInt(ncr)));
+ }
+
+ // Restore return address on the stack.
+ __ push(rcx);
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
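+
+
+// The identity path above detects NaN straight from the IEEE 754 bit
+// pattern: all eleven exponent bits set and at least one mantissa bit
+// set. A C++ sketch of the same test, assuming the double's bits are
+// already in a uint64_t:
+//
+//   bool IsNaNBits(uint64_t bits) {
+//     const uint64_t kExponentMask = 0x7ff0000000000000;
+//     return (bits & kExponentMask) == kExponentMask  // exponent all ones
+//            && (bits << 12) != 0;                    // mantissa non-zero
+//   }
+//
+// Shifting left by 12 drops the sign and exponent, so Infinity (mantissa
+// zero) shifts to zero while any NaN leaves a non-zero value in rax.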
+
+
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ testl(object, Immediate(kSmiTagMask));
+ __ j(zero, label);
+ __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbq(scratch,
+ FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, Immediate(kIsSymbolMask | kIsNotStringMask));
+ __ cmpb(scratch, Immediate(kSymbolTag | kStringTag));
+ __ j(not_equal, label);
+}
+
+
+// Call the function just below TOS on the stack with the given
+// arguments. The receiver is the TOS.
+void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+ int position) {
+ // Push the arguments ("left-to-right") on the stack.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Record the position for debugging purposes.
+ CodeForSourcePosition(position);
+
+ // Use the shared code stub to call the function.
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
+ Result answer = frame_->CallStub(&call_function, arg_count + 1);
+ // Restore context and replace function on the stack with the
+ // result of the stub invocation.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &answer);
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // The displacement is used for skipping the return address and the
+ // frame pointer on the stack. It is the offset of the last
+ // parameter (if any) relative to the frame pointer.
+ static const int kDisplacement = 2 * kPointerSize;
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ j(not_equal, &runtime);
+ // Value in rcx is Smi encoded.
+
+ // Patch the arguments.length and the parameters pointer.
+ __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ lea(rdx, Operand(rdx, rcx, times_4, kDisplacement));
+ __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3);
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The key is in rdx and the parameter count is in rax.
+
+ // The displacement is used for skipping the frame pointer on the
+ // stack. It is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement = 1 * kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ testl(rdx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(rcx, Operand(rbx, StandardFrameConstants::kContextOffset));
+ __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ j(equal, &adaptor);
+
+ // Check index against formal parameters count limit passed in
+ // through register rax. Use unsigned comparison to get negative
+ // check for free.
+ __ cmpq(rdx, rax);
+ __ j(above_equal, &slow);
+
+ // Read the argument from the stack and return it.
+  // The index is smi encoded (value << 1), so scaling it by times_4
+  // multiplies the untagged value by 8, i.e. by kPointerSize.
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ lea(rbx, Operand(rbp, rax, times_4, 0));
+ __ neg(rdx);
+ __ movq(rax, Operand(rbx, rdx, times_4, kDisplacement));
+ __ Ret();
+
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmpq(rdx, rcx);
+ __ j(above_equal, &slow);
+
+ // Read the argument from the stack and return it.
+  // The index is smi encoded (value << 1), so scaling it by times_4
+  // multiplies the untagged value by 8, i.e. by kPointerSize.
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ lea(rbx, Operand(rbx, rcx, times_4, 0));
+ __ neg(rdx);
+ __ movq(rax, Operand(rbx, rdx, times_4, kDisplacement));
+ __ Ret();
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ pop(rbx); // Return address.
+ __ push(rdx);
+ __ push(rbx);
+ __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1);
+}
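+
+
+// Both load paths above exploit the smi encoding of the key: a smi
+// stores value << 1, so scaling the key by times_4 multiplies the
+// untagged value by 8 == kPointerSize without explicit untagging:
+//
+//   key = 3  ->  smi bits = 3 << 1 = 6
+//   6 * 4 = 24 = 3 * kPointerSize   // byte offset of the third argument
+//
+// The negated key then indexes backwards from the end of the arguments.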
+
+
+void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ j(equal, &adaptor);
+
+  // Nothing to do: the formal number of parameters has already been
+  // passed in register rax by the calling function. Just return it.
+ __ ret(0);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame and return it.
+ __ bind(&adaptor);
+ __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ ret(0);
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  // Check that the stack contains the next handler, frame pointer, state
+  // and return address, in that order.
+ ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
+ StackHandlerConstants::kStateOffset);
+ ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
+ StackHandlerConstants::kPCOffset);
+
+ ExternalReference handler_address(Top::k_handler_address);
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rsp, Operand(kScratchRegister, 0));
+  // Get the next handler in the chain.
+ __ pop(rcx);
+ __ movq(Operand(kScratchRegister, 0), rcx);
+ __ pop(rbp); // pop frame pointer
+ __ pop(rdx); // remove state
+
+ // Before returning we restore the context from the frame pointer if not NULL.
+ // The frame pointer is NULL in the exception handler of a JS entry frame.
+ __ xor_(rsi, rsi); // tentatively set context pointer to NULL
+ Label skip;
+ __ cmpq(rbp, Immediate(0));
+ __ j(equal, &skip);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ bind(&skip);
+ __ ret(0);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_out_of_memory_exception,
+ StackFrame::Type frame_type,
+ bool do_gc,
+ bool always_allocate_scope) {
+ // rax: result parameter for PerformGC, if any.
+ // rbx: pointer to C function (C callee-saved).
+ // rbp: frame pointer (restored after C call).
+ // rsp: stack pointer (restored after C call).
+ // rdi: number of arguments including receiver.
+ // r15: pointer to the first argument (C callee-saved).
+ // This pointer is reused in LeaveExitFrame(), so it is stored in a
+ // callee-saved register.
+
+ if (do_gc) {
+ __ movq(Operand(rsp, 0), rax); // Result.
+ __ movq(kScratchRegister,
+ FUNCTION_ADDR(Runtime::PerformGC),
+ RelocInfo::RUNTIME_ENTRY);
+ __ call(kScratchRegister);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth();
+ if (always_allocate_scope) {
+ __ movq(kScratchRegister, scope_depth);
+ __ incl(Operand(kScratchRegister, 0));
+ }
+
+ // Call C function.
+#ifdef _MSC_VER
+ // MSVC passes arguments in rcx, rdx, r8, r9
+ __ movq(rcx, rdi); // argc.
+ __ movq(rdx, r15); // argv.
+#else  // !defined(_MSC_VER)
+ // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
+ // First argument is already in rdi.
+ __ movq(rsi, r15); // argv.
+#endif
+ __ call(rbx);
+ // Result is in rax - do not destroy this register!
+
+ if (always_allocate_scope) {
+ __ movq(kScratchRegister, scope_depth);
+ __ decl(Operand(kScratchRegister, 0));
+ }
+
+ // Check for failure result.
+ Label failure_returned;
+ ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+ __ lea(rcx, Operand(rax, 1));
+ // Lower 2 bits of rcx are 0 iff rax has failure tag.
+ __ testl(rcx, Immediate(kFailureTagMask));
+ __ j(zero, &failure_returned);
+
+ // Exit the JavaScript to C++ exit frame.
+ __ LeaveExitFrame(frame_type);
+ __ ret(0);
+
+ // Handling of failure.
+ __ bind(&failure_returned);
+
+ Label retry;
+  // If the returned failure is RETRY_AFTER_GC, continue at the retry label.
+ ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+ __ j(zero, &retry);
+
+ Label continue_exception;
+ // If the returned failure is EXCEPTION then promote Top::pending_exception().
+ __ movq(kScratchRegister, Failure::Exception(), RelocInfo::NONE);
+ __ cmpq(rax, kScratchRegister);
+ __ j(not_equal, &continue_exception);
+
+ // Retrieve the pending exception and clear the variable.
+ ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ __ movq(kScratchRegister, pending_exception_address);
+ __ movq(rax, Operand(kScratchRegister, 0));
+ __ movq(rdx, ExternalReference::the_hole_value_location());
+ __ movq(rdx, Operand(rdx, 0));
+ __ movq(Operand(kScratchRegister, 0), rdx);
+
+ __ bind(&continue_exception);
+ // Special handling of out of memory exception.
+ __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
+ __ cmpq(rax, kScratchRegister);
+ __ j(equal, throw_out_of_memory_exception);
+
+ // Handle normal exception.
+ __ jmp(throw_normal_exception);
+
+ // Retry.
+ __ bind(&retry);
+}
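+
+
+// The failure check above relies on the low tag bits: incrementing a
+// failure object's address clears all bits under kFailureTagMask (this
+// is exactly what the ASSERT guarantees), while any other value keeps at
+// least one of them set. Schematically:
+//
+//   failure:      ...xx11  + 1  ->  ...(x+1)00   // masked bits zero
+//   smi:          ...xxx0  + 1  ->  ...xxx1      // non-zero
+//   heap object:  ...xx01  + 1  ->  ...xx10      // non-zero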
+
+
+void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
+ // Fetch top stack handler.
+ ExternalReference handler_address(Top::k_handler_address);
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rdx, Operand(kScratchRegister, 0));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label loop, done;
+ __ bind(&loop);
+  // Check whether the current stack handler is an ENTRY handler.
+ __ cmpq(Operand(rdx, StackHandlerConstants::kStateOffset),
+ Immediate(StackHandler::ENTRY));
+ __ j(equal, &done);
+ // Fetch the next handler in the list.
+ __ movq(rdx, Operand(rdx, StackHandlerConstants::kNextOffset));
+ __ jmp(&loop);
+ __ bind(&done);
+
+ // Set the top handler address to next handler past the current ENTRY handler.
+ __ movq(rax, Operand(rdx, StackHandlerConstants::kNextOffset));
+ __ store_rax(handler_address);
+
+ // Set external caught exception to false.
+ __ movq(rax, Immediate(false));
+ ExternalReference external_caught(Top::k_external_caught_exception_address);
+ __ store_rax(external_caught);
+
+ // Set pending exception and rax to out of memory exception.
+ __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ store_rax(pending_exception);
+
+ // Restore the stack to the address of the ENTRY handler
+ __ movq(rsp, rdx);
+
+  // Clear the context pointer.
+ __ xor_(rsi, rsi);
+
+ // Restore registers from handler.
+
+ __ pop(rbp); // FP
+ ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
+ StackHandlerConstants::kStateOffset);
+ __ pop(rdx); // State
+
+ ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
+ StackHandlerConstants::kPCOffset);
+ __ ret(0);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
+
+ // Get the function to call from the stack.
+ // +2 ~ receiver, return address
+ __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
+
+ // Check that the function really is a JavaScript function.
+ __ testl(rdi, Immediate(kSmiTagMask));
+ __ j(zero, &slow);
+  // Go to the slow case if we do not have a function.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &slow);
+
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc_);
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ __ Set(rax, argc_);
+ __ Set(rbx, 0);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
+ Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
}
void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
- masm->int3(); // TODO(X64): UNIMPLEMENTED.
+ // rax: number of arguments including receiver
+ // rbx: pointer to C function (C callee-saved)
+ // rbp: frame pointer (restored after C call)
+ // rsp: stack pointer (restored after C call)
+ // rsi: current context (C callee-saved)
+ // rdi: caller's parameter pointer pp (C callee-saved)
+
+ // NOTE: Invocations of builtins may return failure objects
+ // instead of a proper result. The builtin entry handles
+ // this by performing a garbage collection and retrying the
+ // builtin once.
+
+ StackFrame::Type frame_type = is_debug_break ?
+ StackFrame::EXIT_DEBUG :
+ StackFrame::EXIT;
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame(frame_type);
+
+ // rax: result parameter for PerformGC, if any (setup below).
+ // Holds the result of a previous call to GenerateCore that
+ // returned a failure. On next call, it's used as parameter
+ // to Runtime::PerformGC.
+ // rbx: pointer to builtin function (C callee-saved).
+ // rbp: frame pointer (restored after C call).
+ // rsp: stack pointer (restored after C call).
+ // rdi: number of arguments including receiver (destroyed by C call).
+  //      The rdi register is not callee-saved in the Unix 64-bit ABI, so
+  //      we must treat it as volatile.
+ // r15: argv pointer (C callee-saved).
+
+ Label throw_out_of_memory_exception;
+ Label throw_normal_exception;
+
+ // Call into the runtime system. Collect garbage before the call if
+ // running with --gc-greedy set.
+ if (FLAG_gc_greedy) {
+ Failure* failure = Failure::RetryAfterGC(0);
+ __ movq(rax, failure, RelocInfo::NONE);
+ }
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_out_of_memory_exception,
+ frame_type,
+ FLAG_gc_greedy,
+ false);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_out_of_memory_exception,
+ frame_type,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ movq(rax, failure, RelocInfo::NONE);
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_out_of_memory_exception,
+ frame_type,
+ true,
+ true);
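+
+  // Taken together, the three GenerateCore calls implement an escalating
+  // retry policy; roughly, in pseudo-C++:
+  //
+  //   result = run(gc_only_if_gc_greedy);           // optimistic call
+  //   if (retry_after_gc(result)) result = run(gc); // collect and retry
+  //   if (retry_after_gc(result))
+  //     result = run(full_gc_always_allocate);      // last resort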
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowOutOfMemory(masm);
+  // Control flow from the generated throw code does not return.
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
}
@@ -281,7 +6191,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Invoke: Link this frame into the handler chain.
__ bind(&invoke);
__ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
- __ push(rax); // flush TOS
// Clear any pending exceptions.
__ load_rax(ExternalReference::the_hole_value_location());
@@ -302,13 +6211,14 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
ExternalReference entry(Builtins::JSEntryTrampoline);
__ load_rax(entry);
}
- __ call(FieldOperand(rax, Code::kHeaderSize));
+ __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
+ __ call(kScratchRegister);
// Unlink this frame from the handler chain.
__ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
__ pop(Operand(kScratchRegister, 0));
// Pop next_sp.
- __ add(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+ __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
// Restore the top frame descriptor from the stack.
__ bind(&exit);
@@ -323,7 +6233,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ pop(r14);
__ pop(r13);
__ pop(r12);
- __ add(rsp, Immediate(2 * kPointerSize)); // remove markers
+ __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
// Restore frame pointer and return.
__ pop(rbp);
@@ -331,6 +6241,596 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
+// -----------------------------------------------------------------------------
+// Implementation of stubs.
+
+// Stub classes have a public member named masm, not masm_.
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ // Because builtins always remove the receiver from the stack, we
+ // have to fake one to avoid underflowing the stack. The receiver
+ // must be inserted below the return address on the stack so we
+ // temporarily store that in a register.
+ __ pop(rax);
+ __ push(Immediate(Smi::FromInt(0)));
+ __ push(rax);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1);
+}
+
+
+void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
+ Label* need_gc,
+ Register scratch,
+ Register result) {
+ ExternalReference allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+ ExternalReference allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ __ movq(scratch, allocation_top); // scratch: address of allocation top.
+ __ movq(result, Operand(scratch, 0));
+ __ addq(result, Immediate(HeapNumber::kSize)); // New top.
+ __ movq(kScratchRegister, allocation_limit);
+ __ cmpq(result, Operand(kScratchRegister, 0));
+ __ j(above, need_gc);
+
+  __ movq(Operand(scratch, 0), result);  // Store the new top.
+  // Tag the old top and use it as the result.
+  __ addq(result, Immediate(kHeapObjectTag - HeapNumber::kSize));
+  __ movq(kScratchRegister,
+          Factory::heap_number_map(),
+          RelocInfo::EMBEDDED_OBJECT);
+  __ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+}
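+
+
+// AllocateHeapNumber above is a bump-pointer allocation in new space.
+// The fast path corresponds to this sketch (the pointer variables are
+// illustrative):
+//
+//   byte* top = *allocation_top;
+//   byte* new_top = top + HeapNumber::kSize;
+//   if (new_top > *allocation_limit) goto need_gc;
+//   *allocation_top = new_top;
+//   HeapObject* obj = HeapObject::FromAddress(top);  // adds kHeapObjectTag
+//   obj->set_map(heap_number_map);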
+
+
+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+ Register src,
+ XMMRegister dst) {
+ Label load_smi, done;
+
+ __ testl(src, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi);
+ __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi);
+ __ sar(src, Immediate(kSmiTagSize));
+ __ cvtlsi2sd(dst, src);
+
+ __ bind(&done);
+}
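+
+
+// LoadFloatOperand above handles both number representations: a heap
+// number's double payload is loaded directly, while a smi is untagged by
+// an arithmetic shift and converted with cvtlsi2sd. For example:
+//
+//   smi bits 42 == 21 << 1  ->  sar 1  ->  21  ->  cvtlsi2sd  ->  21.0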
+
+
+void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
+ XMMRegister dst1,
+ XMMRegister dst2) {
+ __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
+ LoadFloatOperand(masm, kScratchRegister, dst1);
+ __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ LoadFloatOperand(masm, kScratchRegister, dst2);
+}
+
+
+void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm,
+ const Operand& src,
+ Register dst) {
+ // TODO(X64): Convert number operands to int32 values.
+ // Don't convert a Smi to a double first.
+ UNIMPLEMENTED();
+}
+
+
+void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
+ Label load_smi_1, load_smi_2, done_load_1, done;
+ __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
+ __ testl(kScratchRegister, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_1);
+ __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
+ __ bind(&done_load_1);
+
+ __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ testl(kScratchRegister, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_2);
+ __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_1);
+ __ sar(kScratchRegister, Immediate(kSmiTagSize));
+ __ push(kScratchRegister);
+ __ fild_s(Operand(rsp, 0));
+ __ pop(kScratchRegister);
+ __ jmp(&done_load_1);
+
+ __ bind(&load_smi_2);
+ __ sar(kScratchRegister, Immediate(kSmiTagSize));
+ __ push(kScratchRegister);
+ __ fild_s(Operand(rsp, 0));
+ __ pop(kScratchRegister);
+
+ __ bind(&done);
+}
+
+void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
+ Register lhs,
+ Register rhs) {
+ Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
+ __ testl(lhs, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_lhs);
+ __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
+ __ bind(&done_load_lhs);
+
+ __ testl(rhs, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_rhs);
+ __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_lhs);
+ ASSERT(kSmiTagSize == 1);
+ ASSERT(kSmiTag == 0);
+ __ lea(kScratchRegister, Operand(lhs, lhs, times_1, 0));
+ __ push(kScratchRegister);
+ __ fild_s(Operand(rsp, 0));
+ __ pop(kScratchRegister);
+ __ jmp(&done_load_lhs);
+
+ __ bind(&load_smi_rhs);
+ __ movq(kScratchRegister, rhs);
+ __ sar(kScratchRegister, Immediate(kSmiTagSize));
+ __ push(kScratchRegister);
+ __ fild_s(Operand(rsp, 0));
+ __ pop(kScratchRegister);
+
+ __ bind(&done);
+}
+
+void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float) {
+ Label test_other, done;
+  // Test whether both operands are numbers (heap numbers or smis);
+  // jump to non_float if either is not.
+ __ testl(rdx, Immediate(kSmiTagMask));
+ __ j(zero, &test_other); // argument in rdx is OK
+ __ movq(kScratchRegister,
+ Factory::heap_number_map(),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ cmpq(kScratchRegister, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ j(not_equal, non_float); // argument in rdx is not a number -> NaN
+
+ __ bind(&test_other);
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &done); // argument in rax is OK
+ __ movq(kScratchRegister,
+ Factory::heap_number_map(),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ cmpq(kScratchRegister, FieldOperand(rax, HeapObject::kMapOffset));
+ __ j(not_equal, non_float); // argument in rax is not a number -> NaN
+
+ // Fall-through: Both operands are numbers.
+ __ bind(&done);
+}
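+
+
+// CheckFloatOperands above falls through only when both operands are
+// numbers. Equivalent C++ (the predicates are illustrative):
+//
+//   if (!IsSmi(rdx) && !HasHeapNumberMap(rdx)) goto non_float;
+//   if (!IsSmi(rax) && !HasHeapNumberMap(rax)) goto non_float;
+//   // Fall through: both operands are numbers.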
+
+
+const char* GenericBinaryOpStub::GetName() {
+ switch (op_) {
+ case Token::ADD: return "GenericBinaryOpStub_ADD";
+ case Token::SUB: return "GenericBinaryOpStub_SUB";
+ case Token::MUL: return "GenericBinaryOpStub_MUL";
+ case Token::DIV: return "GenericBinaryOpStub_DIV";
+ case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+ case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+ case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+ case Token::SAR: return "GenericBinaryOpStub_SAR";
+ case Token::SHL: return "GenericBinaryOpStub_SHL";
+ case Token::SHR: return "GenericBinaryOpStub_SHR";
+ default: return "GenericBinaryOpStub";
+ }
+}
+
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+ // Perform fast-case smi code for the operation (rax <op> rbx) and
+ // leave result in register rax.
+
+ // Prepare the smi check of both operands by or'ing them together
+ // before checking against the smi mask.
+ __ movq(rcx, rbx);
+ __ or_(rcx, rax);
+
+ switch (op_) {
+ case Token::ADD:
+ __ addl(rax, rbx); // add optimistically
+ __ j(overflow, slow);
+ __ movsxlq(rax, rax); // Sign extend eax into rax.
+ break;
+
+ case Token::SUB:
+ __ subl(rax, rbx); // subtract optimistically
+ __ j(overflow, slow);
+ __ movsxlq(rax, rax); // Sign extend eax into rax.
+ break;
+
+ case Token::DIV:
+ case Token::MOD:
+ // Sign extend rax into rdx:rax
+ // (also sign extends eax into edx if eax is Smi).
+ __ cqo();
+ // Check for 0 divisor.
+ __ testq(rbx, rbx);
+ __ j(zero, slow);
+ break;
+
+ default:
+ // Fall-through to smi check.
+ break;
+ }
+
+ // Perform the actual smi check.
+ ASSERT(kSmiTag == 0); // adjust zero check if not the case
+ __ testl(rcx, Immediate(kSmiTagMask));
+ __ j(not_zero, slow);
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ // Do nothing here.
+ break;
+
+ case Token::MUL:
+ // If the smi tag is 0 we can just leave the tag on one operand.
+ ASSERT(kSmiTag == 0); // adjust code below if not the case
+ // Remove tag from one of the operands (but keep sign).
+ __ sar(rax, Immediate(kSmiTagSize));
+ // Do multiplication.
+ __ imull(rax, rbx); // multiplication of smis; result in eax
+ // Go slow on overflows.
+ __ j(overflow, slow);
+ // Check for negative zero result.
+ __ movsxlq(rax, rax); // Sign extend eax into rax.
+ __ NegativeZeroTest(rax, rcx, slow); // use rcx = x | y
+ break;
+
+ case Token::DIV:
+ // Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax).
+ __ idiv(rbx);
+ // Check that the remainder is zero.
+ __ testq(rdx, rdx);
+ __ j(not_zero, slow);
+ // Check for the corner case of dividing the most negative smi
+ // by -1. We cannot use the overflow flag, since it is not set
+ // by idiv instruction.
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ // TODO(X64): TODO(Smi): Smi implementation dependent constant.
+      // The quotient 0x40000000 arises from dividing the tagged bits of
+      // Smi::FromInt(-(1 << 30)) by those of Smi::FromInt(-1).
+ __ cmpq(rax, Immediate(0x40000000));
+ __ j(equal, slow);
+ // Check for negative zero result.
+      __ NegativeZeroTest(rax, rcx, slow);  // use rcx = x | y
+ // Tag the result and store it in register rax.
+ ASSERT(kSmiTagSize == times_2); // adjust code if not the case
+ __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
+ break;
+
+ case Token::MOD:
+ // Divide rdx:rax by rbx.
+ __ idiv(rbx);
+ // Check for negative zero result.
+      __ NegativeZeroTest(rdx, rcx, slow);  // use rcx = x | y
+ // Move remainder to register rax.
+ __ movq(rax, rdx);
+ break;
+
+ case Token::BIT_OR:
+ __ or_(rax, rbx);
+ break;
+
+ case Token::BIT_AND:
+ __ and_(rax, rbx);
+ break;
+
+ case Token::BIT_XOR:
+ ASSERT_EQ(0, kSmiTag);
+ __ xor_(rax, rbx);
+ break;
+
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+      // Move the second operand into register rcx.
+ __ movq(rcx, rbx);
+ // Remove tags from operands (but keep sign).
+ __ sar(rax, Immediate(kSmiTagSize));
+ __ sar(rcx, Immediate(kSmiTagSize));
+ // Perform the operation.
+ switch (op_) {
+ case Token::SAR:
+ __ sar(rax);
+ // No checks of result necessary
+ break;
+ case Token::SHR:
+ __ shrl(rax); // rcx is implicit shift register
+ // Check that the *unsigned* result fits in a smi.
+ // Neither of the two high-order bits can be set:
+ // - 0x80000000: high bit would be lost when smi tagging.
+ // - 0x40000000: this number would convert to negative when
+          //   smi tagging. These two cases can only happen with shifts
+          //   by 0 or 1 when handed a valid smi.
+ __ testq(rax, Immediate(0xc0000000));
+ __ j(not_zero, slow);
+ break;
+ case Token::SHL:
+ __ shll(rax);
+ // TODO(Smi): Significant change if Smi changes.
+ // Check that the *signed* result fits in a smi.
+ // It does, if the 30th and 31st bits are equal, since then
+ // shifting the SmiTag in at the bottom doesn't change the sign.
+ ASSERT(kSmiTagSize == 1);
+ __ cmpl(rax, Immediate(0xc0000000));
+ __ j(sign, slow);
+ __ movsxlq(rax, rax); // Extend new sign of eax into rax.
+ break;
+ default:
+ UNREACHABLE();
+ }
+      // Tag the result and store it in register rax.
+ ASSERT(kSmiTagSize == times_2); // adjust code if not the case
+ __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
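+
+
+// With kSmiTag == 0 and kSmiTagSize == 1, retagging an untagged value is
+// just doubling, which "lea rax, [rax + rax*1]" performs in a single
+// instruction without touching the flags:
+//
+//   untagged 21  ->  21 + 21 = 42 == (21 << 1)  // the smi encoding of 21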
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (flags_ == SMI_CODE_IN_STUB) {
+ // The fast case smi code wasn't inlined in the stub caller
+ // code. Generate it here to speed up common operations.
+ Label slow;
+ __ movq(rbx, Operand(rsp, 1 * kPointerSize)); // get y
+ __ movq(rax, Operand(rsp, 2 * kPointerSize)); // get x
+ GenerateSmiCode(masm, &slow);
+ __ ret(2 * kPointerSize); // remove both operands
+
+ // Too bad. The fast case smi code didn't succeed.
+ __ bind(&slow);
+ }
+
+ // Setup registers.
+ __ movq(rax, Operand(rsp, 1 * kPointerSize)); // get y
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // get x
+
+ // Floating point case.
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ // rax: y
+ // rdx: x
+ FloatingPointHelper::CheckFloatOperands(masm, &call_runtime);
+ // Fast-case: Both operands are numbers.
+ // Allocate a heap number, if needed.
+ Label skip_allocation;
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ __ movq(rax, rdx);
+ // Fall through!
+ case OVERWRITE_RIGHT:
+ // If the argument in rax is already an object, we skip the
+ // allocation of a heap number.
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ FloatingPointHelper::AllocateHeapNumber(masm,
+ &call_runtime,
+ rcx,
+ rax);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // xmm4 and xmm5 are volatile XMM registers.
+ FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm4, xmm5); break;
+ case Token::SUB: __ subsd(xmm4, xmm5); break;
+ case Token::MUL: __ mulsd(xmm4, xmm5); break;
+ case Token::DIV: __ divsd(xmm4, xmm5); break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
+ __ ret(2 * kPointerSize);
+ }
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ FloatingPointHelper::CheckFloatOperands(masm, &call_runtime);
+ // TODO(X64): Don't convert a Smi to float and then back to int32
+ // afterwards.
+ FloatingPointHelper::LoadFloatOperands(masm);
+
+ Label skip_allocation, non_smi_result, operand_conversion_failure;
+
+ // Reserve space for converted numbers.
+ __ subq(rsp, Immediate(2 * kPointerSize));
+
+ bool use_sse3 = CpuFeatures::IsSupported(CpuFeatures::SSE3);
+ if (use_sse3) {
+ // Truncate the operands to 32-bit integers and check for
+ // exceptions in doing so.
+ CpuFeatures::Scope scope(CpuFeatures::SSE3);
+ __ fisttp_s(Operand(rsp, 0 * kPointerSize));
+ __ fisttp_s(Operand(rsp, 1 * kPointerSize));
+ __ fnstsw_ax();
+ __ testl(rax, Immediate(1));
+ __ j(not_zero, &operand_conversion_failure);
+ } else {
+ // Check if right operand is int32.
+ __ fist_s(Operand(rsp, 0 * kPointerSize));
+ __ fild_s(Operand(rsp, 0 * kPointerSize));
+ __ fucompp();
+ __ fnstsw_ax();
+ __ sahf(); // TODO(X64): Not available.
+ __ j(not_zero, &operand_conversion_failure);
+ __ j(parity_even, &operand_conversion_failure);
+
+ // Check if left operand is int32.
+ __ fist_s(Operand(rsp, 1 * kPointerSize));
+ __ fild_s(Operand(rsp, 1 * kPointerSize));
+ __ fucompp();
+ __ fnstsw_ax();
+ __ sahf(); // TODO(X64): Not available. Test bits in ax directly
+ __ j(not_zero, &operand_conversion_failure);
+ __ j(parity_even, &operand_conversion_failure);
+ }
+
+ // Get int32 operands and perform bitop.
+ __ pop(rcx);
+ __ pop(rax);
+ switch (op_) {
+ case Token::BIT_OR: __ or_(rax, rcx); break;
+ case Token::BIT_AND: __ and_(rax, rcx); break;
+ case Token::BIT_XOR: __ xor_(rax, rcx); break;
+ case Token::SAR: __ sar(rax); break;
+ case Token::SHL: __ shl(rax); break;
+ case Token::SHR: __ shr(rax); break;
+ default: UNREACHABLE();
+ }
+ if (op_ == Token::SHR) {
+ // Check if result is non-negative and fits in a smi.
+ __ testl(rax, Immediate(0xc0000000));
+ __ j(not_zero, &non_smi_result);
+ } else {
+ // Check if result fits in a smi.
+ __ cmpl(rax, Immediate(0xc0000000));
+ __ j(negative, &non_smi_result);
+ }
+ // Tag smi result and return.
+ ASSERT(kSmiTagSize == times_2); // adjust code if not the case
+ __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
+ __ ret(2 * kPointerSize);
+
+ // All ops except SHR return a signed int32 that we load in a HeapNumber.
+ if (op_ != Token::SHR) {
+ __ bind(&non_smi_result);
+ // Allocate a heap number if needed.
+ __ movsxlq(rbx, rax); // rbx: sign extended 32-bit result
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ case OVERWRITE_RIGHT:
+ // If the operand was an object, we skip the
+ // allocation of a heap number.
+ __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
+ rcx, rax);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ __ movq(Operand(rsp, 1 * kPointerSize), rbx);
+ __ fild_s(Operand(rsp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ ret(2 * kPointerSize);
+ }
+
+ // Clear the FPU exception flag and reset the stack before calling
+ // the runtime system.
+ __ bind(&operand_conversion_failure);
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ if (use_sse3) {
+ // If we've used the SSE3 instructions for truncating the
+ // floating point values to integers and it failed, we have a
+ // pending #IA exception. Clear it.
+ __ fnclex();
+ } else {
+ // The non-SSE3 variant does early bailout if the right
+ // operand isn't a 32-bit integer, so we may have a single
+ // value on the FPU stack we need to get rid of.
+ __ ffree(0);
+ }
+
+ // SHR should return uint32 - go to runtime for non-smi/negative result.
+ if (op_ == Token::SHR) {
+ __ bind(&non_smi_result);
+ }
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+
+ // If all else fails, use the runtime system to get the correct
+ // result.
+ __ bind(&call_runtime);
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
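+
+
+// The 0xc0000000 tests above decide whether a 32-bit result survives smi
+// tagging, which costs one bit. A signed result fits iff bits 31 and 30
+// agree; an unsigned SHR result fits iff both are clear. For example:
+//
+//   0x3fffffff  (2^30 - 1):  bits 31..30 == 00  ->  fits
+//   0x40000000  (2^30):      bits 31..30 == 01  ->  does not fit
+//   0xc0000000  (-2^30):     bits 31..30 == 11  ->  fits (signed case only)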
+
+
+int CompareStub::MinorKey() {
+  // Encode the two parameters in a unique 16-bit value.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
+ return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
+}
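+
+
+// Example encoding (illustrative): for a given condition code cc with
+// strict_ set, the key is (cc << 1) | 1, so decoding is simply:
+//
+//   cc     = key >> 1;
+//   strict = (key & 1) != 0;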
+
+
#undef __
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/x64/codegen-x64.h b/V8Binding/v8/src/x64/codegen-x64.h
index 5f5daa4..0e8505a 100644
--- a/V8Binding/v8/src/x64/codegen-x64.h
+++ b/V8Binding/v8/src/x64/codegen-x64.h
@@ -273,6 +273,14 @@ class CodeGenState BASE_EMBEDDED {
};
+// -------------------------------------------------------------------------
+// Arguments allocation mode
+
+enum ArgumentsAllocationMode {
+ NO_ARGUMENTS_ALLOCATION,
+ EAGER_ARGUMENTS_ALLOCATION,
+ LAZY_ARGUMENTS_ALLOCATION
+};
// -------------------------------------------------------------------------
@@ -374,6 +382,12 @@ class CodeGenerator: public AstVisitor {
// target (which can not be done more than once).
void GenerateReturnSequence(Result* return_value);
+ // Returns the arguments allocation mode.
+ ArgumentsAllocationMode ArgumentsMode() const;
+
+ // Store the arguments object and allocate it if necessary.
+ Result StoreArgumentsObject(bool initial);
+
// The following are used by class Reference.
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
@@ -409,6 +423,7 @@ class CodeGenerator: public AstVisitor {
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+ void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow);
@@ -499,11 +514,15 @@ class CodeGenerator: public AstVisitor {
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
+ // Support for construct call checks.
+ void GenerateIsConstructCall(ZoneList<Expression*>* args);
+
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArgumentsAccess(ZoneList<Expression*>* args);
- // Support for accessing the value field of an object (used by Date).
+ // Support for accessing the class and value fields of an object.
+ void GenerateClassOf(ZoneList<Expression*>* args);
void GenerateValueOf(ZoneList<Expression*>* args);
void GenerateSetValueOf(ZoneList<Expression*>* args);
@@ -515,6 +534,14 @@ class CodeGenerator: public AstVisitor {
void GenerateLog(ZoneList<Expression*>* args);
+ // Fast support for Math.random().
+ void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+
+ // Fast support for Math.sin and Math.cos.
+ enum MathOp { SIN, COS };
+ void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
+ inline void GenerateMathSin(ZoneList<Expression*>* args);
+ inline void GenerateMathCos(ZoneList<Expression*>* args);
// Methods and constants for fast case switch statement support.
//
diff --git a/V8Binding/v8/src/x64/frames-x64.cc b/V8Binding/v8/src/x64/frames-x64.cc
index 209aa2d..fe224ad 100644
--- a/V8Binding/v8/src/x64/frames-x64.cc
+++ b/V8Binding/v8/src/x64/frames-x64.cc
@@ -25,3 +25,89 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+#include "frames-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+StackFrame::Type StackFrame::ComputeType(State* state) {
+ ASSERT(state->fp != NULL);
+ if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+ return ARGUMENTS_ADAPTOR;
+ }
+ // The marker and function offsets overlap. If the marker isn't a
+ // smi then the frame is a JavaScript frame -- and the marker is
+ // really the function.
+ const int offset = StandardFrameConstants::kMarkerOffset;
+ Object* marker = Memory::Object_at(state->fp + offset);
+ if (!marker->IsSmi()) return JAVA_SCRIPT;
+ return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+}
+
+
+StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
+ if (fp == 0) return NONE;
+ // Compute the stack pointer.
+ Address sp = Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
+ // Fill in the state.
+ state->fp = fp;
+ state->sp = sp;
+ state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+ // Determine frame type.
+ if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
+ return EXIT_DEBUG;
+ } else {
+ return EXIT;
+ }
+}
+
+int JavaScriptFrame::GetProvidedParametersCount() const {
+ return ComputeParametersCount();
+}
+
+
+void ExitFrame::Iterate(ObjectVisitor* a) const {
+ // Exit frames on X64 do not contain any pointers. The arguments
+ // are traversed as part of the expression stack of the calling
+ // frame.
+}
+
+byte* InternalFrame::GetCallerStackPointer() const {
+ // Internal frames have no arguments. The stack pointer of the
+ // caller is at a fixed offset from the frame pointer.
+ return fp() + StandardFrameConstants::kCallerSPOffset;
+}
+
+byte* JavaScriptFrame::GetCallerStackPointer() const {
+ int arguments;
+ if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
+ // The arguments for cooked frames are traversed as if they were
+ // expression stack elements of the calling frame. The reason for
+ // this rather strange decision is that we cannot access the
+ // function during mark-compact GCs when the stack is cooked.
+ // In fact accessing heap objects (like function->shared() below)
+ // at all during GC is problematic.
+ arguments = 0;
+ } else {
+ // Compute the number of arguments by getting the number of formal
+ // parameters of the function. We must remember to take the
+ // receiver into account (+1).
+ JSFunction* function = JSFunction::cast(this->function());
+ arguments = function->shared()->formal_parameter_count() + 1;
+ }
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ return fp() + offset + (arguments * kPointerSize);
+}
+
+
+byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+ const int arguments = Smi::cast(GetExpression(0))->value();
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ return fp() + offset + (arguments + 1) * kPointerSize;
+}
+
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/x64/frames-x64.h b/V8Binding/v8/src/x64/frames-x64.h
index 3416f51..24c78da 100644
--- a/V8Binding/v8/src/x64/frames-x64.h
+++ b/V8Binding/v8/src/x64/frames-x64.h
@@ -32,70 +32,69 @@ namespace v8 {
namespace internal {
// TODO(x64): This is a stub, mostly just a copy of the ia32 bit version.
-// This will all need to change to be correct for x64.
+// This might all need to change to be correct for x64.
static const int kNumRegs = 8;
-static const RegList kJSCallerSaved = 0;
+static const RegList kJSCallerSaved =
+ 1 << 0 | // rax
+ 1 << 1 | // rcx
+ 1 << 2 | // rdx
+ 1 << 3 | // rbx - used as a caller-saved register in JavaScript code
+ 1 << 7; // rdi - callee function
+
static const int kNumJSCallerSaved = 5;
+
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
class StackHandlerConstants : public AllStatic {
public:
static const int kNextOffset = 0 * kPointerSize;
- static const int kPPOffset = 1 * kPointerSize;
- static const int kFPOffset = 2 * kPointerSize;
-
- static const int kCodeOffset = 3 * kPointerSize;
+ static const int kFPOffset = 1 * kPointerSize;
+ static const int kStateOffset = 2 * kPointerSize;
+ static const int kPCOffset = 3 * kPointerSize;
- static const int kStateOffset = 4 * kPointerSize;
- static const int kPCOffset = 5 * kPointerSize;
-
- static const int kAddressDisplacement = -1 * kPointerSize;
- static const int kSize = 6 * kPointerSize;
+ static const int kSize = 4 * kPointerSize;
};
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset = -1 * kPointerSize;
-
- static const int kFunctionArgOffset = -1 * kPointerSize;
- static const int kReceiverArgOffset = -1 * kPointerSize;
- static const int kArgcOffset = -1 * kPointerSize;
- static const int kArgvOffset = -1 * kPointerSize;
+ static const int kCallerFPOffset = -10 * kPointerSize;
};
class ExitFrameConstants : public AllStatic {
public:
- static const int kDebugMarkOffset = -1 * kPointerSize;
+ static const int kDebugMarkOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
- static const int kPPDisplacement = -1 * kPointerSize;
+ static const int kCallerFPOffset = +0 * kPointerSize;
+ static const int kCallerPCOffset = +1 * kPointerSize;
- static const int kCallerFPOffset = -1 * kPointerSize;
- static const int kCallerPCOffset = -1 * kPointerSize;
+ // FP-relative displacement of the caller's SP. It points just
+ // below the saved PC.
+ static const int kCallerSPDisplacement = +2 * kPointerSize;
};
class StandardFrameConstants : public AllStatic {
public:
- static const int kExpressionsOffset = -1 * kPointerSize;
- static const int kMarkerOffset = -1 * kPointerSize;
+ static const int kExpressionsOffset = -3 * kPointerSize;
+ static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = -1 * kPointerSize;
- static const int kCallerPCOffset = -1 * kPointerSize;
- static const int kCallerSPOffset = -1 * kPointerSize;
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ static const int kCallerPCOffset = +1 * kPointerSize;
+ static const int kCallerSPOffset = +2 * kPointerSize;
};
class JavaScriptFrameConstants : public AllStatic {
public:
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kSavedRegistersOffset = -1 * kPointerSize;
+ static const int kSavedRegistersOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
- static const int kParam0Offset = -1 * kPointerSize;
+ static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
};
diff --git a/V8Binding/v8/src/x64/ic-x64.cc b/V8Binding/v8/src/x64/ic-x64.cc
index 71a3a9a..abaffb3 100644
--- a/V8Binding/v8/src/x64/ic-x64.cc
+++ b/V8Binding/v8/src/x64/ic-x64.cc
@@ -35,22 +35,56 @@
namespace v8 {
namespace internal {
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
void KeyedLoadIC::ClearInlinedVersion(Address address) {
UNIMPLEMENTED();
}
+void KeyedStoreIC::ClearInlinedVersion(Address address) {
+ UNIMPLEMENTED();
+}
+
+void KeyedStoreIC::RestoreInlinedVersion(Address address) {
+ UNIMPLEMENTED();
+}
+
+
void KeyedLoadIC::Generate(MacroAssembler* masm,
ExternalReference const& f) {
- masm->int3(); // UNIMPLEMENTED.
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+
+ // Move the return address below the arguments.
+ __ pop(rbx);
+ __ push(rcx);
+ __ push(rax);
+ __ push(rbx);
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(f, 2);
}
+
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC0AB)); // Debugging aid.
}
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC1AB)); // Debugging aid.
}
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
@@ -58,6 +92,11 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
return false;
}
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+ UNIMPLEMENTED();
+ return false;
+}
+
Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
UNIMPLEMENTED();
return NULL;
@@ -105,15 +144,32 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
}
void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
- masm->int3(); // UNIMPLEMENTED.
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rsp[0] : return address
+ // -- rsp[8] : key
+ // -- rsp[16] : receiver
+ // -----------------------------------
+
+ // Move the return address below the arguments.
+ __ pop(rcx);
+ __ push(Operand(rsp, 1 * kPointerSize));
+ __ push(Operand(rsp, 1 * kPointerSize));
+ __ push(rax);
+ __ push(rcx);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(f, 3);
}
void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC2AB)); // Debugging aid.
}
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC3AB)); // Debugging aid.
}
Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
@@ -124,36 +180,121 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
return NULL;
}
+
+void CallIC::Generate(MacroAssembler* masm,
+ int argc,
+ ExternalReference const& f) {
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ // Get the name of the function to call from the stack.
+ // 2 ~ receiver, return address.
+ __ movq(rbx, Operand(rsp, (argc + 2) * kPointerSize));
+
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Push the receiver and the name of the function.
+ __ push(rdx);
+ __ push(rbx);
+
+ // Call the entry.
+ CEntryStub stub;
+ __ movq(rax, Immediate(2));
+ __ movq(rbx, f);
+ __ CallStub(&stub);
+
+ // Move result to rdi and exit the internal frame.
+ __ movq(rdi, rax);
+ __ LeaveInternalFrame();
+
+ // Check if the receiver is a global object of some sort.
+ Label invoke, global;
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver
+ __ testl(rdx, Immediate(kSmiTagMask));
+ __ j(zero, &invoke);
+ __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movzxbq(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
+ __ cmpq(rcx, Immediate(static_cast<int8_t>(JS_GLOBAL_OBJECT_TYPE)));
+ __ j(equal, &global);
+ __ cmpq(rcx, Immediate(static_cast<int8_t>(JS_BUILTINS_OBJECT_TYPE)));
+ __ j(not_equal, &invoke);
+
+ // Patch the receiver on the stack.
+ __ bind(&global);
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+ __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ bind(&invoke);
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+}
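+
+// The receiver patching above replaces a global or builtins object with
+// its global receiver before the call. Roughly (the accessor names are
+// illustrative, not necessarily the V8 API):
+//
+//   if (!receiver->IsSmi() &&
+//       (receiver->IsJSGlobalObject() || receiver->IsJSBuiltinsObject())) {
+//     receiver = global_object->global_receiver();
+//   }
+//   InvokeFunction(function, argc);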
+
+void CallIC::GenerateMegamorphic(MacroAssembler* a, int b) {
+ UNIMPLEMENTED();
+}
+
+void CallIC::GenerateNormal(MacroAssembler* a, int b) {
+ UNIMPLEMENTED();
+}
+
+
+const int LoadIC::kOffsetToLoadInstruction = 20;
+
+
void LoadIC::ClearInlinedVersion(Address address) {
UNIMPLEMENTED();
}
+
void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
- masm->int3(); // UNIMPLEMENTED.
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+
+ // Move the return address below the arguments.
+ __ pop(rbx);
+ __ push(rax);
+ __ push(rcx);
+ __ push(rbx);
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(f, 2);
}
+
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC4AB)); // Debugging aid.
}
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC5AB)); // Debugging aid.
}
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC6AB)); // Debugging aid.
}
void LoadIC::GenerateMiss(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC7AB)); // Debugging aid.
}
void LoadIC::GenerateNormal(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC8AB)); // Debugging aid.
}
void LoadIC::GenerateStringLength(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xC9AB)); // Debugging aid.
}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int index) {
@@ -162,15 +303,35 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int index) {
}
void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
- masm->int3(); // UNIMPLEMENTED.
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+ // Move the return address below the arguments.
+ __ pop(rbx);
+ __ push(Operand(rsp, 0));
+ __ push(rcx);
+ __ push(rax);
+ __ push(rbx);
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(f, 3);
}
void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xCAAB)); // Debugging aid.
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED.
+ masm->movq(kScratchRegister, Immediate(0xCBAB)); // Debugging aid.
}
+
+#undef __
+
+
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/x64/jump-target-x64.cc b/V8Binding/v8/src/x64/jump-target-x64.cc
index 209aa2d..b804044 100644
--- a/V8Binding/v8/src/x64/jump-target-x64.cc
+++ b/V8Binding/v8/src/x64/jump-target-x64.cc
@@ -25,3 +25,344 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "jump-target-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+#define __ ACCESS_MASM(cgen()->masm())
+
+void JumpTarget::DoJump() {
+ ASSERT(cgen()->has_valid_frame());
+ // Live non-frame registers are not allowed at unconditional jumps
+ // because we have no way of invalidating the corresponding results
+ // which are still live in the C++ code.
+ ASSERT(cgen()->HasValidEntryRegisters());
+
+ if (is_bound()) {
+ // Backward jump. There is an expected frame to merge to.
+ ASSERT(direction_ == BIDIRECTIONAL);
+ cgen()->frame()->PrepareMergeTo(entry_frame_);
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
+ __ jmp(&entry_label_);
+ } else if (entry_frame_ != NULL) {
+ // Forward jump with a preconfigured entry frame. Assert the
+ // current frame matches the expected one and jump to the block.
+ ASSERT(cgen()->frame()->Equals(entry_frame_));
+ cgen()->DeleteFrame();
+ __ jmp(&entry_label_);
+ } else {
+ // Forward jump. Remember the current frame and emit a jump to
+ // its merge code.
+ AddReachingFrame(cgen()->frame());
+ RegisterFile empty;
+ cgen()->SetFrame(NULL, &empty);
+ __ jmp(&merge_labels_.last());
+ }
+}
+
+
+void JumpTarget::DoBranch(Condition cc, Hint b) {
+ ASSERT(cgen() != NULL);
+ ASSERT(cgen()->has_valid_frame());
+
+ if (is_bound()) {
+ ASSERT(direction_ == BIDIRECTIONAL);
+ // Backward branch. We have an expected frame to merge to on the
+ // backward edge.
+
+ // Swap the current frame for a copy (we do the swapping to get
+ // the off-frame registers off the fall through) to use for the
+ // branch.
+ VirtualFrame* fall_through_frame = cgen()->frame();
+ VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
+ RegisterFile non_frame_registers;
+ cgen()->SetFrame(branch_frame, &non_frame_registers);
+
+ // Check if we can avoid merge code.
+ cgen()->frame()->PrepareMergeTo(entry_frame_);
+ if (cgen()->frame()->Equals(entry_frame_)) {
+ // Branch right in to the block.
+ cgen()->DeleteFrame();
+ __ j(cc, &entry_label_);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+ return;
+ }
+
+ // Check if we can reuse existing merge code.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ if (reaching_frames_[i] != NULL &&
+ cgen()->frame()->Equals(reaching_frames_[i])) {
+ // Branch to the merge code.
+ cgen()->DeleteFrame();
+ __ j(cc, &merge_labels_[i]);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+ return;
+ }
+ }
+
+ // To emit the merge code here, we negate the condition and branch
+ // around the merge code on the fall through path.
+ Label original_fall_through;
+ __ j(NegateCondition(cc), &original_fall_through);
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
+ __ jmp(&entry_label_);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+ __ bind(&original_fall_through);
+
+ } else if (entry_frame_ != NULL) {
+ // Forward branch with a preconfigured entry frame. Assert the
+ // current frame matches the expected one and branch to the block.
+ ASSERT(cgen()->frame()->Equals(entry_frame_));
+ // Explicitly use the macro assembler instead of __ as forward
+ // branches are expected to be a fixed size (no inserted
+ // coverage-checking instructions please). This is used in
+ // Reference::GetValue.
+ cgen()->masm()->j(cc, &entry_label_);
+
+ } else {
+ // Forward branch. A copy of the current frame is remembered and
+ // a branch to the merge code is emitted. Explicitly use the
+ // macro assembler instead of __ as forward branches are expected
+ // to be a fixed size (no inserted coverage-checking instructions
+ // please). This is used in Reference::GetValue.
+ AddReachingFrame(new VirtualFrame(cgen()->frame()));
+ cgen()->masm()->j(cc, &merge_labels_.last());
+ }
+}
+
+
+void JumpTarget::Call() {
+ // Call is used to push the address of the catch block on the stack as
+ // a return address when compiling try/catch and try/finally. We
+ // fully spill the frame before making the call. The expected frame
+ // at the label (which should be the only one) is the spilled current
+ // frame plus an in-memory return address. The "fall-through" frame
+ // at the return site is the spilled current frame.
+ ASSERT(cgen() != NULL);
+ ASSERT(cgen()->has_valid_frame());
+ // There are no non-frame references across the call.
+ ASSERT(cgen()->HasValidEntryRegisters());
+ ASSERT(!is_linked());
+
+ cgen()->frame()->SpillAll();
+ VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
+ target_frame->Adjust(1);
+ // We do not expect a call with a preconfigured entry frame.
+ ASSERT(entry_frame_ == NULL);
+ AddReachingFrame(target_frame);
+ __ call(&merge_labels_.last());
+}
+
+
+void JumpTarget::DoBind() {
+ ASSERT(cgen() != NULL);
+ ASSERT(!is_bound());
+
+ // Live non-frame registers are not allowed at the start of a basic
+ // block.
+ ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
+
+ // Fast case: the jump target was manually configured with an entry
+ // frame to use.
+ if (entry_frame_ != NULL) {
+ // Assert no reaching frames to deal with.
+ ASSERT(reaching_frames_.is_empty());
+ ASSERT(!cgen()->has_valid_frame());
+
+ RegisterFile empty;
+ if (direction_ == BIDIRECTIONAL) {
+ // Copy the entry frame so the original can be used for a
+ // possible backward jump.
+ cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+ } else {
+ // Take ownership of the entry frame.
+ cgen()->SetFrame(entry_frame_, &empty);
+ entry_frame_ = NULL;
+ }
+ __ bind(&entry_label_);
+ return;
+ }
+
+ if (!is_linked()) {
+ ASSERT(cgen()->has_valid_frame());
+ if (direction_ == FORWARD_ONLY) {
+ // Fast case: no forward jumps and no possible backward jumps.
+ // The stack pointer can be floating above the top of the
+ // virtual frame before the bind. Afterward, it should not.
+ VirtualFrame* frame = cgen()->frame();
+ int difference = frame->stack_pointer_ - (frame->element_count() - 1);
+ if (difference > 0) {
+ frame->stack_pointer_ -= difference;
+ __ addq(rsp, Immediate(difference * kPointerSize));
+ }
+ } else {
+ ASSERT(direction_ == BIDIRECTIONAL);
+ // Fast case: no forward jumps, possible backward ones. Remove
+ // constants and copies above the watermark on the fall-through
+ // frame and use it as the entry frame.
+ cgen()->frame()->MakeMergable();
+ entry_frame_ = new VirtualFrame(cgen()->frame());
+ }
+ __ bind(&entry_label_);
+ return;
+ }
+
+ if (direction_ == FORWARD_ONLY &&
+ !cgen()->has_valid_frame() &&
+ reaching_frames_.length() == 1) {
+ // Fast case: no fall-through, a single forward jump, and no
+ // possible backward jumps. Pick up the only reaching frame, take
+ // ownership of it, and use it for the block about to be emitted.
+ VirtualFrame* frame = reaching_frames_[0];
+ RegisterFile empty;
+ cgen()->SetFrame(frame, &empty);
+ reaching_frames_[0] = NULL;
+ __ bind(&merge_labels_[0]);
+
+ // The stack pointer can be floating above the top of the
+ // virtual frame before the bind. Afterward, it should not.
+ int difference = frame->stack_pointer_ - (frame->element_count() - 1);
+ if (difference > 0) {
+ frame->stack_pointer_ -= difference;
+ __ addq(rsp, Immediate(difference * kPointerSize));
+ }
+
+ __ bind(&entry_label_);
+ return;
+ }
+
+ // If there is a current frame, record it as the fall-through. It
+ // is owned by the reaching frames for now.
+ bool had_fall_through = false;
+ if (cgen()->has_valid_frame()) {
+ had_fall_through = true;
+ AddReachingFrame(cgen()->frame()); // Return value ignored.
+ RegisterFile empty;
+ cgen()->SetFrame(NULL, &empty);
+ }
+
+ // Compute the frame to use for entry to the block.
+ ComputeEntryFrame();
+
+ // Some moves required to merge to an expected frame require purely
+ // frame state changes, and do not require any code generation.
+ // Perform those first to increase the possibility of finding equal
+ // frames below.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ if (reaching_frames_[i] != NULL) {
+ reaching_frames_[i]->PrepareMergeTo(entry_frame_);
+ }
+ }
+
+ if (is_linked()) {
+ // There were forward jumps. Handle merging the reaching frames
+ // to the entry frame.
+
+ // Loop over the (non-null) reaching frames and process any that
+ // need merge code. Iterate backwards through the list to handle
+ // the fall-through frame first. Set frames that will be
+ // processed after 'i' to NULL if we want to avoid processing
+ // them.
+ for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
+ VirtualFrame* frame = reaching_frames_[i];
+
+ if (frame != NULL) {
+ // Does the frame (probably) need merge code?
+ if (!frame->Equals(entry_frame_)) {
+ // We could have a valid frame as the fall through to the
+ // binding site or as the fall through from a previous merge
+ // code block. Jump around the code we are about to
+ // generate.
+ if (cgen()->has_valid_frame()) {
+ cgen()->DeleteFrame();
+ __ jmp(&entry_label_);
+ }
+ // Pick up the frame for this block. Assume ownership if
+ // there cannot be backward jumps.
+ RegisterFile empty;
+ if (direction_ == BIDIRECTIONAL) {
+ cgen()->SetFrame(new VirtualFrame(frame), &empty);
+ } else {
+ cgen()->SetFrame(frame, &empty);
+ reaching_frames_[i] = NULL;
+ }
+ __ bind(&merge_labels_[i]);
+
+ // Loop over the remaining (non-null) reaching frames,
+ // looking for any that can share merge code with this one.
+ for (int j = 0; j < i; j++) {
+ VirtualFrame* other = reaching_frames_[j];
+ if (other != NULL && other->Equals(cgen()->frame())) {
+ // Set the reaching frame element to null to avoid
+ // processing it later, and then bind its entry label.
+ reaching_frames_[j] = NULL;
+ __ bind(&merge_labels_[j]);
+ }
+ }
+
+ // Emit the merge code.
+ cgen()->frame()->MergeTo(entry_frame_);
+ } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
+ // If this is the fall through frame, and it didn't need
+ // merge code, we need to pick up the frame so we can jump
+ // around subsequent merge blocks if necessary.
+ RegisterFile empty;
+ cgen()->SetFrame(frame, &empty);
+ reaching_frames_[i] = NULL;
+ }
+ }
+ }
+
+ // The code generator may not have a current frame if there was no
+ // fall through and none of the reaching frames needed merging.
+ // In that case, clone the entry frame as the current frame.
+ if (!cgen()->has_valid_frame()) {
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+ }
+
+ // There may be unprocessed reaching frames that did not need
+ // merge code. They will have unbound merge labels. Bind their
+ // merge labels to be the same as the entry label and deallocate
+ // them.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ if (!merge_labels_[i].is_bound()) {
+ reaching_frames_[i] = NULL;
+ __ bind(&merge_labels_[i]);
+ }
+ }
+
+ // There are non-NULL reaching frames with bound labels for each
+ // merge block, but only on backward targets.
+ } else {
+ // There were no forward jumps. There must be a current frame and
+ // this must be a bidirectional target.
+ ASSERT(reaching_frames_.length() == 1);
+ ASSERT(reaching_frames_[0] != NULL);
+ ASSERT(direction_ == BIDIRECTIONAL);
+
+ // Use a copy of the reaching frame so the original can be saved
+ // for possible reuse as a backward merge block.
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
+ __ bind(&merge_labels_[0]);
+ cgen()->frame()->MergeTo(entry_frame_);
+ }
+
+ __ bind(&entry_label_);
+}
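// Both fast paths in DoBind reconcile the hardware stack pointer with the
// top of the virtual frame. A standalone sketch in plain C++ (not part of
// this patch) of that fixup: when rsp floats above the virtual top, the
// bind pops the difference so real and virtual stacks agree again.
#include <cassert>

// Returns the number of slots to pop (the addq rsp, n * kPointerSize).
int FixupOnBind(int* stack_pointer, int element_count) {
  int difference = *stack_pointer - (element_count - 1);
  if (difference > 0) {
    *stack_pointer -= difference;
    return difference;
  }
  return 0;
}

int main() {
  int sp = 9;
  assert(FixupOnBind(&sp, 8) == 2 && sp == 7);  // Was floating two slots up.
  assert(FixupOnBind(&sp, 8) == 0 && sp == 7);  // Already in sync: no code.
  return 0;
}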
+
+#undef __
+
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/x64/macro-assembler-x64.cc b/V8Binding/v8/src/x64/macro-assembler-x64.cc
index 54c299d..780fcdb 100644
--- a/V8Binding/v8/src/x64/macro-assembler-x64.cc
+++ b/V8Binding/v8/src/x64/macro-assembler-x64.cc
@@ -29,7 +29,9 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
+#include "assembler-x64.h"
#include "macro-assembler-x64.h"
+#include "debug.h"
namespace v8 {
namespace internal {
@@ -43,8 +45,165 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
}
-void MacroAssembler::TailCallRuntime(ExternalReference const& a, int b) {
- UNIMPLEMENTED();
+// TODO(x64): For now, the write barrier is disabled on x64 and we
+// therefore generate no code. This should be fixed when the write
+// barrier is enabled.
+void MacroAssembler::RecordWrite(Register object, int offset,
+ Register value, Register scratch) {
+}
+
+
+void MacroAssembler::Assert(Condition cc, const char* msg) {
+ if (FLAG_debug_code) Check(cc, msg);
+}
+
+
+void MacroAssembler::Check(Condition cc, const char* msg) {
+ Label L;
+ j(cc, &L);
+ Abort(msg);
+ // will not return here
+ bind(&L);
+}
+
+
+void MacroAssembler::NegativeZeroTest(Register result,
+ Register op,
+ Label* then_label) {
+ Label ok;
+ testq(result, result);
+ j(not_zero, &ok);
+ testq(op, op);
+ j(sign, then_label);
+ bind(&ok);
+}
+
+
+void MacroAssembler::Abort(const char* msg) {
+ // We want to pass the msg string like a smi to avoid GC
+ // problems; however, msg is not guaranteed to be aligned
+ // properly. Instead, we pass an aligned pointer that is
+ // a proper v8 smi, but also pass the alignment difference
+ // from the real pointer as a smi.
+ intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+ intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+ // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
+ ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
+#ifdef DEBUG
+ if (msg != NULL) {
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
+#endif
+ push(rax);
+ movq(kScratchRegister, p0, RelocInfo::NONE);
+ push(kScratchRegister);
+ movq(kScratchRegister,
+ reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0)),
+ RelocInfo::NONE);
+ push(kScratchRegister);
+ CallRuntime(Runtime::kAbort, 2);
+ // will not return here
+}
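// The pointer-splitting trick in Abort is easy to check in isolation. A
// standalone sketch in plain C++ (not part of this patch; the tag
// constants mirror V8's one-bit smi tag): the message pointer is split
// into a smi-tagged base plus a small delta, so the GC never mistakes
// either value for a heap pointer, and the runtime recovers the pointer
// by adding them back together.
#include <cassert>
#include <stdint.h>

const intptr_t kSmiTagMask = 1;
const intptr_t kSmiTag = 0;

int main() {
  const char* msg = "unreachable code";
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;  // Aligned, smi-tagged base.
  intptr_t delta = p1 - p0;  // 0 or 1; passed as Smi::FromInt(p1 - p0) above.
  assert((p0 & kSmiTagMask) == kSmiTag);  // Looks like a smi to the GC.
  assert(p0 + delta == p1);               // The runtime recovers msg.
  return 0;
}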
+
+
+void MacroAssembler::CallStub(CodeStub* stub) {
+ ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
+ movq(kScratchRegister, stub->GetCode(), RelocInfo::CODE_TARGET);
+ call(kScratchRegister);
+}
+
+
+void MacroAssembler::StubReturn(int argc) {
+ ASSERT(argc >= 1 && generating_stub());
+ ret((argc - 1) * kPointerSize);
+}
+
+
+void MacroAssembler::IllegalOperation(int num_arguments) {
+ if (num_arguments > 0) {
+ addq(rsp, Immediate(num_arguments * kPointerSize));
+ }
+ movq(rax, Factory::undefined_value(), RelocInfo::EMBEDDED_OBJECT);
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments);
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+ // If the expected number of arguments of the runtime function is
+ // constant, we check that the actual number of arguments match the
+ // expectation.
+ if (f->nargs >= 0 && f->nargs != num_arguments) {
+ IllegalOperation(num_arguments);
+ return;
+ }
+
+ Runtime::FunctionId function_id =
+ static_cast<Runtime::FunctionId>(f->stub_id);
+ RuntimeStub stub(function_id, num_arguments);
+ CallStub(&stub);
+}
+
+
+void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
+ int num_arguments) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ movq(rax, Immediate(num_arguments));
+ JumpToBuiltin(ext);
+}
+
+
+void MacroAssembler::JumpToBuiltin(const ExternalReference& ext) {
+ // Set the entry point and jump to the C entry runtime stub.
+ movq(rbx, ext);
+ CEntryStub ces;
+ movq(kScratchRegister, ces.GetCode(), RelocInfo::CODE_TARGET);
+ jmp(kScratchRegister);
+}
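// TailCallRuntime and JumpToBuiltin together implement one convention: the
// argument count travels in rax, the runtime entry point in rbx, and
// control transfers to a single shared C-entry stub instead of calling the
// routine directly. A standalone sketch in plain C++ (illustrative only,
// not part of this patch):
#include <cstdio>

typedef void (*RuntimeEntry)(int argc);

void CEntryTrampoline(RuntimeEntry entry, int argc) {
  // The real stub builds an exit frame first; here we simply forward.
  entry(argc);
}

void StoreRuntime(int argc) { std::printf("runtime store, %d args\n", argc); }

void TailCallRuntime(RuntimeEntry ext, int num_arguments) {
  // Models: movq(rax, Immediate(num_arguments)); JumpToBuiltin(ext);
  CEntryTrampoline(ext, num_arguments);
}

int main() { TailCallRuntime(StoreRuntime, 3); }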
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+ bool resolved;
+ Handle<Code> code = ResolveBuiltin(id, &resolved);
+
+ const char* name = Builtins::GetName(id);
+ int argc = Builtins::GetArgumentsCount(id);
+
+ movq(target, code, RelocInfo::EXTERNAL_REFERENCE); // Is external reference?
+ if (!resolved) {
+ uint32_t flags =
+ Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+ Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
+ Bootstrapper::FixupFlagsUseCodeObject::encode(true);
+ Unresolved entry = { pc_offset() - sizeof(intptr_t), flags, name };
+ unresolved_.Add(entry);
+ }
+ addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+}
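// The final addq in GetBuiltinEntry folds two adjustments into one: a heap
// object pointer carries a one-bit tag, so the code entry point is the
// object address minus the tag plus the header size. A standalone sketch
// in plain C++ (not part of this patch; the header size is illustrative):
#include <cassert>
#include <stdint.h>

const intptr_t kHeapObjectTag = 1;
const intptr_t kCodeHeaderSize = 64;  // Stands in for Code::kHeaderSize.

intptr_t EntryAddress(intptr_t tagged_code_object) {
  // Models: addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag)).
  return tagged_code_object + (kCodeHeaderSize - kHeapObjectTag);
}

int main() {
  intptr_t object = 0x10000;  // Untagged object start address.
  intptr_t tagged = object + kHeapObjectTag;
  assert(EntryAddress(tagged) == object + kCodeHeaderSize);
  return 0;
}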
+
+
+Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
+ bool* resolved) {
+ // Move the builtin function into the temporary function slot by
+ // reading it from the builtins object. NOTE: We should be able to
+ // reduce this to two instructions by putting the function table in
+ // the global object instead of the "builtins" object and by using a
+ // real register for the function.
+ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ movq(rdx, FieldOperand(rdx, GlobalObject::kBuiltinsOffset));
+ int builtins_offset =
+ JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
+ movq(rdi, FieldOperand(rdx, builtins_offset));
+
+ return Builtins::GetCode(id, resolved);
}
@@ -71,18 +230,121 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
}
+bool MacroAssembler::IsUnsafeSmi(Smi* value) {
+ return false;
+}
+
+void MacroAssembler::LoadUnsafeSmi(Register dst, Smi* source) {
+ UNIMPLEMENTED();
+}
+
+
+void MacroAssembler::Move(Register dst, Handle<Object> source) {
+ if (source->IsSmi()) {
+ if (IsUnsafeSmi(source)) {
+ LoadUnsafeSmi(dst, source);
+ } else {
+ movq(dst, source, RelocInfo::NONE);
+ }
+ } else {
+ movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
+ }
+}
+
+
+void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
+ Move(kScratchRegister, source);
+ movq(dst, kScratchRegister);
+}
+
+
+void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
+ Move(kScratchRegister, source);
+ cmpq(dst, kScratchRegister);
+}
+
+
+void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
+ Move(kScratchRegister, source);
+ cmpq(dst, kScratchRegister);
+}
+
+
+void MacroAssembler::Push(Handle<Object> source) {
+ Move(kScratchRegister, source);
+ push(kScratchRegister);
+}
+
+
+void MacroAssembler::Jump(ExternalReference ext) {
+ movq(kScratchRegister, ext);
+ jmp(kScratchRegister);
+}
+
+
+void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
+ movq(kScratchRegister, destination, rmode);
+ jmp(kScratchRegister);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
+ WriteRecordedPositions();
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ movq(kScratchRegister, code_object, rmode);
+#ifdef DEBUG
+ Label target;
+ bind(&target);
+#endif
+ jmp(kScratchRegister);
+#ifdef DEBUG
+ ASSERT_EQ(kTargetAddrToReturnAddrDist,
+ SizeOfCodeGeneratedSince(&target) + kPointerSize);
+#endif
+}
+
+
+void MacroAssembler::Call(ExternalReference ext) {
+ movq(kScratchRegister, ext);
+ call(kScratchRegister);
+}
+
+
+void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
+ movq(kScratchRegister, destination, rmode);
+ call(kScratchRegister);
+}
+
+
+void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
+ WriteRecordedPositions();
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ movq(kScratchRegister, code_object, rmode);
+#ifdef DEBUG
+ Label target;
+ bind(&target);
+#endif
+ call(kScratchRegister);
+#ifdef DEBUG
+ ASSERT_EQ(kTargetAddrToReturnAddrDist,
+ SizeOfCodeGeneratedSince(&target) + kPointerSize);
+#endif
+}
+
+
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
- // The pc (return address) is already on TOS.
- // This code pushes state, code, frame pointer and parameter pointer.
- // Check that they are expected next on the stack, int that order.
+ // Adjust this code if not the case.
+ ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // The pc (return address) is already on TOS. This code pushes state,
+ // frame pointer and current handler. Check that they are expected
+ // next on the stack, in that order.
ASSERT_EQ(StackHandlerConstants::kStateOffset,
StackHandlerConstants::kPCOffset - kPointerSize);
- ASSERT_EQ(StackHandlerConstants::kCodeOffset,
- StackHandlerConstants::kStateOffset - kPointerSize);
ASSERT_EQ(StackHandlerConstants::kFPOffset,
- StackHandlerConstants::kCodeOffset - kPointerSize);
- ASSERT_EQ(StackHandlerConstants::kPPOffset,
+ StackHandlerConstants::kStateOffset - kPointerSize);
+ ASSERT_EQ(StackHandlerConstants::kNextOffset,
StackHandlerConstants::kFPOffset - kPointerSize);
if (try_location == IN_JAVASCRIPT) {
@@ -91,26 +353,452 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
} else {
push(Immediate(StackHandler::TRY_FINALLY));
}
- push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
push(rbp);
- push(rdi);
} else {
ASSERT(try_location == IN_JS_ENTRY);
- // The parameter pointer is meaningless here and ebp does not
- // point to a JS frame. So we save NULL for both pp and ebp. We
- // expect the code throwing an exception to check ebp before
- // dereferencing it to restore the context.
+ // The frame pointer does not point to a JS frame so we save NULL
+ // for rbp. We expect the code throwing an exception to check rbp
+ // before dereferencing it to restore the context.
push(Immediate(StackHandler::ENTRY));
- push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
- push(Immediate(0)); // NULL frame pointer
- push(Immediate(0)); // NULL parameter pointer
+ push(Immediate(0)); // NULL frame pointer.
}
+ // Save the current handler.
movq(kScratchRegister, ExternalReference(Top::k_handler_address));
- // Cached TOS.
- movq(rax, Operand(kScratchRegister, 0));
+ push(Operand(kScratchRegister, 0));
// Link this handler.
movq(Operand(kScratchRegister, 0), rsp);
}
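// The handler record pushed above is a linked-list node threaded through
// the stack: state, then frame pointer, then the previous handler, with
// Top::k_handler_address always pointing at the newest record. A
// standalone sketch in plain C++ (not part of this patch) of that chain:
#include <cstdio>

struct StackHandler {
  int state;           // TRY_CATCH, TRY_FINALLY, or ENTRY.
  void* fp;            // Saved frame pointer (NULL for a JS entry handler).
  StackHandler* next;  // Previously linked handler.
};

StackHandler* top_handler = 0;  // Models Top::k_handler_address.

void PushTryHandler(StackHandler* h, int state, void* fp) {
  h->state = state;
  h->fp = fp;
  h->next = top_handler;  // push(Operand(kScratchRegister, 0)) above.
  top_handler = h;        // movq(Operand(kScratchRegister, 0), rsp) above.
}

int main() {
  StackHandler outer, inner;
  PushTryHandler(&outer, /* TRY_FINALLY */ 1, &outer);
  PushTryHandler(&inner, /* TRY_CATCH */ 0, &inner);
  std::printf("inner links to outer: %d\n", top_handler->next == &outer);
}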
+void MacroAssembler::Ret() {
+ ret(0);
+}
+
+
+void MacroAssembler::FCmp() {
+ fcompp();
+ push(rax);
+ fnstsw_ax();
+ // TODO(X64): Check that sahf is safe to use, using CPUProbe.
+ sahf();
+ pop(rax);
+}
+
+
+void MacroAssembler::CmpObjectType(Register heap_object,
+ InstanceType type,
+ Register map) {
+ movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ CmpInstanceType(map, type);
+}
+
+
+void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
+ cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
+ Immediate(static_cast<int8_t>(type)));
+}
+
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ movq(kScratchRegister, ExternalReference(counter));
+ movl(Operand(kScratchRegister, 0), Immediate(value));
+ }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ movq(kScratchRegister, ExternalReference(counter));
+ Operand operand(kScratchRegister, 0);
+ if (value == 1) {
+ incl(operand);
+ } else {
+ addl(operand, Immediate(value));
+ }
+ }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ movq(kScratchRegister, ExternalReference(counter));
+ Operand operand(kScratchRegister, 0);
+ if (value == 1) {
+ decl(operand);
+ } else {
+ subl(operand, Immediate(value));
+ }
+ }
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+void MacroAssembler::PushRegistersFromMemory(RegList regs) {
+ ASSERT((regs & ~kJSCallerSaved) == 0);
+ // Push the content of the memory location to the stack.
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ if ((regs & (1 << r)) != 0) {
+ ExternalReference reg_addr =
+ ExternalReference(Debug_Address::Register(i));
+ movq(kScratchRegister, reg_addr);
+ push(Operand(kScratchRegister, 0));
+ }
+ }
+}
+
+void MacroAssembler::SaveRegistersToMemory(RegList regs) {
+ ASSERT((regs & ~kJSCallerSaved) == 0);
+ // Copy the content of registers to memory location.
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ if ((regs & (1 << r)) != 0) {
+ Register reg = { r };
+ ExternalReference reg_addr =
+ ExternalReference(Debug_Address::Register(i));
+ movq(kScratchRegister, reg_addr);
+ movq(Operand(kScratchRegister, 0), reg);
+ }
+ }
+}
+
+
+void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
+ ASSERT((regs & ~kJSCallerSaved) == 0);
+ // Copy the content of memory location to registers.
+ for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+ int r = JSCallerSavedCode(i);
+ if ((regs & (1 << r)) != 0) {
+ Register reg = { r };
+ ExternalReference reg_addr =
+ ExternalReference(Debug_Address::Register(i));
+ movq(kScratchRegister, reg_addr);
+ movq(reg, Operand(kScratchRegister, 0));
+ }
+ }
+}
+
+
+void MacroAssembler::PopRegistersToMemory(RegList regs) {
+ ASSERT((regs & ~kJSCallerSaved) == 0);
+ // Pop the content from the stack to the memory location.
+ for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+ int r = JSCallerSavedCode(i);
+ if ((regs & (1 << r)) != 0) {
+ ExternalReference reg_addr =
+ ExternalReference(Debug_Address::Register(i));
+ movq(kScratchRegister, reg_addr);
+ pop(Operand(kScratchRegister, 0));
+ }
+ }
+}
+
+
+void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
+ Register scratch,
+ RegList regs) {
+ ASSERT(!scratch.is(kScratchRegister));
+ ASSERT(!base.is(kScratchRegister));
+ ASSERT(!base.is(scratch));
+ ASSERT((regs & ~kJSCallerSaved) == 0);
+ // Copy the content of the stack to the memory location and adjust base.
+ for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+ int r = JSCallerSavedCode(i);
+ if ((regs & (1 << r)) != 0) {
+ movq(scratch, Operand(base, 0));
+ ExternalReference reg_addr =
+ ExternalReference(Debug_Address::Register(i));
+ movq(kScratchRegister, reg_addr);
+ movq(Operand(kScratchRegister, 0), scratch);
+ lea(base, Operand(base, kPointerSize));
+ }
+ }
+}
+
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
+ bool resolved;
+ Handle<Code> code = ResolveBuiltin(id, &resolved);
+
+ // Calls are not allowed in some stubs.
+ ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
+
+ // Rely on the assertion to check that the number of provided
+ // arguments match the expected number of arguments. Fake a
+ // parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ InvokeCode(Handle<Code>(code), expected, expected,
+ RelocInfo::CODE_TARGET, flag);
+
+ const char* name = Builtins::GetName(id);
+ int argc = Builtins::GetArgumentsCount(id);
+ // The target address for the jump is stored as an immediate at offset
+ // kInvokeCodeAddressOffset.
+ if (!resolved) {
+ uint32_t flags =
+ Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+ Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
+ Bootstrapper::FixupFlagsUseCodeObject::encode(false);
+ Unresolved entry =
+ { pc_offset() - kTargetAddrToReturnAddrDist, flags, name };
+ unresolved_.Add(entry);
+ }
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_register,
+ Label* done,
+ InvokeFlag flag) {
+ bool definitely_matches = false;
+ Label invoke;
+ if (expected.is_immediate()) {
+ ASSERT(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+ } else {
+ movq(rax, Immediate(actual.immediate()));
+ if (expected.immediate() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ // Don't worry about adapting arguments for built-ins that
+ // don't want that done. Skip adaptation code by making it look
+ // like we have a match between expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ movq(rbx, Immediate(expected.immediate()));
+ }
+ }
+ } else {
+ if (actual.is_immediate()) {
+ // Expected is in register, actual is immediate. This is the
+ // case when we invoke function values without going through the
+ // IC mechanism.
+ cmpq(expected.reg(), Immediate(actual.immediate()));
+ j(equal, &invoke);
+ ASSERT(expected.reg().is(rbx));
+ movq(rax, Immediate(actual.immediate()));
+ } else if (!expected.reg().is(actual.reg())) {
+ // Both expected and actual are in (different) registers. This
+ // is the case when we invoke functions using call and apply.
+ cmpq(expected.reg(), actual.reg());
+ j(equal, &invoke);
+ ASSERT(actual.reg().is(rax));
+ ASSERT(expected.reg().is(rbx));
+ }
+ }
+
+ if (!definitely_matches) {
+ Handle<Code> adaptor =
+ Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ if (!code_constant.is_null()) {
+ movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+ addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ } else if (!code_register.is(rdx)) {
+ movq(rdx, code_register);
+ }
+
+ movq(kScratchRegister, adaptor, RelocInfo::CODE_TARGET);
+ if (flag == CALL_FUNCTION) {
+ call(kScratchRegister);
+ jmp(done);
+ } else {
+ jmp(kScratchRegister);
+ }
+ bind(&invoke);
+ }
+}
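// The register shuffling in InvokePrologue serves one three-way decision:
// invoke directly on an exact count match or when the callee opted out
// with the don't-adapt sentinel, otherwise route through the arguments
// adaptor trampoline. A standalone decision-table sketch in plain C++
// (not part of this patch; the sentinel value is illustrative):
#include <cstdio>

const int kDontAdaptArgumentsSentinel = -1;

bool NeedsArgumentsAdaptor(int expected, int actual) {
  if (expected == actual) return false;  // Exact match: invoke directly.
  if (expected == kDontAdaptArgumentsSentinel) return false;  // Opt-out.
  return true;  // Counts differ: go through the adaptor trampoline.
}

int main() {
  std::printf("%d %d %d\n",
              NeedsArgumentsAdaptor(2, 2),    // 0: direct invoke
              NeedsArgumentsAdaptor(-1, 5),   // 0: sentinel skips adaptation
              NeedsArgumentsAdaptor(2, 3));   // 1: adaptor needed
}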
+
+
+void MacroAssembler::InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag) {
+ Label done;
+ InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+ if (flag == CALL_FUNCTION) {
+ call(code);
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ jmp(code);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::InvokeCode(Handle<Code> code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ RelocInfo::Mode rmode,
+ InvokeFlag flag) {
+ Label done;
+ Register dummy = rax;
+ InvokePrologue(expected, actual, code, dummy, &done, flag);
+ if (flag == CALL_FUNCTION) {
+ Call(code, rmode);
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ Jump(code, rmode);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag) {
+ ASSERT(function.is(rdi));
+ movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
+ movsxlq(rbx,
+ FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ // Advances rdx to the end of the Code object header, to the start of
+ // the executable code.
+ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+
+ ParameterCount expected(rbx);
+ InvokeCode(rdx, expected, actual, flag);
+}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+ push(rbp);
+ movq(rbp, rsp);
+ push(rsi); // Context.
+ push(Immediate(Smi::FromInt(type)));
+ movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+ push(kScratchRegister);
+ if (FLAG_debug_code) {
+ movq(kScratchRegister,
+ Factory::undefined_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ cmpq(Operand(rsp, 0), kScratchRegister);
+ Check(not_equal, "code object not properly patched");
+ }
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+ if (FLAG_debug_code) {
+ movq(kScratchRegister, Immediate(Smi::FromInt(type)));
+ cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
+ Check(equal, "stack frame types must match");
+ }
+ movq(rsp, rbp);
+ pop(rbp);
+}
+
+
+
+void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
+ ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
+
+ // Setup the frame structure on the stack.
+ ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
+ ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
+ ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
+ push(rbp);
+ movq(rbp, rsp);
+
+ // Reserve room for entry stack pointer and push the debug marker.
+ ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
+ push(Immediate(0)); // saved entry sp, patched before call
+ push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+
+ // Save the frame pointer and the context in top.
+ ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
+ ExternalReference context_address(Top::k_context_address);
+ movq(rdi, rax); // Backup rax before we use it.
+
+ movq(rax, rbp);
+ store_rax(c_entry_fp_address);
+ movq(rax, rsi);
+ store_rax(context_address);
+
+ // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
+ // so it must be retained across the C-call.
+ int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ lea(r15, Operand(rbp, rdi, times_pointer_size, offset));
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Save the state of all registers to the stack from the memory
+ // location. This is needed to allow nested break points.
+ if (type == StackFrame::EXIT_DEBUG) {
+ // TODO(1243899): This should be symmetric to
+ // CopyRegistersFromStackToMemory() but it isn't! esp is assumed
+ // correct here, but computed for the other call. Very error
+ // prone! FIX THIS. Actually there are deeper problems with
+ // register saving than this asymmetry (see the bug report
+ // associated with this issue).
+ PushRegistersFromMemory(kJSCallerSaved);
+ }
+#endif
+
+ // Reserve space for two arguments: argc and argv
+ subq(rsp, Immediate(2 * kPointerSize));
+
+ // Get the required frame alignment for the OS.
+ static const int kFrameAlignment = OS::ActivationFrameAlignment();
+ if (kFrameAlignment > 0) {
+ ASSERT(IsPowerOf2(kFrameAlignment));
+ movq(kScratchRegister, Immediate(-kFrameAlignment));
+ and_(rsp, kScratchRegister);
+ }
+
+ // Patch the saved entry sp.
+ movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
+}
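// The alignment step above relies on a standard bit trick: and'ing with
// the negated power-of-two alignment rounds the stack pointer down to an
// aligned address. A standalone sketch in plain C++ (not part of this
// patch) of what and_(rsp, Immediate(-kFrameAlignment)) computes:
#include <cassert>
#include <stdint.h>

uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0);  // Must be a power of two.
  return sp & ~(alignment - 1);                // Same bits as sp & -alignment.
}

int main() {
  assert(AlignDown(0x1007, 16) == 0x1000);  // Rounded down to 16 bytes.
  assert(AlignDown(0x1010, 16) == 0x1010);  // Already aligned: unchanged.
  return 0;
}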
+
+
+void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+ // Registers:
+ // r15 : argv
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Restore the memory copy of the registers by digging them out from
+ // the stack. This is needed to allow nested break points.
+ if (type == StackFrame::EXIT_DEBUG) {
+ // It's okay to clobber register rbx below because we don't need
+ // the function pointer after this.
+ const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
+ int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+ lea(rbx, Operand(rbp, kOffset));
+ CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved);
+ }
+#endif
+
+ // Get the return address from the stack and restore the frame pointer.
+ movq(rcx, Operand(rbp, 1 * kPointerSize));
+ movq(rbp, Operand(rbp, 0 * kPointerSize));
+
+ // Pop the arguments and the receiver from the caller stack.
+ lea(rsp, Operand(r15, 1 * kPointerSize));
+
+ // Restore current context from top and clear it in debug mode.
+ ExternalReference context_address(Top::k_context_address);
+ movq(kScratchRegister, context_address);
+ movq(rsi, Operand(kScratchRegister, 0));
+#ifdef DEBUG
+ movq(Operand(kScratchRegister, 0), Immediate(0));
+#endif
+
+ // Push the return address to get ready to return.
+ push(rcx);
+
+ // Clear the top frame.
+ ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
+ movq(kScratchRegister, c_entry_fp_address);
+ movq(Operand(kScratchRegister, 0), Immediate(0));
+}
+
+
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/x64/macro-assembler-x64.h b/V8Binding/v8/src/x64/macro-assembler-x64.h
index 4af372a..c298a25 100644
--- a/V8Binding/v8/src/x64/macro-assembler-x64.h
+++ b/V8Binding/v8/src/x64/macro-assembler-x64.h
@@ -117,7 +117,7 @@ class MacroAssembler: public Assembler {
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(const Operand& code,
+ void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag);
@@ -141,10 +141,39 @@ class MacroAssembler: public Assembler {
// Store the code object for the given builtin in the target register.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+ // ---------------------------------------------------------------------------
+ // Macro instructions
+
// Expression support
void Set(Register dst, int64_t x);
void Set(const Operand& dst, int64_t x);
+ // Handle support
+ bool IsUnsafeSmi(Smi* value);
+ bool IsUnsafeSmi(Handle<Object> value) {
+ return IsUnsafeSmi(Smi::cast(*value));
+ }
+
+ void LoadUnsafeSmi(Register dst, Smi* source);
+ void LoadUnsafeSmi(Register dst, Handle<Object> source) {
+ LoadUnsafeSmi(dst, Smi::cast(*source));
+ }
+
+ void Move(Register dst, Handle<Object> source);
+ void Move(const Operand& dst, Handle<Object> source);
+ void Cmp(Register dst, Handle<Object> source);
+ void Cmp(const Operand& dst, Handle<Object> source);
+ void Push(Handle<Object> source);
+
+ // Control Flow
+ void Jump(Address destination, RelocInfo::Mode rmode);
+ void Jump(ExternalReference ext);
+ void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
+
+ void Call(Address destination, RelocInfo::Mode rmode);
+ void Call(ExternalReference ext);
+ void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
+
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
@@ -159,9 +188,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Exception handling
- // Push a new try handler and link into try handler chain.
- // The return address must be pushed before calling this helper.
- // On exit, rax contains TOS (next_sp).
+ // Push a new try handler and link into try handler chain. The return
+ // address must be pushed before calling this helper.
void PushTryHandler(CodeLocation try_location, HandlerType type);
@@ -292,13 +320,13 @@ class MacroAssembler: public Assembler {
bool generating_stub_;
bool allow_stub_calls_;
Handle<Object> code_object_; // This handle will be patched with the code
- // code object on installation.
+ // object on installation.
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
- const Operand& code_operand,
+ Register code_register,
Label* done,
InvokeFlag flag);
diff --git a/V8Binding/v8/src/x64/register-allocator-x64-inl.h b/V8Binding/v8/src/x64/register-allocator-x64-inl.h
index f369d7d..926dd64 100644
--- a/V8Binding/v8/src/x64/register-allocator-x64-inl.h
+++ b/V8Binding/v8/src/x64/register-allocator-x64-inl.h
@@ -37,33 +37,50 @@ namespace internal {
// RegisterAllocator implementation.
bool RegisterAllocator::IsReserved(Register reg) {
- // All registers are reserved for now.
- return true;
+ return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
+ reg.is(kScratchRegister);
}
// The register allocator uses small integers to represent the
// non-reserved assembler registers.
-
int RegisterAllocator::ToNumber(Register reg) {
ASSERT(reg.is_valid() && !IsReserved(reg));
- UNIMPLEMENTED();
- return -1;
+ static const int numbers[] = {
+ 0, // rax
+ 2, // rcx
+ 3, // rdx
+ 1, // rbx
+ -1, // rsp
+ -1, // rbp
+ -1, // rsi
+ 4, // rdi
+ 5, // r8
+ 6, // r9
+ -1, // r10
+ 7, // r11
+ 11, // r12
+ 10, // r13
+ 8, // r14
+ 9 // r15
+ };
+ return numbers[reg.code()];
}
Register RegisterAllocator::ToRegister(int num) {
ASSERT(num >= 0 && num < kNumRegisters);
- UNIMPLEMENTED();
- return no_reg;
+ static Register registers[] =
+ { rax, rbx, rcx, rdx, rdi, r8, r9, r11, r14, r15, r13, r12 };
+ return registers[num];
}
void RegisterAllocator::Initialize() {
- UNIMPLEMENTED();
+ Reset();
+ // The non-reserved rdi register is live on JS function entry.
+ Use(rdi); // JS function.
}
-
-
} } // namespace v8::internal
#endif // V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
diff --git a/V8Binding/v8/src/x64/register-allocator-x64.cc b/V8Binding/v8/src/x64/register-allocator-x64.cc
index 209aa2d..deb2318 100644
--- a/V8Binding/v8/src/x64/register-allocator-x64.cc
+++ b/V8Binding/v8/src/x64/register-allocator-x64.cc
@@ -25,3 +25,60 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+void Result::ToRegister() {
+ ASSERT(is_valid());
+ if (is_constant()) {
+ Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
+ ASSERT(fresh.is_valid());
+ CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle());
+ // This result becomes a copy of the fresh one.
+ *this = fresh;
+ }
+ ASSERT(is_register());
+}
+
+
+void Result::ToRegister(Register target) {
+ ASSERT(is_valid());
+ if (!is_register() || !reg().is(target)) {
+ Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate(target);
+ ASSERT(fresh.is_valid());
+ if (is_register()) {
+ CodeGeneratorScope::Current()->masm()->movq(fresh.reg(), reg());
+ } else {
+ ASSERT(is_constant());
+ CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle());
+ }
+ *this = fresh;
+ } else if (is_register() && reg().is(target)) {
+ ASSERT(CodeGeneratorScope::Current()->has_valid_frame());
+ CodeGeneratorScope::Current()->frame()->Spill(target);
+ ASSERT(CodeGeneratorScope::Current()->allocator()->count(target) == 1);
+ }
+ ASSERT(is_register());
+ ASSERT(reg().is(target));
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
+ // This function is not used in 64-bit code.
+ UNREACHABLE();
+ return Result();
+}
+
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/x64/register-allocator-x64.h b/V8Binding/v8/src/x64/register-allocator-x64.h
index bc08112..8672796 100644
--- a/V8Binding/v8/src/x64/register-allocator-x64.h
+++ b/V8Binding/v8/src/x64/register-allocator-x64.h
@@ -35,7 +35,7 @@ class RegisterAllocatorConstants : public AllStatic {
public:
// Register allocation is not yet implemented on x64, but C++
// forbids 0-length arrays so we use 1 as the number of registers.
- static const int kNumRegisters = 1;
+ static const int kNumRegisters = 12;
static const int kInvalidRegister = -1;
};
diff --git a/V8Binding/v8/src/x64/simulator-x64.h b/V8Binding/v8/src/x64/simulator-x64.h
index 8160e53..6b4d718 100644
--- a/V8Binding/v8/src/x64/simulator-x64.h
+++ b/V8Binding/v8/src/x64/simulator-x64.h
@@ -31,6 +31,7 @@
// Since there is no simulator for the ia32 architecture the only thing we can
// do is to call the entry directly.
+// TODO(X64): Don't pass p0, since it isn't used?
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4);
diff --git a/V8Binding/v8/src/x64/stub-cache-x64.cc b/V8Binding/v8/src/x64/stub-cache-x64.cc
index 209aa2d..f2e0e19 100644
--- a/V8Binding/v8/src/x64/stub-cache-x64.cc
+++ b/V8Binding/v8/src/x64/stub-cache-x64.cc
@@ -25,3 +25,154 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ic-inl.h"
+#include "codegen-inl.h"
+#include "stub-cache.h"
+#include "macro-assembler-x64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM((&masm_))
+
+
+Object* CallStubCompiler::CompileCallConstant(Object* a,
+ JSObject* b,
+ JSFunction* c,
+ StubCompiler::CheckType d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* CallStubCompiler::CompileCallField(Object* a,
+ JSObject* b,
+ int c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* CallStubCompiler::CompileCallInterceptor(Object* a,
+ JSObject* b,
+ String* c) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+
+Object* CallStubCompiler::CompileCallGlobal(JSGlobalObject* object,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* a,
+ JSObject* b,
+ AccessorInfo* c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* LoadStubCompiler::CompileLoadConstant(JSObject* a,
+ JSObject* b,
+ Object* c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* LoadStubCompiler::CompileLoadField(JSObject* a,
+ JSObject* b,
+ int c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a,
+ JSObject* b,
+ String* c) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* LoadStubCompiler::CompileLoadGlobal(JSGlobalObject* object,
+ JSGlobalPropertyCell* cell,
+ String* name,
+ bool is_dont_delete) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* a,
+ AccessorInfo* b,
+ String* c) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* StoreStubCompiler::CompileStoreField(JSObject* a,
+ int b,
+ Map* c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* a, String* b) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object* StoreStubCompiler::CompileStoreGlobal(JSGlobalObject* object,
+ JSGlobalPropertyCell* cell,
+ String* name) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+// TODO(1241006): Avoid having lazy compile stubs specialized by the
+// number of arguments. It is not needed anymore.
+Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+
+ __ push(rdi); // function is also the parameter to the runtime call
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ __ pop(rdi);
+
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Do a tail-call of the compiled function.
+ __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
+ __ jmp(rcx);
+
+ return GetCodeWithFlags(flags, "LazyCompileStub");
+}
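// The stub above is a classic lazy-compilation trampoline: the first call
// lands in the stub, which compiles the function and then tail-calls the
// fresh code, so later calls never see the indirection. A standalone
// sketch in plain C++ (illustrative only, not part of this patch):
#include <cstdio>

struct Function;
typedef int (*Code)(Function*);

struct Function { Code code; };

int CompiledBody(Function*) { return 42; }

int LazyCompileStub(Function* f) {
  std::printf("compiling on first call\n");
  f->code = CompiledBody;  // Runtime::kLazyCompile leaves new code in rax.
  return f->code(f);       // The jmp(rcx): tail-call the compiled code.
}

int main() {
  Function f = { LazyCompileStub };
  std::printf("%d\n", f.code(&f));  // Compiles, then runs: 42.
  std::printf("%d\n", f.code(&f));  // Already compiled: 42, no recompile.
}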
+
+#undef __
+
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/x64/virtual-frame-x64.cc b/V8Binding/v8/src/x64/virtual-frame-x64.cc
index 209aa2d..888fdc2 100644
--- a/V8Binding/v8/src/x64/virtual-frame-x64.cc
+++ b/V8Binding/v8/src/x64/virtual-frame-x64.cc
@@ -25,3 +25,1028 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
+
+// On entry to a function, the virtual frame already contains the receiver,
+// the parameters, and a return address. All frame elements are in memory.
+VirtualFrame::VirtualFrame()
+ : elements_(parameter_count() + local_count() + kPreallocatedElements),
+ stack_pointer_(parameter_count() + 1) { // 0-based index of TOS.
+ for (int i = 0; i <= stack_pointer_; i++) {
+ elements_.Add(FrameElement::MemoryElement());
+ }
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ register_locations_[i] = kIllegalIndex;
+ }
+}
+
+
+void VirtualFrame::Enter() {
+ // Registers live on entry to a JS frame:
+ // rsp: stack pointer, points to return address from this function.
+ // rbp: base pointer, points to previous JS, ArgumentsAdaptor, or
+ // Trampoline frame.
+ // rsi: context of this function call.
+ // rdi: pointer to this function object.
+ Comment cmnt(masm(), "[ Enter JS frame");
+
+#ifdef DEBUG
+ // Verify that rdi contains a JS function. The following code
+ // relies on rax being available for use.
+ __ testl(rdi, Immediate(kSmiTagMask));
+ __ Check(not_zero,
+ "VirtualFrame::Enter - rdi is not a function (smi check).");
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
+ __ Check(equal,
+ "VirtualFrame::Enter - rdi is not a function (map check).");
+#endif
+
+ EmitPush(rbp);
+
+ __ movq(rbp, rsp);
+
+ // Store the context in the frame. The context is kept in rsi and a
+ // copy is stored in the frame. The external reference to rsi
+ // remains.
+ EmitPush(rsi);
+
+ // Store the function in the frame. The frame owns the register
+ // reference now (i.e., it can keep it in rdi or spill it later).
+ Push(rdi);
+ SyncElementAt(element_count() - 1);
+ cgen()->allocator()->Unuse(rdi);
+}
+
+
+void VirtualFrame::Exit() {
+ Comment cmnt(masm(), "[ Exit JS frame");
+ // Record the location of the JS exit code for patching when setting
+ // break point.
+ __ RecordJSReturn();
+
+ // Avoid using the leave instruction here, because it is too
+ // short. We need the return sequence to be at least the size of a
+ // call instruction to support patching the exit code in the
+ // debugger. See GenerateReturnSequence for the full return sequence.
+ // TODO(X64): A patched call will be very long now. Make sure we
+ // have enough room.
+ __ movq(rsp, rbp);
+ stack_pointer_ = frame_pointer();
+ for (int i = element_count() - 1; i > stack_pointer_; i--) {
+ FrameElement last = elements_.RemoveLast();
+ if (last.is_register()) {
+ Unuse(last.reg());
+ }
+ }
+
+ EmitPop(rbp);
+}
+
+
+void VirtualFrame::AllocateStackSlots() {
+ int count = local_count();
+ if (count > 0) {
+ Comment cmnt(masm(), "[ Allocate space for locals");
+ // The locals are initialized to a constant (the undefined value), but
+ // we sync them with the actual frame to allocate space for spilling
+ // them later. First sync everything above the stack pointer so we can
+ // use pushes to allocate and initialize the locals.
+ SyncRange(stack_pointer_ + 1, element_count() - 1);
+ Handle<Object> undefined = Factory::undefined_value();
+ FrameElement initial_value =
+ FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
+ __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
+ for (int i = 0; i < count; i++) {
+ elements_.Add(initial_value);
+ stack_pointer_++;
+ __ push(kScratchRegister);
+ }
+ }
+}
+
+
+void VirtualFrame::SaveContextRegister() {
+ ASSERT(elements_[context_index()].is_memory());
+ __ movq(Operand(rbp, fp_relative(context_index())), rsi);
+}
+
+
+void VirtualFrame::RestoreContextRegister() {
+ ASSERT(elements_[context_index()].is_memory());
+ __ movq(rsi, Operand(rbp, fp_relative(context_index())));
+}
+
+
+void VirtualFrame::PushReceiverSlotAddress() {
+ Result temp = cgen()->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ lea(temp.reg(), ParameterAt(-1));
+ Push(&temp);
+}
+
+
+void VirtualFrame::EmitPop(Register reg) {
+ ASSERT(stack_pointer_ == element_count() - 1);
+ stack_pointer_--;
+ elements_.RemoveLast();
+ __ pop(reg);
+}
+
+
+void VirtualFrame::EmitPop(const Operand& operand) {
+ ASSERT(stack_pointer_ == element_count() - 1);
+ stack_pointer_--;
+ elements_.RemoveLast();
+ __ pop(operand);
+}
+
+
+void VirtualFrame::EmitPush(Register reg) {
+ ASSERT(stack_pointer_ == element_count() - 1);
+ elements_.Add(FrameElement::MemoryElement());
+ stack_pointer_++;
+ __ push(reg);
+}
+
+
+void VirtualFrame::EmitPush(const Operand& operand) {
+ ASSERT(stack_pointer_ == element_count() - 1);
+ elements_.Add(FrameElement::MemoryElement());
+ stack_pointer_++;
+ __ push(operand);
+}
+
+
+void VirtualFrame::EmitPush(Immediate immediate) {
+ ASSERT(stack_pointer_ == element_count() - 1);
+ elements_.Add(FrameElement::MemoryElement());
+ stack_pointer_++;
+ __ push(immediate);
+}
+
+
+void VirtualFrame::EmitPush(Handle<Object> value) {
+ ASSERT(stack_pointer_ == element_count() - 1);
+ elements_.Add(FrameElement::MemoryElement());
+ stack_pointer_++;
+ __ Push(value);
+}
+
+
+void VirtualFrame::Drop(int count) {
+ ASSERT(height() >= count);
+ int num_virtual_elements = (element_count() - 1) - stack_pointer_;
+
+ // Emit code to lower the stack pointer if necessary.
+ if (num_virtual_elements < count) {
+ int num_dropped = count - num_virtual_elements;
+ stack_pointer_ -= num_dropped;
+ __ addq(rsp, Immediate(num_dropped * kPointerSize));
+ }
+
+ // Discard elements from the virtual frame and free any registers.
+ for (int i = 0; i < count; i++) {
+ FrameElement dropped = elements_.RemoveLast();
+ if (dropped.is_register()) {
+ Unuse(dropped.reg());
+ }
+ }
+}
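// Drop only emits code for elements that actually reached the hardware
// stack; elements above stack_pointer_ are virtual and cost nothing. A
// standalone sketch in plain C++ (not part of this patch) of that
// bookkeeping:
#include <cassert>

struct Frame {
  int element_count;   // Total virtual elements.
  int stack_pointer_;  // Index of highest element backed by the real stack.
};

// Returns how many real slots get popped (the addq rsp, ... above).
int Drop(Frame* f, int count) {
  int num_virtual = (f->element_count - 1) - f->stack_pointer_;
  int num_dropped = count > num_virtual ? count - num_virtual : 0;
  f->stack_pointer_ -= num_dropped;
  f->element_count -= count;
  return num_dropped;
}

int main() {
  Frame f = { 10, 6 };       // Elements 7..9 exist only virtually.
  assert(Drop(&f, 2) == 0);  // Purely virtual drop: no code emitted.
  assert(Drop(&f, 3) == 2);  // One virtual element plus two real slots.
  return 0;
}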
+
+
+int VirtualFrame::InvalidateFrameSlotAt(int index) {
+ FrameElement original = elements_[index];
+
+ // Is this element the backing store of any copies?
+ int new_backing_index = kIllegalIndex;
+ if (original.is_copied()) {
+ // Verify it is copied, and find first copy.
+ for (int i = index + 1; i < element_count(); i++) {
+ if (elements_[i].is_copy() && elements_[i].index() == index) {
+ new_backing_index = i;
+ break;
+ }
+ }
+ }
+
+ if (new_backing_index == kIllegalIndex) {
+ // No copies found, return kIllegalIndex.
+ if (original.is_register()) {
+ Unuse(original.reg());
+ }
+ elements_[index] = FrameElement::InvalidElement();
+ return kIllegalIndex;
+ }
+
+ // This is the backing store of copies.
+ Register backing_reg;
+ if (original.is_memory()) {
+ Result fresh = cgen()->allocator()->Allocate();
+ ASSERT(fresh.is_valid());
+ Use(fresh.reg(), new_backing_index);
+ backing_reg = fresh.reg();
+ __ movq(backing_reg, Operand(rbp, fp_relative(index)));
+ } else {
+ // The original was in a register.
+ backing_reg = original.reg();
+ set_register_location(backing_reg, new_backing_index);
+ }
+ // Invalidate the element at index.
+ elements_[index] = FrameElement::InvalidElement();
+ // Set the new backing element.
+ if (elements_[new_backing_index].is_synced()) {
+ elements_[new_backing_index] =
+ FrameElement::RegisterElement(backing_reg, FrameElement::SYNCED);
+ } else {
+ elements_[new_backing_index] =
+ FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED);
+ }
+ // Update the other copies.
+ for (int i = new_backing_index + 1; i < element_count(); i++) {
+ if (elements_[i].is_copy() && elements_[i].index() == index) {
+ elements_[i].set_index(new_backing_index);
+ elements_[new_backing_index].set_copied();
+ }
+ }
+ return new_backing_index;
+}
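
The control flow above reduces to three steps: find the first copy, promote it to the new backing store, and re-point the later copies at it. An illustrative model, not the patch's code, with -1 marking an element that holds its own value and any other entry naming the slot it aliases:

#include <vector>

int PromoteFirstCopy(std::vector<int>* frame, int index) {
  std::vector<int>& f = *frame;
  int new_backing = -1;
  for (int i = index + 1; i < static_cast<int>(f.size()); i++) {
    if (f[i] == index) { new_backing = i; break; }  // First copy found.
  }
  if (new_backing == -1) return -1;  // No copies: the slot simply dies.
  f[new_backing] = -1;               // The first copy becomes the value.
  for (int i = new_backing + 1; i < static_cast<int>(f.size()); i++) {
    if (f[i] == index) f[i] = new_backing;  // Re-point remaining copies.
  }
  return new_backing;
}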
+
+
+void VirtualFrame::TakeFrameSlotAt(int index) {
+ ASSERT(index >= 0);
+ ASSERT(index <= element_count());
+ FrameElement original = elements_[index];
+ int new_backing_store_index = InvalidateFrameSlotAt(index);
+ if (new_backing_store_index != kIllegalIndex) {
+ elements_.Add(CopyElementAt(new_backing_store_index));
+ return;
+ }
+
+ switch (original.type()) {
+ case FrameElement::MEMORY: {
+ // Emit code to load the original element's data into a register.
+ // Push that register as a FrameElement on top of the frame.
+ Result fresh = cgen()->allocator()->Allocate();
+ ASSERT(fresh.is_valid());
+ FrameElement new_element =
+ FrameElement::RegisterElement(fresh.reg(),
+ FrameElement::NOT_SYNCED);
+ Use(fresh.reg(), element_count());
+ elements_.Add(new_element);
+ __ movq(fresh.reg(), Operand(rbp, fp_relative(index)));
+ break;
+ }
+ case FrameElement::REGISTER:
+ Use(original.reg(), element_count());
+ // Fall through.
+ case FrameElement::CONSTANT:
+ case FrameElement::COPY:
+ original.clear_sync();
+ elements_.Add(original);
+ break;
+ case FrameElement::INVALID:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void VirtualFrame::StoreToFrameSlotAt(int index) {
+ // Store the value on top of the frame to the virtual frame slot at
+ // a given index. The value on top of the frame is left in place.
+ // This is a duplicating operation, so it can create copies.
+ ASSERT(index >= 0);
+ ASSERT(index < element_count());
+
+ int top_index = element_count() - 1;
+ FrameElement top = elements_[top_index];
+ FrameElement original = elements_[index];
+ if (top.is_copy() && top.index() == index) return;
+ ASSERT(top.is_valid());
+
+ InvalidateFrameSlotAt(index);
+
+ // InvalidateFrameSlotAt can potentially change any frame element, due
+ // to spilling registers to allocate temporaries in order to preserve
+ // the copy-on-write semantics of aliased elements. Reload top from
+ // the frame.
+ top = elements_[top_index];
+
+ if (top.is_copy()) {
+ // There are two cases based on the relative positions of the
+ // stored-to slot and the backing slot of the top element.
+ int backing_index = top.index();
+ ASSERT(backing_index != index);
+ if (backing_index < index) {
+ // 1. The top element is a copy of a slot below the stored-to
+ // slot. The stored-to slot becomes an unsynced copy of that
+ // same backing slot.
+ elements_[index] = CopyElementAt(backing_index);
+ } else {
+ // 2. The top element is a copy of a slot above the stored-to
+ // slot. The stored-to slot becomes the new (unsynced) backing
+ // slot and both the top element and the element at the former
+ // backing slot become copies of it. The sync state of the top
+ // and former backing elements is preserved.
+ FrameElement backing_element = elements_[backing_index];
+ ASSERT(backing_element.is_memory() || backing_element.is_register());
+ if (backing_element.is_memory()) {
+ // Because sets of copies are canonicalized to be backed by
+ // their lowest frame element, and because memory frame
+ // elements are backed by the corresponding stack address, we
+ // have to move the actual value down in the stack.
+ //
+ // TODO(209): consider allocating the stored-to slot to the
+ // temp register. Alternatively, allow copies to appear in
+ // any order in the frame and lazily move the value down to
+ // the slot.
+ __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
+ __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
+ } else {
+ set_register_location(backing_element.reg(), index);
+ if (backing_element.is_synced()) {
+ // If the element is a register, we will not actually move
+ // anything on the stack but only update the virtual frame
+ // element.
+ backing_element.clear_sync();
+ }
+ }
+ elements_[index] = backing_element;
+
+ // The old backing element becomes a copy of the new backing
+ // element.
+ FrameElement new_element = CopyElementAt(index);
+ elements_[backing_index] = new_element;
+ if (backing_element.is_synced()) {
+ elements_[backing_index].set_sync();
+ }
+
+ // All the copies of the old backing element (including the top
+ // element) become copies of the new backing element.
+ for (int i = backing_index + 1; i < element_count(); i++) {
+ if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
+ elements_[i].set_index(index);
+ }
+ }
+ }
+ return;
+ }
+
+ // Move the top element to the stored-to slot and replace it (the
+ // top element) with a copy.
+ elements_[index] = top;
+ if (top.is_memory()) {
+ // TODO(209): consider allocating the stored-to slot to the temp
+ // register. Alternatively, allow copies to appear in any order
+ // in the frame and lazily move the value down to the slot.
+ FrameElement new_top = CopyElementAt(index);
+ new_top.set_sync();
+ elements_[top_index] = new_top;
+
+ // The sync state of the former top element is correct (synced).
+ // Emit code to move the value down in the frame.
+ __ movq(kScratchRegister, Operand(rsp, 0));
+ __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
+ } else if (top.is_register()) {
+ set_register_location(top.reg(), index);
+ // The stored-to slot has the (unsynced) register reference and
+ // the top element becomes a copy. The sync state of the top is
+ // preserved.
+ FrameElement new_top = CopyElementAt(index);
+ if (top.is_synced()) {
+ new_top.set_sync();
+ elements_[index].clear_sync();
+ }
+ elements_[top_index] = new_top;
+ } else {
+ // The stored-to slot holds the same value as the top but
+ // unsynced. (We do not have copies of constants yet.)
+ ASSERT(top.is_constant());
+ elements_[index].clear_sync();
+ }
+}
+
+
+void VirtualFrame::MakeMergable() {
+ for (int i = 0; i < element_count(); i++) {
+ FrameElement element = elements_[i];
+
+ if (element.is_constant() || element.is_copy()) {
+ if (element.is_synced()) {
+ // Just spill.
+ elements_[i] = FrameElement::MemoryElement();
+ } else {
+ // Allocate to a register.
+ FrameElement backing_element; // Invalid if not a copy.
+ if (element.is_copy()) {
+ backing_element = elements_[element.index()];
+ }
+ Result fresh = cgen()->allocator()->Allocate();
+ ASSERT(fresh.is_valid()); // A register was spilled if all were in use.
+ elements_[i] =
+ FrameElement::RegisterElement(fresh.reg(),
+ FrameElement::NOT_SYNCED);
+ Use(fresh.reg(), i);
+
+ // Emit a move.
+ if (element.is_constant()) {
+ __ Move(fresh.reg(), element.handle());
+ } else {
+ ASSERT(element.is_copy());
+ // Copies are only backed by register or memory locations.
+ if (backing_element.is_register()) {
+ // The backing store may have been spilled by allocating,
+ // but that's OK. If it was, the value is right where we
+ // want it.
+ if (!fresh.reg().is(backing_element.reg())) {
+ __ movq(fresh.reg(), backing_element.reg());
+ }
+ } else {
+ ASSERT(backing_element.is_memory());
+ __ movq(fresh.reg(), Operand(rbp, fp_relative(element.index())));
+ }
+ }
+ }
+ // No need to set the copied flag --- there are no copies.
+ } else {
+ // Clear the copy flag of non-constant, non-copy elements.
+ // They cannot be copied because copies are not allowed.
+ // The copy flag is not relied on before the end of this loop,
+ // including when registers are spilled.
+ elements_[i].clear_copied();
+ }
+ }
+}
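
After this pass the frame satisfies a simple predicate: no constants and no copies remain, so an arbitrary frame of the same height can be merged to it element by element. A hedged distillation of that invariant (the Kind enum is a stand-in, not V8's FrameElement):

#include <vector>

enum class Kind { kInvalid, kMemory, kRegister, kConstant, kCopy };

// True when a frame could be the target of an arbitrary merge: every
// element lives directly in memory or in a register.
bool IsMergable(const std::vector<Kind>& frame) {
  for (Kind k : frame) {
    if (k == Kind::kConstant || k == Kind::kCopy) return false;
  }
  return true;
}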
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected) {
+ Comment cmnt(masm(), "[ Merge frame");
+ // We should always be merging the code generator's current frame to an
+ // expected frame.
+ ASSERT(cgen()->frame() == this);
+
+ // Adjust the stack pointer upward (toward the top of the virtual
+ // frame) if necessary.
+ if (stack_pointer_ < expected->stack_pointer_) {
+ int difference = expected->stack_pointer_ - stack_pointer_;
+ stack_pointer_ = expected->stack_pointer_;
+ __ subq(rsp, Immediate(difference * kPointerSize));
+ }
+
+ MergeMoveRegistersToMemory(expected);
+ MergeMoveRegistersToRegisters(expected);
+ MergeMoveMemoryToRegisters(expected);
+
+ // Adjust the stack pointer downward if necessary.
+ if (stack_pointer_ > expected->stack_pointer_) {
+ int difference = stack_pointer_ - expected->stack_pointer_;
+ stack_pointer_ = expected->stack_pointer_;
+ __ addq(rsp, Immediate(difference * kPointerSize));
+ }
+
+ // At this point, the frames should be identical.
+ ASSERT(Equals(expected));
+}
+
+
+void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
+ ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+ // Move registers, constants, and copies to memory. Perform moves
+ // from the top downward in the frame in order to leave the backing
+ // stores of copies in registers.
+ for (int i = element_count() - 1; i >= 0; i--) {
+ FrameElement target = expected->elements_[i];
+ if (target.is_register()) continue; // Handle registers later.
+ if (target.is_memory()) {
+ FrameElement source = elements_[i];
+ switch (source.type()) {
+ case FrameElement::INVALID:
+ // Not a legal merge move.
+ UNREACHABLE();
+ break;
+
+ case FrameElement::MEMORY:
+ // Already in place.
+ break;
+
+ case FrameElement::REGISTER:
+ Unuse(source.reg());
+ if (!source.is_synced()) {
+ __ movq(Operand(rbp, fp_relative(i)), source.reg());
+ }
+ break;
+
+ case FrameElement::CONSTANT:
+ if (!source.is_synced()) {
+ __ Move(Operand(rbp, fp_relative(i)), source.handle());
+ }
+ break;
+
+ case FrameElement::COPY:
+ if (!source.is_synced()) {
+ int backing_index = source.index();
+ FrameElement backing_element = elements_[backing_index];
+ if (backing_element.is_memory()) {
+ __ movq(kScratchRegister,
+ Operand(rbp, fp_relative(backing_index)));
+ __ movq(Operand(rbp, fp_relative(i)), kScratchRegister);
+ } else {
+ ASSERT(backing_element.is_register());
+ __ movq(Operand(rbp, fp_relative(i)), backing_element.reg());
+ }
+ }
+ break;
+ }
+ }
+ elements_[i] = target;
+ }
+}
+
+
+void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
+ // We have already done X-to-memory moves.
+ ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ // Move the right value into register i if it is currently in a register.
+ int index = expected->register_location(i);
+ int use_index = register_location(i);
+ // Skip if register i is unused in the target frame, or if the source
+ // is not in a register (this is not a register-to-register move).
+ if (index == kIllegalIndex || !elements_[index].is_register()) continue;
+
+ Register target = RegisterAllocator::ToRegister(i);
+ Register source = elements_[index].reg();
+ if (index != use_index) {
+ if (use_index == kIllegalIndex) { // Target is currently unused.
+ // Copy the contents of the source register to the target.
+ // Set frame element register to target.
+ Use(target, index);
+ Unuse(source);
+ __ movq(target, source);
+ } else {
+ // Exchange contents of registers source and target.
+ // Nothing except the register backing use_index has changed.
+ elements_[use_index].set_reg(source);
+ set_register_location(target, index);
+ set_register_location(source, use_index);
+ __ xchg(source, target);
+ }
+ }
+
+ if (!elements_[index].is_synced() &&
+ expected->elements_[index].is_synced()) {
+ __ movq(Operand(rbp, fp_relative(index)), target);
+ }
+ elements_[index] = expected->elements_[index];
+ }
+}
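
The xchg-based shuffle above settles an arbitrary permutation of register contents one target register at a time. A sketch of the same idea with frame indices as plain ints; as a simplifying assumption every placement is modeled as a swap, whereas the real code uses a plain move when the target register is unused:

#include <utility>
#include <vector>

// loc[r] is the frame index currently held in register r; want[r] is
// the index the expected frame wants there; -1 means unused.
void ResolvePermutation(std::vector<int>* loc, const std::vector<int>& want) {
  std::vector<int>& l = *loc;
  for (int r = 0; r < static_cast<int>(l.size()); r++) {
    if (want[r] == -1 || l[r] == want[r]) continue;
    for (int s = 0; s < static_cast<int>(l.size()); s++) {
      if (s != r && l[s] == want[r]) {
        std::swap(l[s], l[r]);  // Models __ xchg(source, target).
        break;
      }
    }
  }
}

// Registers settled earlier are never disturbed: a later swap only moves
// a value some later target still wants, and wanted indices are distinct.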
+
+
+void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
+ // Move memory, constants, and copies to registers. This is the
+ // final step. Since it iterates in register-code order rather than
+ // from the bottom up, special code ensures that the backing
+ // elements of copies are in their correct locations when we
+ // encounter the copies.
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ int index = expected->register_location(i);
+ if (index != kIllegalIndex) {
+ FrameElement source = elements_[index];
+ FrameElement target = expected->elements_[index];
+ Register target_reg = RegisterAllocator::ToRegister(i);
+ ASSERT(target.reg().is(target_reg));
+ switch (source.type()) {
+ case FrameElement::INVALID:
+ UNREACHABLE();
+ break;
+ case FrameElement::REGISTER:
+ ASSERT(source.Equals(target));
+ // Go to next iteration. Skips Use(target_reg) and syncing
+ // below. It is safe to skip syncing because a target
+ // register frame element would only be synced if all source
+ // elements were.
+ continue;
+ break;
+ case FrameElement::MEMORY:
+ ASSERT(index <= stack_pointer_);
+ __ movq(target_reg, Operand(rbp, fp_relative(index)));
+ break;
+
+ case FrameElement::CONSTANT:
+ __ Move(target_reg, source.handle());
+ break;
+
+ case FrameElement::COPY: {
+ int backing_index = source.index();
+ FrameElement backing = elements_[backing_index];
+ ASSERT(backing.is_memory() || backing.is_register());
+ if (backing.is_memory()) {
+ ASSERT(backing_index <= stack_pointer_);
+ // Optimization: if the backing store is itself moving to a
+ // register, fill that register first and copy from it.
+ if (expected->elements_[backing_index].is_register()) {
+ FrameElement new_backing = expected->elements_[backing_index];
+ Register new_backing_reg = new_backing.reg();
+ ASSERT(!is_used(new_backing_reg));
+ elements_[backing_index] = new_backing;
+ Use(new_backing_reg, backing_index);
+ __ movq(new_backing_reg,
+ Operand(rbp, fp_relative(backing_index)));
+ __ movq(target_reg, new_backing_reg);
+ } else {
+ __ movq(target_reg, Operand(rbp, fp_relative(backing_index)));
+ }
+ } else {
+ __ movq(target_reg, backing.reg());
+ }
+ }
+ }
+ // Ensure the proper sync state.
+ if (target.is_synced() && !source.is_synced()) {
+ __ movq(Operand(rbp, fp_relative(index)), target_reg);
+ }
+ Use(target_reg, index);
+ elements_[index] = target;
+ }
+ }
+}
+
+
+Result VirtualFrame::Pop() {
+ FrameElement element = elements_.RemoveLast();
+ int index = element_count();
+ ASSERT(element.is_valid());
+
+ bool pop_needed = (stack_pointer_ == index);
+ if (pop_needed) {
+ stack_pointer_--;
+ if (element.is_memory()) {
+ Result temp = cgen()->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ pop(temp.reg());
+ return temp;
+ }
+
+ __ addq(rsp, Immediate(kPointerSize));
+ }
+ ASSERT(!element.is_memory());
+
+ // The top element is a register, constant, or a copy. Unuse
+ // registers and follow copies to their backing store.
+ if (element.is_register()) {
+ Unuse(element.reg());
+ } else if (element.is_copy()) {
+ ASSERT(element.index() < index);
+ index = element.index();
+ element = elements_[index];
+ }
+ ASSERT(!element.is_copy());
+
+ // The element is memory, a register, or a constant.
+ if (element.is_memory()) {
+ // Memory elements could only be the backing store of a copy.
+ // Allocate the original to a register.
+ ASSERT(index <= stack_pointer_);
+ Result temp = cgen()->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ Use(temp.reg(), index);
+ FrameElement new_element =
+ FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED);
+ // Preserve the copy flag on the element.
+ if (element.is_copied()) new_element.set_copied();
+ elements_[index] = new_element;
+ __ movq(temp.reg(), Operand(rbp, fp_relative(index)));
+ return Result(temp.reg());
+ } else if (element.is_register()) {
+ return Result(element.reg());
+ } else {
+ ASSERT(element.is_constant());
+ return Result(element.handle());
+ }
+}
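
One subtlety above: popping a copy never returns the copy itself; the result is resolved through the backing slot, which may in turn force a memory backing into a freshly allocated register. A compact sketch of just the resolution step (the Elem struct is an assumption for illustration):

#include <vector>

struct Elem {
  enum Kind { kMemory, kRegister, kConstant, kCopy } kind;
  int backing;  // Valid only when kind == kCopy.
};

// A popped copy is answered by its backing element, never by itself.
Elem ResolveForPop(const std::vector<Elem>& frame, const Elem& top) {
  return top.kind == Elem::kCopy ? frame[top.backing] : top;
}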
+
+
+Result VirtualFrame::RawCallStub(CodeStub* stub) {
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ CallStub(stub);
+ Result result = cgen()->allocator()->Allocate(rax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
+ PrepareForCall(0, 0);
+ arg->ToRegister(rax);
+ arg->Unuse();
+ return RawCallStub(stub);
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
+ PrepareForCall(0, 0);
+
+ if (arg0->is_register() && arg0->reg().is(rax)) {
+ if (arg1->is_register() && arg1->reg().is(rdx)) {
+ // Wrong registers.
+ __ xchg(rax, rdx);
+ } else {
+ // Register rdx is free for arg0, which frees rax for arg1.
+ arg0->ToRegister(rdx);
+ arg1->ToRegister(rax);
+ }
+ } else {
+ // Register rax is free for arg1, which guarantees rdx is free for
+ // arg0.
+ arg1->ToRegister(rax);
+ arg0->ToRegister(rdx);
+ }
+
+ arg0->Unuse();
+ arg1->Unuse();
+ return RawCallStub(stub);
+}
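
The branching above guards against exactly one hazard: arg0 already sitting in rax, the register arg1 must end up in. When the two values start out crossed (arg0 in rax, arg1 in rdx) a single exchange fixes both; in every other case moving arg0 to rdx first frees rax. A tiny distillation of when the exchange path is taken:

enum class Where { kRax, kRdx, kElsewhere };

// Mirrors the branch structure of the two-argument CallStub: the xchg
// path fires only when the arguments start out exactly crossed.
bool NeedsXchg(Where arg0, Where arg1) {
  return arg0 == Where::kRax && arg1 == Where::kRdx;
}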
+
+
+void VirtualFrame::SyncElementBelowStackPointer(int index) {
+ // Emit code to write elements below the stack pointer to their
+ // (already allocated) stack address.
+ ASSERT(index <= stack_pointer_);
+ FrameElement element = elements_[index];
+ ASSERT(!element.is_synced());
+ switch (element.type()) {
+ case FrameElement::INVALID:
+ break;
+
+ case FrameElement::MEMORY:
+ // This function should not be called with synced elements.
+ // (memory elements are always synced).
+ UNREACHABLE();
+ break;
+
+ case FrameElement::REGISTER:
+ __ movq(Operand(rbp, fp_relative(index)), element.reg());
+ break;
+
+ case FrameElement::CONSTANT:
+ __ Move(Operand(rbp, fp_relative(index)), element.handle());
+ break;
+
+ case FrameElement::COPY: {
+ int backing_index = element.index();
+ FrameElement backing_element = elements_[backing_index];
+ if (backing_element.is_memory()) {
+ __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
+ __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
+ } else {
+ ASSERT(backing_element.is_register());
+ __ movq(Operand(rbp, fp_relative(index)), backing_element.reg());
+ }
+ break;
+ }
+ }
+ elements_[index].set_sync();
+}
+
+
+void VirtualFrame::SyncElementByPushing(int index) {
+ // Sync an element of the frame that is just above the stack pointer
+ // by pushing it.
+ ASSERT(index == stack_pointer_ + 1);
+ stack_pointer_++;
+ FrameElement element = elements_[index];
+
+ switch (element.type()) {
+ case FrameElement::INVALID:
+ __ push(Immediate(Smi::FromInt(0)));
+ break;
+
+ case FrameElement::MEMORY:
+ // No memory elements exist above the stack pointer.
+ UNREACHABLE();
+ break;
+
+ case FrameElement::REGISTER:
+ __ push(element.reg());
+ break;
+
+ case FrameElement::CONSTANT:
+ __ Move(kScratchRegister, element.handle());
+ __ push(kScratchRegister);
+ break;
+
+ case FrameElement::COPY: {
+ int backing_index = element.index();
+ FrameElement backing = elements_[backing_index];
+ ASSERT(backing.is_memory() || backing.is_register());
+ if (backing.is_memory()) {
+ __ push(Operand(rbp, fp_relative(backing_index)));
+ } else {
+ __ push(backing.reg());
+ }
+ break;
+ }
+ }
+ elements_[index].set_sync();
+}
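
Worth noting in the INVALID case above: the slot is filled with Smi zero rather than left holding whatever word rsp happens to cover, presumably so the garbage collector, which scans the stack, never sees a stale word that looks like a heap pointer. A sketch of the tagging assumption behind that reading (based on V8's Smi scheme, where a clear low bit marks a small integer):

#include <cstdint>

// Under V8-style pointer tagging, small integers have the low bit clear
// and are skipped by the collector; Smi::FromInt(0) is simply the word 0.
inline intptr_t SmiZeroWord() { return 0; }
inline bool LooksLikeHeapObject(intptr_t word) { return (word & 1) != 0; }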
+
+
+// Clear the dirty bits for the range of elements in
+// [min(stack_pointer_ + 1, begin), end].
+void VirtualFrame::SyncRange(int begin, int end) {
+ ASSERT(begin >= 0);
+ ASSERT(end < element_count());
+ // Sync elements below the range if they have not been materialized
+ // on the stack.
+ int start = Min(begin, stack_pointer_ + 1);
+
+ // If positive, we have to adjust the stack pointer.
+ int delta = end - stack_pointer_;
+ if (delta > 0) {
+ stack_pointer_ = end;
+ __ subq(rsp, Immediate(delta * kPointerSize));
+ }
+
+ for (int i = start; i <= end; i++) {
+ if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
+ }
+}
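
The bounds above amount to a two-part plan: syncing starts no higher than stack_pointer_ + 1 (elements at or below the stack pointer are written in place), and the stack first grows by however far end exceeds the stack pointer. Distilled, with the caveat that this illustrates the arithmetic only:

#include <algorithm>

struct SyncPlan {
  int start;  // First element to sync.
  int words;  // Machine words to reserve by lowering rsp first.
};

SyncPlan PlanSync(int begin, int end, int stack_pointer) {
  return { std::min(begin, stack_pointer + 1),
           std::max(0, end - stack_pointer) };
}

// Example: with stack_pointer == 4, SyncRange(2, 7) starts at element 2
// and grows the stack by 7 - 4 = 3 words before writing 2..7 in place.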
+
+
+Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ InvokeBuiltin(id, flag);
+ Result result = cgen()->allocator()->Allocate(rax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+//------------------------------------------------------------------------------
+// Virtual frame stub and IC calling functions.
+
+Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode) {
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ Call(code, rmode);
+ Result result = cgen()->allocator()->Allocate(rax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ CallRuntime(f, arg_count);
+ Result result = cgen()->allocator()->Allocate(rax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ CallRuntime(id, arg_count);
+ Result result = cgen()->allocator()->Allocate(rax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
+ // Name and receiver are on the top of the frame. The IC expects
+ // name in rcx and receiver on the stack. It does not drop the
+ // receiver.
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Result name = Pop();
+ PrepareForCall(1, 0); // One stack arg, not callee-dropped.
+ name.ToRegister(rcx);
+ name.Unuse();
+ return RawCallCodeObject(ic, mode);
+}
+
+
+Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
+ // Key and receiver are on top of the frame. The IC expects them on
+ // the stack. It does not drop them.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ PrepareForCall(2, 0); // Two stack args, neither callee-dropped.
+ return RawCallCodeObject(ic, mode);
+}
+
+
+Result VirtualFrame::CallKeyedStoreIC() {
+ // Value, key, and receiver are on the top of the frame. The IC
+ // expects value in rax and key and receiver on the stack. It does
+ // not drop the key and receiver.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ // TODO(1222589): Make the IC grab the values from the stack.
+ Result value = Pop();
+ PrepareForCall(2, 0); // Two stack args, neither callee-dropped.
+ value.ToRegister(rax);
+ value.Unuse();
+ return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
+}
+
+
+Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
+ int arg_count,
+ int loop_nesting) {
+ // Arguments, receiver, and function name are on top of the frame.
+ // The IC expects them on the stack. It does not drop the function
+ // name slot (but it does drop the rest).
+ InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = cgen()->ComputeCallInitialize(arg_count, in_loop);
+ // Spill args, receiver, and function. The call will drop args and
+ // receiver.
+ PrepareForCall(arg_count + 2, arg_count + 1);
+ return RawCallCodeObject(ic, mode);
+}
+
+
+Result VirtualFrame::CallConstructor(int arg_count) {
+ // Arguments, receiver, and function are on top of the frame. The
+ // IC expects arg count in rax, function in rdi, and the arguments
+ // and receiver on the stack.
+ Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
+ // Duplicate the function before preparing the frame.
+ PushElementAt(arg_count + 1);
+ Result function = Pop();
+ PrepareForCall(arg_count + 1, arg_count + 1); // Spill args and receiver.
+ function.ToRegister(rdi);
+
+ // Constructors are called with the number of arguments in register
+ // rax for now. Another option would be to have separate construct
+ // call trampolines, one per argument count encountered.
+ Result num_args = cgen()->allocator()->Allocate(rax);
+ ASSERT(num_args.is_valid());
+ __ movq(num_args.reg(), Immediate(arg_count));
+
+ function.Unuse();
+ num_args.Unuse();
+ return RawCallCodeObject(ic, RelocInfo::CONSTRUCT_CALL);
+}
+
+
+Result VirtualFrame::CallStoreIC() {
+ // Name, value, and receiver are on top of the frame. The IC
+ // expects name in rcx, value in rax, and receiver on the stack. It
+ // does not drop the receiver.
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Result name = Pop();
+ Result value = Pop();
+ PrepareForCall(1, 0); // One stack arg, not callee-dropped.
+
+ if (value.is_register() && value.reg().is(rcx)) {
+ if (name.is_register() && name.reg().is(rax)) {
+ // Wrong registers.
+ __ xchg(rax, rcx);
+ } else {
+ // Register rax is free for value, which frees rcx for name.
+ value.ToRegister(rax);
+ name.ToRegister(rcx);
+ }
+ } else {
+ // Register rcx is free for name, which guarantees rax is free for
+ // value.
+ name.ToRegister(rcx);
+ value.ToRegister(rax);
+ }
+
+ name.Unuse();
+ value.Unuse();
+ return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void VirtualFrame::PushTryHandler(HandlerType type) {
+ ASSERT(cgen()->HasValidEntryRegisters());
+ // Grow the expression stack by handler size less one (the return
+ // address is already pushed by a call instruction).
+ Adjust(kHandlerSize - 1);
+ __ PushTryHandler(IN_JAVASCRIPT, type);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/x64/virtual-frame-x64.h b/V8Binding/v8/src/x64/virtual-frame-x64.h
index d341a1e..577a18b 100644
--- a/V8Binding/v8/src/x64/virtual-frame-x64.h
+++ b/V8Binding/v8/src/x64/virtual-frame-x64.h
@@ -153,11 +153,8 @@ class VirtualFrame : public ZoneObject {
void SyncRange(int begin, int end);
// Make this frame so that an arbitrary frame of the same height can
- // be merged to it. Copies and constants are removed from the
- // topmost mergable_elements elements of the frame. A
- // mergable_elements of JumpTarget::kAllElements indicates constants
- // and copies are should be removed from the entire frame.
- void MakeMergable(int mergable_elements);
+ // be merged to it. Copies and constants are removed from the frame.
+ void MakeMergable();
// Prepare this virtual frame for merging to an expected frame by
// performing some state changes that do not require generating
@@ -310,8 +307,8 @@ class VirtualFrame : public ZoneObject {
// even a register. The argument is consumed by the call.
Result CallStub(CodeStub* stub, Result* arg);
- // Call stub that takes a pair of arguments passed in edx (arg0) and
- // eax (arg1). The arguments are given as results which do not have
+ // Call stub that takes a pair of arguments passed in rdx (arg0) and
+ // rax (arg1). The arguments are given as results which do not have
// to be in the proper registers or even in registers. The
// arguments are consumed by the call.
Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
@@ -372,16 +369,18 @@ class VirtualFrame : public ZoneObject {
// Pop and save an element from the top of the expression stack and
// emit a corresponding pop instruction.
void EmitPop(Register reg);
- void EmitPop(Operand operand);
+ void EmitPop(const Operand& operand);
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
void EmitPush(Register reg);
- void EmitPush(Operand operand);
+ void EmitPush(const Operand& operand);
void EmitPush(Immediate immediate);
+ // Uses kScratchRegister, emits appropriate relocation info.
+ void EmitPush(Handle<Object> value);
// Push an element on the virtual frame.
- void Push(Register reg, StaticType static_type = StaticType());
+ void Push(Register reg);
void Push(Handle<Object> value);
void Push(Smi* value) { Push(Handle<Object>(value)); }
@@ -389,7 +388,7 @@ class VirtualFrame : public ZoneObject {
// frame).
void Push(Result* result) {
if (result->is_register()) {
- Push(result->reg(), result->static_type());
+ Push(result->reg());
} else {
ASSERT(result->is_constant());
Push(result->handle());
diff --git a/V8Binding/v8/test/cctest/cctest.status b/V8Binding/v8/test/cctest/cctest.status
index a8c2180..68aabb5 100644
--- a/V8Binding/v8/test/cctest/cctest.status
+++ b/V8Binding/v8/test/cctest/cctest.status
@@ -30,6 +30,10 @@ prefix cctest
# BUG(281): This test fails on some Linuxes.
test-debug/DebuggerAgent: PASS, (PASS || FAIL) if $system == linux
+# BUG(382): Weird test. Can't guarantee that it never times out.
+test-api/ApplyInterruption: PASS || TIMEOUT
+
+
[ $arch == arm ]
test-debug: SKIP
diff --git a/V8Binding/v8/test/cctest/test-api.cc b/V8Binding/v8/test/cctest/test-api.cc
index 48157d8..426b720 100644
--- a/V8Binding/v8/test/cctest/test-api.cc
+++ b/V8Binding/v8/test/cctest/test-api.cc
@@ -551,6 +551,7 @@ THREADED_TEST(UsingExternalString) {
CHECK(isymbol->IsSymbol());
}
i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage();
}
@@ -568,6 +569,7 @@ THREADED_TEST(UsingExternalAsciiString) {
CHECK(isymbol->IsSymbol());
}
i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage();
}
@@ -2281,7 +2283,7 @@ static v8::Handle<Value> XPropertyGetter(Local<String> property,
}
-THREADED_TEST(NamedInterceporPropertyRead) {
+THREADED_TEST(NamedInterceptorPropertyRead) {
v8::HandleScope scope;
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(XPropertyGetter);
@@ -2294,6 +2296,58 @@ THREADED_TEST(NamedInterceporPropertyRead) {
}
}
+
+static v8::Handle<Value> IndexedPropertyGetter(uint32_t index,
+ const AccessorInfo& info) {
+ ApiTestFuzzer::Fuzz();
+ if (index == 37) {
+ return v8::Handle<Value>(v8_num(625));
+ }
+ return v8::Handle<Value>();
+}
+
+
+static v8::Handle<Value> IndexedPropertySetter(uint32_t index,
+ Local<Value> value,
+ const AccessorInfo& info) {
+ ApiTestFuzzer::Fuzz();
+ if (index == 39) {
+ return value;
+ }
+ return v8::Handle<Value>();
+}
+
+
+THREADED_TEST(IndexedInterceptorWithIndexedAccessor) {
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetIndexedPropertyHandler(IndexedPropertyGetter,
+ IndexedPropertySetter);
+ LocalContext context;
+ context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ Local<Script> getter_script = Script::Compile(v8_str(
+ "obj.__defineGetter__(\"3\", function(){return 5;});obj[3];"));
+ Local<Script> setter_script = Script::Compile(v8_str(
+ "obj.__defineSetter__(\"17\", function(val){this.foo = val;});"
+ "obj[17] = 23;"
+ "obj.foo;"));
+ Local<Script> interceptor_setter_script = Script::Compile(v8_str(
+ "obj.__defineSetter__(\"39\", function(val){this.foo = \"hit\";});"
+ "obj[39] = 47;"
+ "obj.foo;")); // This setter should not run, due to the interceptor.
+ Local<Script> interceptor_getter_script = Script::Compile(v8_str(
+ "obj[37];"));
+ Local<Value> result = getter_script->Run();
+ CHECK_EQ(v8_num(5), result);
+ result = setter_script->Run();
+ CHECK_EQ(v8_num(23), result);
+ result = interceptor_setter_script->Run();
+ CHECK_EQ(v8_num(23), result);
+ result = interceptor_getter_script->Run();
+ CHECK_EQ(v8_num(625), result);
+}
+
+
THREADED_TEST(MultiContexts) {
v8::HandleScope scope;
v8::Handle<ObjectTemplate> templ = ObjectTemplate::New();
@@ -5008,6 +5062,22 @@ THREADED_TEST(InterceptorStoreIC) {
}
+THREADED_TEST(InterceptorStoreICWithNoSetter) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+ LocalContext context;
+ context->Global()->Set(v8_str("o"), templ->NewInstance());
+ v8::Handle<Value> value = CompileRun(
+ "for (var i = 0; i < 1000; i++) {"
+ " o.y = 239;"
+ "}"
+ "42 + o.y");
+ CHECK_EQ(239 + 42, value->Int32Value());
+}
+
+
+
v8::Handle<Value> call_ic_function;
v8::Handle<Value> call_ic_function2;
@@ -5970,6 +6040,7 @@ THREADED_TEST(DisableAccessChecksWhileConfiguring) {
CHECK(value->BooleanValue());
}
+
static bool NamedGetAccessBlocker(Local<v8::Object> obj,
Local<Value> name,
v8::AccessType type,
@@ -6023,6 +6094,7 @@ THREADED_TEST(AccessChecksReenabledCorrectly) {
CHECK(value_2->IsUndefined());
}
+
// This tests that access check information remains on the global
// object template when creating contexts.
THREADED_TEST(AccessControlRepeatedContextCreation) {
@@ -6041,6 +6113,71 @@ THREADED_TEST(AccessControlRepeatedContextCreation) {
}
+THREADED_TEST(TurnOnAccessCheck) {
+ v8::HandleScope handle_scope;
+
+ // Create an environment with access check to the global object disabled by
+ // default.
+ v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ global_template->SetAccessCheckCallbacks(NamedGetAccessBlocker,
+ IndexedGetAccessBlocker,
+ v8::Handle<v8::Value>(),
+ false);
+ v8::Persistent<Context> context = Context::New(NULL, global_template);
+ Context::Scope context_scope(context);
+
+ // Set up a property and a number of functions.
+ context->Global()->Set(v8_str("a"), v8_num(1));
+ CompileRun("function f1() {return a;}"
+ "function f2() {return a;}"
+ "function g1() {return h();}"
+ "function g2() {return h();}"
+ "function h() {return 1;}");
+ Local<Function> f1 =
+ Local<Function>::Cast(context->Global()->Get(v8_str("f1")));
+ Local<Function> f2 =
+ Local<Function>::Cast(context->Global()->Get(v8_str("f2")));
+ Local<Function> g1 =
+ Local<Function>::Cast(context->Global()->Get(v8_str("g1")));
+ Local<Function> g2 =
+ Local<Function>::Cast(context->Global()->Get(v8_str("g2")));
+ Local<Function> h =
+ Local<Function>::Cast(context->Global()->Get(v8_str("h")));
+
+ // Get the global object.
+ v8::Handle<v8::Object> global = context->Global();
+
+ // Call f1 one time and f2 a number of times. This will ensure that f1 still
+ // uses the runtime system to retrieve property a whereas f2 uses the global
+ // load inline cache.
+ CHECK(f1->Call(global, 0, NULL)->Equals(v8_num(1)));
+ for (int i = 0; i < 4; i++) {
+ CHECK(f2->Call(global, 0, NULL)->Equals(v8_num(1)));
+ }
+
+ // Same for g1 and g2.
+ CHECK(g1->Call(global, 0, NULL)->Equals(v8_num(1)));
+ for (int i = 0; i < 4; i++) {
+ CHECK(g2->Call(global, 0, NULL)->Equals(v8_num(1)));
+ }
+
+ // Detach the global and turn on access check.
+ context->DetachGlobal();
+ context->Global()->TurnOnAccessCheck();
+
+ // Failing access check to property get results in undefined.
+ CHECK(f1->Call(global, 0, NULL)->IsUndefined());
+ CHECK(f2->Call(global, 0, NULL)->IsUndefined());
+
+ // Failing access check to function call results in exception.
+ CHECK(g1->Call(global, 0, NULL).IsEmpty());
+ CHECK(g2->Call(global, 0, NULL).IsEmpty());
+
+ // No failing access check when just returning a constant.
+ CHECK(h->Call(global, 0, NULL)->Equals(v8_num(1)));
+}
+
+
// This test verifies that pre-compilation (aka preparsing) can be called
// without initializing the whole VM. Thus we cannot run this test in a
// multi-threaded setup.
diff --git a/V8Binding/v8/test/cctest/test-assembler-x64.cc b/V8Binding/v8/test/cctest/test-assembler-x64.cc
index 43ba4e9..cd750c5 100644
--- a/V8Binding/v8/test/cctest/test-assembler-x64.cc
+++ b/V8Binding/v8/test/cctest/test-assembler-x64.cc
@@ -44,6 +44,7 @@ using v8::internal::Label;
using v8::internal::rax;
using v8::internal::rsi;
using v8::internal::rdi;
+using v8::internal::rdx;
using v8::internal::rbp;
using v8::internal::rsp;
using v8::internal::FUNCTION_CAST;
@@ -63,8 +64,8 @@ using v8::internal::greater;
// with GCC. A different convention is used on 64-bit windows.
typedef int (*F0)();
-typedef int (*F1)(int x);
-typedef int (*F2)(int x, int y);
+typedef int (*F1)(int64_t x);
+typedef int (*F2)(int64_t x, int64_t y);
#define __ assm.
@@ -130,9 +131,9 @@ TEST(AssemblerX64ArithmeticOperations) {
CHECK(buffer);
Assembler assm(buffer, actual_size);
- // Assemble a simple function that copies argument 2 and returns it.
+ // Assemble a simple function that adds its arguments and returns the sum.
__ movq(rax, rsi);
- __ add(rax, rdi);
+ __ addq(rax, rdi);
__ ret(0);
CodeDesc desc;
@@ -142,6 +143,33 @@ TEST(AssemblerX64ArithmeticOperations) {
CHECK_EQ(5, result);
}
+TEST(AssemblerX64ImulOperation) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ Assembler assm(buffer, actual_size);
+
+ // Assemble a simple function that multiplies its arguments and
+ // returns the high 64 bits of the 128-bit product.
+ __ movq(rax, rsi);
+ __ imul(rdi);
+ __ movq(rax, rdx);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F2>(buffer)(3, 2);
+ CHECK_EQ(0, result);
+ result = FUNCTION_CAST<F2>(buffer)(0x100000000l, 0x100000000l);
+ CHECK_EQ(1, result);
+ result = FUNCTION_CAST<F2>(buffer)(-0x100000000l, 0x100000000l);
+ CHECK_EQ(-1, result);
+}
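
The expected values follow from 128-bit arithmetic: imul with a single operand leaves the full product in rdx:rax, so the function returns the high 64 bits, which are 0 for 3 * 2, 1 for 2^32 * 2^32 = 2^64, and -1 for -2^32 * 2^32 = -2^64. A cross-check of the same arithmetic (assuming the GCC/Clang __int128 extension):

#include <cassert>
#include <cstdint>

int64_t HighWord(int64_t a, int64_t b) {
  return static_cast<int64_t>((static_cast<__int128>(a) * b) >> 64);
}

int main() {
  assert(HighWord(3, 2) == 0);
  assert(HighWord(int64_t{1} << 32, int64_t{1} << 32) == 1);
  assert(HighWord(-(int64_t{1} << 32), int64_t{1} << 32) == -1);
  return 0;
}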
+
TEST(AssemblerX64MemoryOperands) {
// Allocate an executable page of memory.
size_t actual_size;
@@ -215,12 +243,12 @@ TEST(AssemblerX64LoopImmediates) {
Label Loop1_body;
__ jmp(&Loop1_test);
__ bind(&Loop1_body);
- __ add(rax, Immediate(7));
+ __ addq(rax, Immediate(7));
__ bind(&Loop1_test);
- __ cmp(rax, Immediate(20));
+ __ cmpq(rax, Immediate(20));
__ j(less_equal, &Loop1_body);
// Did the loop terminate with the expected value?
- __ cmp(rax, Immediate(25));
+ __ cmpq(rax, Immediate(25));
__ j(not_equal, &Fail);
Label Loop2_test;
@@ -228,12 +256,12 @@ TEST(AssemblerX64LoopImmediates) {
__ movq(rax, Immediate(0x11FEED00));
__ jmp(&Loop2_test);
__ bind(&Loop2_body);
- __ add(rax, Immediate(-0x1100));
+ __ addq(rax, Immediate(-0x1100));
__ bind(&Loop2_test);
- __ cmp(rax, Immediate(0x11FE8000));
+ __ cmpq(rax, Immediate(0x11FE8000));
__ j(greater, &Loop2_body);
// Did the loop terminate with the expected value?
- __ cmp(rax, Immediate(0x11FE7600));
+ __ cmpq(rax, Immediate(0x11FE7600));
__ j(not_equal, &Fail);
__ movq(rax, Immediate(1));
@@ -248,4 +276,5 @@ TEST(AssemblerX64LoopImmediates) {
int result = FUNCTION_CAST<F0>(buffer)();
CHECK_EQ(1, result);
}
+
#undef __
diff --git a/V8Binding/v8/test/cctest/test-debug.cc b/V8Binding/v8/test/cctest/test-debug.cc
index 92f48e1..a884d77 100644
--- a/V8Binding/v8/test/cctest/test-debug.cc
+++ b/V8Binding/v8/test/cctest/test-debug.cc
@@ -2237,6 +2237,52 @@ TEST(DebugStepKeyedLoadLoop) {
}
+// Test of the stepping mechanism for keyed store in a loop.
+TEST(DebugStepKeyedStoreLoop) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ // Create a function for testing stepping of keyed store. The statement 'y=1'
+ // is there to have more than one breakable statement in the loop (TODO(315)).
+ v8::Local<v8::Function> foo = CompileFunction(
+ &env,
+ "function foo(a) {\n"
+ " var len = a.length;\n"
+ " for (var i = 0; i < len; i++) {\n"
+ " y = 1;\n"
+ " a[i] = 42;\n"
+ " }\n"
+ "}\n",
+ "foo");
+
+ // Create array [0,1,2,3,4,5,6,7,8,9]
+ v8::Local<v8::Array> a = v8::Array::New(10);
+ for (int i = 0; i < 10; i++) {
+ a->Set(v8::Number::New(i), v8::Number::New(i));
+ }
+
+ // Call function without any break points to ensure inlining is in place.
+ const int kArgc = 1;
+ v8::Handle<v8::Value> args[kArgc] = { a };
+ foo->Call(env->Global(), kArgc, args);
+
+ // Register a debug event listener which steps and counts.
+ v8::Debug::SetDebugEventListener(DebugEventStep);
+
+ // Set up a break point and step through the function.
+ SetBreakPoint(foo, 3);
+ step_action = StepNext;
+ break_point_hit_count = 0;
+ foo->Call(env->Global(), kArgc, args);
+
+ // With stepping all break locations are hit.
+ CHECK_EQ(22, break_point_hit_count);
+
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
// Test the stepping mechanism with different ICs.
TEST(DebugStepLinearMixedICs) {
v8::HandleScope scope;
@@ -4189,57 +4235,83 @@ TEST(CallFunctionInDebugger) {
}
+// Debugger message handler which counts the number of breaks.
+static void SendContinueCommand();
+static void MessageHandlerBreakPointHitCount(
+ const v8::Debug::Message& message) {
+ if (message.IsEvent() && message.GetEvent() == v8::Break) {
+ // Count the number of breaks.
+ break_point_hit_count++;
+
+ SendContinueCommand();
+ }
+}
+
+
// Test that clearing the debug event listener actually clears all break points
// and related information.
TEST(DebuggerUnload) {
- v8::HandleScope scope;
DebugLocalContext env;
// Check debugger is unloaded before it is used.
CheckDebuggerUnloaded();
- // Add debug event listener.
+ // Set a debug event listener.
+ break_point_hit_count = 0;
v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount,
v8::Undefined());
- // Create a couple of functions for the test.
- v8::Local<v8::Function> foo =
- CompileFunction(&env, "function foo(){x=1}", "foo");
- v8::Local<v8::Function> bar =
- CompileFunction(&env, "function bar(){y=2}", "bar");
-
- // Set some break points.
- SetBreakPoint(foo, 0);
- SetBreakPoint(foo, 4);
- SetBreakPoint(bar, 0);
- SetBreakPoint(bar, 4);
-
- // Make sure that the break points are there.
- break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
- CHECK_EQ(2, break_point_hit_count);
- bar->Call(env->Global(), 0, NULL);
- CHECK_EQ(4, break_point_hit_count);
+ {
+ v8::HandleScope scope;
+ // Create a couple of functions for the test.
+ v8::Local<v8::Function> foo =
+ CompileFunction(&env, "function foo(){x=1}", "foo");
+ v8::Local<v8::Function> bar =
+ CompileFunction(&env, "function bar(){y=2}", "bar");
+
+ // Set some break points.
+ SetBreakPoint(foo, 0);
+ SetBreakPoint(foo, 4);
+ SetBreakPoint(bar, 0);
+ SetBreakPoint(bar, 4);
+
+ // Make sure that the break points are there.
+ break_point_hit_count = 0;
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(2, break_point_hit_count);
+ bar->Call(env->Global(), 0, NULL);
+ CHECK_EQ(4, break_point_hit_count);
+ }
- // Remove the debug event listener without clearing breakpoints.
+ // Remove the debug event listener without clearing breakpoints. Do this
+ // outside a handle scope.
v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded(true);
- // Set a new debug event listener.
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount,
- v8::Undefined());
- // Check that the break points was actually cleared.
+ // Now set a debug message handler.
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
- CHECK_EQ(0, break_point_hit_count);
+ v8::Debug::SetMessageHandler2(MessageHandlerBreakPointHitCount);
+ {
+ v8::HandleScope scope;
- // Set break points and run again.
- SetBreakPoint(foo, 0);
- SetBreakPoint(foo, 4);
- foo->Call(env->Global(), 0, NULL);
- CHECK_EQ(2, break_point_hit_count);
+ // Get the test functions again.
+ v8::Local<v8::Function> foo =
+ v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
+ v8::Local<v8::Function> bar =
+ v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("bar")));
- // Remove the debug event listener without clearing breakpoints again.
- v8::Debug::SetDebugEventListener(NULL);
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Set break points and run again.
+ SetBreakPoint(foo, 0);
+ SetBreakPoint(foo, 4);
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(2, break_point_hit_count);
+ }
+
+ // Remove the debug message handler without clearing breakpoints. Do this
+ // outside a handle scope.
+ v8::Debug::SetMessageHandler2(NULL);
CheckDebuggerUnloaded(true);
}
@@ -5185,3 +5257,40 @@ TEST(ExceptionMessageWhenMessageHandlerIsReset) {
CHECK_EQ(1, exception_event_count);
}
+
+
+// Tests that the after-compile event is sent when there are provisional
+// breakpoints outside the script's line range.
+TEST(ProvisionalBreakpointOnLineOutOfRange) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ env.ExposeDebug();
+ const char* script = "function f() {};";
+ const char* resource_name = "test_resource";
+
+ // Set a couple of provisional breakpoints on lines outside the script's
+ // line range.
+ int sbp1 = SetScriptBreakPointByNameFromJS(resource_name, 3,
+ -1 /* no column */);
+ int sbp2 = SetScriptBreakPointByNameFromJS(resource_name, 5, 5);
+
+ after_compile_message_count = 0;
+ v8::Debug::SetMessageHandler2(AfterCompileMessageHandler);
+
+ v8::ScriptOrigin origin(
+ v8::String::New(resource_name),
+ v8::Integer::New(10),
+ v8::Integer::New(1));
+ // Compile a script whose first line number is greater than the breakpoints'
+ // lines.
+ v8::Script::Compile(v8::String::New(script), &origin)->Run();
+
+ // If the script is compiled successfully, there is exactly one after-compile
+ // event. If an exception occurs in the debugger code, the after-compile
+ // event is not sent.
+ CHECK_EQ(1, after_compile_message_count);
+
+ ClearBreakPointFromJS(sbp1);
+ ClearBreakPointFromJS(sbp2);
+ v8::Debug::SetMessageHandler2(NULL);
+}
diff --git a/V8Binding/v8/test/cctest/test-func-name-inference.cc b/V8Binding/v8/test/cctest/test-func-name-inference.cc
index 1bfc883..28e8649 100644
--- a/V8Binding/v8/test/cctest/test-func-name-inference.cc
+++ b/V8Binding/v8/test/cctest/test-func-name-inference.cc
@@ -251,3 +251,17 @@ TEST(MultipleFuncsInLiteral) {
CheckFunctionName(script, "return 1", "MyClass.method1");
CheckFunctionName(script, "return 2", "MyClass.method1");
}
+
+
+// See http://code.google.com/p/v8/issues/detail?id=380
+TEST(Issue380) {
+ InitializeVM();
+ v8::HandleScope scope;
+
+ v8::Handle<v8::Script> script = Compile(
+ "function a() {\n"
+ "var result = function(p,a,c,k,e,d)"
+ "{return p}(\"if blah blah\",62,1976,\'a|b\'.split(\'|\'),0,{})\n"
+ "}");
+ CheckFunctionName(script, "return p", "");
+}
diff --git a/V8Binding/v8/test/cctest/test-heap.cc b/V8Binding/v8/test/cctest/test-heap.cc
index 515657f..396bcc5 100644
--- a/V8Binding/v8/test/cctest/test-heap.cc
+++ b/V8Binding/v8/test/cctest/test-heap.cc
@@ -208,7 +208,7 @@ TEST(GarbageCollection) {
v8::HandleScope sc;
// check GC when heap is empty
- int free_bytes = Heap::MaxHeapObjectSize();
+ int free_bytes = Heap::MaxObjectSizeInPagedSpace();
CHECK(Heap::CollectGarbage(free_bytes, NEW_SPACE));
// allocate a function and keep it in global object's property
@@ -782,7 +782,7 @@ TEST(Iteration) {
Factory::NewStringFromAscii(CStrVector("abcdefghij"), TENURED);
// Allocate a large string (for large object space).
- int large_size = Heap::MaxHeapObjectSize() + 1;
+ int large_size = Heap::MaxObjectSizeInPagedSpace() + 1;
char* str = new char[large_size];
for (int i = 0; i < large_size - 1; ++i) str[i] = 'a';
str[large_size - 1] = '\0';
diff --git a/V8Binding/v8/test/cctest/test-log-utils.cc b/V8Binding/v8/test/cctest/test-log-utils.cc
index 64e5900..a08a0a1 100644
--- a/V8Binding/v8/test/cctest/test-log-utils.cc
+++ b/V8Binding/v8/test/cctest/test-log-utils.cc
@@ -9,8 +9,12 @@
#include "log-utils.h"
#include "cctest.h"
+using v8::internal::CStrVector;
using v8::internal::EmbeddedVector;
using v8::internal::LogDynamicBuffer;
+using v8::internal::LogRecordCompressor;
+using v8::internal::MutableCStrVector;
+using v8::internal::ScopedVector;
using v8::internal::Vector;
// Fills 'ref_buffer' with test data: a sequence of two-digit
@@ -47,9 +51,13 @@ static inline void CheckEqualsHelper(const char* file, int line,
const Vector<V>& value) {
if (expected.length() != value.length()) {
V8_Fatal(file, line, "CHECK_EQ(%s, %s) failed\n"
- "# Vectors lengths differ: %d expected, %d found",
+ "# Vectors lengths differ: %d expected, %d found\n"
+ "# Expected: %.*s\n"
+ "# Found: %.*s",
expected_source, value_source,
- expected.length(), value.length());
+ expected.length(), value.length(),
+ expected.length(), expected.start(),
+ value.length(), value.start());
}
if (strncmp(expected.start(), value.start(), expected.length()) != 0) {
V8_Fatal(file, line, "CHECK_EQ(%s, %s) failed\n"
@@ -124,9 +132,178 @@ TEST(DynaBufSealing) {
// Check the seal.
EmbeddedVector<char, 50> seal_buf;
CHECK_EQ(seal_size, ReadData(&dynabuf, 100, &seal_buf));
- CHECK_EQ(v8::internal::CStrVector(seal), seal_buf.SubVector(0, seal_size));
+ CHECK_EQ(CStrVector(seal), seal_buf.SubVector(0, seal_size));
// Verify that there's no data beyond the seal.
CHECK_EQ(0, ReadData(&dynabuf, 100 + seal_size, &buf));
}
+
+TEST(CompressorStore) {
+ LogRecordCompressor comp(2);
+ const Vector<const char> empty = CStrVector("");
+ CHECK(comp.Store(empty));
+ CHECK(!comp.Store(empty));
+ CHECK(!comp.Store(empty));
+ const Vector<const char> aaa = CStrVector("aaa");
+ CHECK(comp.Store(aaa));
+ CHECK(!comp.Store(aaa));
+ CHECK(!comp.Store(aaa));
+ CHECK(comp.Store(empty));
+ CHECK(!comp.Store(empty));
+ CHECK(!comp.Store(empty));
+}
+
+
+void CheckCompression(LogRecordCompressor* comp,
+ const Vector<const char>& after) {
+ EmbeddedVector<char, 100> result;
+ CHECK(comp->RetrievePreviousCompressed(&result));
+ CHECK_EQ(after, result);
+}
+
+
+void CheckCompression(LogRecordCompressor* comp,
+ const char* after) {
+ CheckCompression(comp, CStrVector(after));
+}
+
+
+TEST(CompressorNonCompressed) {
+ LogRecordCompressor comp(0);
+ CHECK(!comp.RetrievePreviousCompressed(NULL));
+ const Vector<const char> empty = CStrVector("");
+ CHECK(comp.Store(empty));
+ CHECK(!comp.RetrievePreviousCompressed(NULL));
+ const Vector<const char> a_x_20 = CStrVector("aaaaaaaaaaaaaaaaaaaa");
+ CHECK(comp.Store(a_x_20));
+ CheckCompression(&comp, empty);
+ CheckCompression(&comp, empty);
+ CHECK(comp.Store(empty));
+ CheckCompression(&comp, a_x_20);
+ CheckCompression(&comp, a_x_20);
+}
+
+
+TEST(CompressorSingleLine) {
+ LogRecordCompressor comp(1);
+ const Vector<const char> string_1 = CStrVector("eee,ddd,ccc,bbb,aaa");
+ CHECK(comp.Store(string_1));
+ const Vector<const char> string_2 = CStrVector("fff,ddd,ccc,bbb,aaa");
+ CHECK(comp.Store(string_2));
+ // string_1 hasn't been compressed.
+ CheckCompression(&comp, string_1);
+ CheckCompression(&comp, string_1);
+ const Vector<const char> string_3 = CStrVector("hhh,ggg,ccc,bbb,aaa");
+ CHECK(comp.Store(string_3));
+ // string_2 compressed using string_1.
+ CheckCompression(&comp, "fff#1:3");
+ CheckCompression(&comp, "fff#1:3");
+ CHECK(!comp.Store(string_3));
+ // Expecting no changes.
+ CheckCompression(&comp, "fff#1:3");
+ CHECK(!comp.Store(string_3));
+ // Expecting no changes.
+ CheckCompression(&comp, "fff#1:3");
+ const Vector<const char> string_4 = CStrVector("iii,hhh,ggg,ccc,bbb,aaa");
+ CHECK(comp.Store(string_4));
+ // string_3 compressed using string_2.
+ CheckCompression(&comp, "hhh,ggg#1:7");
+ const Vector<const char> string_5 = CStrVector("nnn,mmm,lll,kkk,jjj");
+ CHECK(comp.Store(string_5));
+ // string_4 compressed using string_3.
+ CheckCompression(&comp, "iii,#1");
+ const Vector<const char> string_6 = CStrVector("nnn,mmmmmm,lll,kkk,jjj");
+ CHECK(comp.Store(string_6));
+ // string_5 hasn't been compressed.
+ CheckCompression(&comp, string_5);
+ CHECK(comp.Store(string_5));
+ // string_6 compressed using string_5.
+ CheckCompression(&comp, "nnn,mmm#1:4");
+ const Vector<const char> string_7 = CStrVector("nnnnnn,mmm,lll,kkk,jjj");
+ CHECK(comp.Store(string_7));
+ // string_5 compressed using string_6.
+ CheckCompression(&comp, "nnn,#1:7");
+ const Vector<const char> string_8 = CStrVector("xxn,mmm,lll,kkk,jjj");
+ CHECK(comp.Store(string_8));
+ // string_7 compressed using string_5.
+ CheckCompression(&comp, "nnn#1");
+ const Vector<const char> string_9 =
+ CStrVector("aaaaaaaaaaaaa,bbbbbbbbbbbbbbbbb");
+ CHECK(comp.Store(string_9));
+ // string_8 compressed using string_7.
+ CheckCompression(&comp, "xx#1:5");
+ const Vector<const char> string_10 =
+ CStrVector("aaaaaaaaaaaaa,cccccccbbbbbbbbbb");
+ CHECK(comp.Store(string_10));
+ // string_9 hasn't been compressed.
+ CheckCompression(&comp, string_9);
+ CHECK(comp.Store(string_1));
+ // string_10 compressed using string_9.
+ CheckCompression(&comp, "aaaaaaaaaaaaa,ccccccc#1:21");
+}
+
+
+
+TEST(CompressorMultiLines) {
+ const int kWindowSize = 3;
+ LogRecordCompressor comp(kWindowSize);
+ const Vector<const char> string_1 = CStrVector("eee,ddd,ccc,bbb,aaa");
+ CHECK(comp.Store(string_1));
+ const Vector<const char> string_2 = CStrVector("iii,hhh,ggg,fff,aaa");
+ CHECK(comp.Store(string_2));
+ const Vector<const char> string_3 = CStrVector("mmm,lll,kkk,jjj,aaa");
+ CHECK(comp.Store(string_3));
+ const Vector<const char> string_4 = CStrVector("nnn,hhh,ggg,fff,aaa");
+ CHECK(comp.Store(string_4));
+ const Vector<const char> string_5 = CStrVector("ooo,lll,kkk,jjj,aaa");
+ CHECK(comp.Store(string_5));
+ // string_4 compressed using string_2.
+ CheckCompression(&comp, "nnn#2:3");
+ CHECK(comp.Store(string_1));
+ // string_5 compressed using string_3.
+ CheckCompression(&comp, "ooo#2:3");
+ CHECK(comp.Store(string_4));
+ // string_1 is out of the buffer by now, so it shouldn't be compressed.
+ CHECK_GE(3, kWindowSize);
+ CheckCompression(&comp, string_1);
+ CHECK(comp.Store(string_2));
+ // string_4 compressed using itself.
+ CheckCompression(&comp, "#3");
+}
+
+
+TEST(CompressorBestSelection) {
+ LogRecordCompressor comp(3);
+ const Vector<const char> string_1 = CStrVector("eee,ddd,ccc,bbb,aaa");
+ CHECK(comp.Store(string_1));
+ const Vector<const char> string_2 = CStrVector("ddd,ccc,bbb,aaa");
+ CHECK(comp.Store(string_2));
+ const Vector<const char> string_3 = CStrVector("fff,eee,ddd,ccc,bbb,aaa");
+ CHECK(comp.Store(string_3));
+ // string_2 compressed using string_1.
+ CheckCompression(&comp, "#1:4");
+ const Vector<const char> string_4 = CStrVector("nnn,hhh,ggg,fff,aaa");
+ CHECK(comp.Store(string_4));
+ // Compressing string_3 using string_1 gives a better compression than
+ // using string_2.
+ CheckCompression(&comp, "fff,#2");
+}
+
+
+TEST(CompressorCompressibility) {
+ LogRecordCompressor comp(2);
+ const Vector<const char> string_1 = CStrVector("eee,ddd,ccc,bbb,aaa");
+ CHECK(comp.Store(string_1));
+ const Vector<const char> string_2 = CStrVector("ccc,bbb,aaa");
+ CHECK(comp.Store(string_2));
+ const Vector<const char> string_3 = CStrVector("aaa");
+ CHECK(comp.Store(string_3));
+ // string_2 compressed using string_1.
+ CheckCompression(&comp, "#1:8");
+ const Vector<const char> string_4 = CStrVector("xxx");
+ CHECK(comp.Store(string_4));
+ // string_3 can't be compressed using string_2 --- too short.
+ CheckCompression(&comp, string_3);
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/V8Binding/v8/test/cctest/test-mark-compact.cc b/V8Binding/v8/test/cctest/test-mark-compact.cc
index 53cff68..8db7339 100644
--- a/V8Binding/v8/test/cctest/test-mark-compact.cc
+++ b/V8Binding/v8/test/cctest/test-mark-compact.cc
@@ -86,8 +86,8 @@ TEST(Promotion) {
v8::HandleScope sc;
// Allocate a fixed array in the new space.
- int array_size =
- (Heap::MaxHeapObjectSize() - Array::kHeaderSize) / (kPointerSize * 4);
+ int array_size = (Heap::MaxObjectSizeInPagedSpace() - Array::kHeaderSize) /
+ (kPointerSize * 4);
Object* obj = Heap::AllocateFixedArray(array_size);
CHECK(!obj->IsFailure());
@@ -118,7 +118,8 @@ TEST(NoPromotion) {
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
// Allocate a big Fixed array in the new space.
- int size = (Heap::MaxHeapObjectSize() - Array::kHeaderSize) / kPointerSize;
+ int size = (Heap::MaxObjectSizeInPagedSpace() - Array::kHeaderSize) /
+ kPointerSize;
Object* obj = Heap::AllocateFixedArray(size);
Handle<FixedArray> array(FixedArray::cast(obj));
diff --git a/V8Binding/v8/test/message/overwritten-builtins.js b/V8Binding/v8/test/message/overwritten-builtins.js
new file mode 100644
index 0000000..8a838de
--- /dev/null
+++ b/V8Binding/v8/test/message/overwritten-builtins.js
@@ -0,0 +1,31 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+String.prototype.split = function() { return "SPLIT ERROR"; };
+Array.prototype.join = function() { return []; };
+
+undefined.x
diff --git a/V8Binding/v8/test/message/overwritten-builtins.out b/V8Binding/v8/test/message/overwritten-builtins.out
new file mode 100644
index 0000000..ccf2924
--- /dev/null
+++ b/V8Binding/v8/test/message/overwritten-builtins.out
@@ -0,0 +1,30 @@
+# Copyright 2009 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*%(basename)s:31: TypeError: Cannot read property 'x' of undefined
+undefined.x
+ ^
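
The new message test pins down that formatting an uncaught TypeError does not depend on builtins a script can overwrite. A standalone sketch of the hazard being guarded against (print() is the d8 shell's output helper, assumed here):

    // Clobber builtins that a naive message formatter might call.
    String.prototype.split = function() { return "SPLIT ERROR"; };
    Array.prototype.join = function() { return []; };
    try {
      undefined.x;  // throws TypeError
    } catch (e) {
      // The message text must be intact despite the clobbered builtins.
      print(e.message);
    }
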
diff --git a/V8Binding/v8/test/mjsunit/arguments-apply.js b/V8Binding/v8/test/mjsunit/arguments-apply.js
new file mode 100644
index 0000000..5a91228
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/arguments-apply.js
@@ -0,0 +1,134 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function ReturnArguments() {
+ return arguments;
+}
+
+function ReturnReceiver() {
+ return this;
+}
+
+
+function Global() {
+ return ReturnArguments.apply(this, arguments);
+}
+
+assertEquals(0, Global().length);
+assertEquals(1, Global(1).length);
+assertEquals(2, Global(2)[0]);
+assertEquals(2, Global(3, 4).length);
+assertEquals(3, Global(3, 4)[0]);
+assertEquals(4, Global(3, 4)[1]);
+
+
+function Local() {
+ var object = { f: ReturnArguments };
+ return object.f.apply(this, arguments);
+}
+
+assertEquals(0, Local().length);
+assertEquals(1, Local(1).length);
+assertEquals(2, Local(2)[0]);
+assertEquals(2, Local(3, 4).length);
+assertEquals(3, Local(3, 4)[0]);
+assertEquals(4, Local(3, 4)[1]);
+
+
+function ShadowArguments() {
+ var arguments = [3, 4];
+ return ReturnArguments.apply(this, arguments);
+}
+
+assertEquals(2, ShadowArguments().length);
+assertEquals(3, ShadowArguments()[0]);
+assertEquals(4, ShadowArguments()[1]);
+
+
+function NonObjectReceiver(receiver) {
+ return ReturnReceiver.apply(receiver, arguments);
+}
+
+assertEquals(42, NonObjectReceiver(42));
+assertEquals("object", typeof NonObjectReceiver(42));
+assertTrue(NonObjectReceiver(42) instanceof Number);
+assertTrue(this === NonObjectReceiver(null));
+assertTrue(this === NonObjectReceiver(void 0));
+
+
+function FunctionReceiver() {
+ return ReturnReceiver.apply(Object, arguments);
+}
+
+assertTrue(Object === FunctionReceiver());
+
+
+function ShadowApply() {
+ function f() { return 42; }
+ f.apply = function() { return 87; }
+ return f.apply(this, arguments);
+}
+
+assertEquals(87, ShadowApply());
+assertEquals(87, ShadowApply(1));
+assertEquals(87, ShadowApply(1, 2));
+
+
+function CallNonFunction() {
+ var object = { apply: Function.prototype.apply };
+ return object.apply(this, arguments);
+}
+
+assertThrows(CallNonFunction, TypeError);
+
+
+// Make sure that the stack after the apply optimization is
+// in a valid state.
+function SimpleStackCheck() {
+ var sentinel = 42;
+ var result = ReturnArguments.apply(this, arguments);
+ assertTrue(result != null);
+ assertEquals(42, sentinel);
+}
+
+SimpleStackCheck();
+
+
+function ShadowArgumentsWithConstant() {
+ var arguments = null;
+ return ReturnArguments.apply(this, arguments);
+}
+
+assertEquals(0, ShadowArgumentsWithConstant().length);
+assertEquals(0, ShadowArgumentsWithConstant(1).length);
+assertEquals(0, ShadowArgumentsWithConstant(1, 2).length);
+
+
+// Make sure we can deal with unfolding lots of arguments on the
+// stack even in the presence of the apply optimizations.
+var array = new Array(2048);
+assertEquals(2048, Global.apply(this, array).length);
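
arguments-apply.js exercises the special handling of f.apply(this, arguments), which lets V8 forward the caller's arguments without first materializing an arguments array. A sketch of the forwarding pattern the optimization targets:

    function sum() {
      var total = 0;
      for (var i = 0; i < arguments.length; i++) total += arguments[i];
      return total;
    }

    function forward() {
      // Receiver and arguments are passed straight through; this shape
      // is what the apply optimization recognizes.
      return sum.apply(this, arguments);
    }

    // forward(1, 2, 3) evaluates to 6.
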
diff --git a/V8Binding/v8/test/mjsunit/arguments-lazy.js b/V8Binding/v8/test/mjsunit/arguments-lazy.js
new file mode 100644
index 0000000..794afc3
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/arguments-lazy.js
@@ -0,0 +1,47 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Make sure we don't allocate the arguments object over and
+// over again.
+function SharedLazyArguments() {
+ return arguments === arguments;
+}
+
+assertTrue(SharedLazyArguments());
+
+
+// Make sure that accessing arguments doesn't clobber any
+// local variables called arguments.
+function ArgumentsOverride(x) {
+ var arguments = 42;
+ x = x ? x : 0;
+ return x + arguments;
+}
+
+assertEquals(42, ArgumentsOverride());
+assertEquals(43, ArgumentsOverride(1));
+assertEquals(44, ArgumentsOverride(2,3));
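
The lazy-arguments test relies on the arguments object being materialized at most once per activation. A two-line sketch of the identity it checks:

    function f() {
      // Both reads must observe the same lazily allocated object.
      return arguments === arguments;  // true
    }
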
diff --git a/V8Binding/v8/test/mjsunit/array-sort.js b/V8Binding/v8/test/mjsunit/array-sort.js
index ef75dcc..a082abc 100644
--- a/V8Binding/v8/test/mjsunit/array-sort.js
+++ b/V8Binding/v8/test/mjsunit/array-sort.js
@@ -214,6 +214,30 @@ TestNonArrayLongerLength(500000);
TestNonArrayLongerLength(Math.pow(2,32) - 1);
+function TestNonArrayWithAccessors() {
+ // Regression test for issue 346, more info at URL
+ // http://code.google.com/p/v8/issues/detail?id=346
+ // Reported by nth10sd, test based on this report.
+ var x = {};
+ x[0] = 42;
+ x.__defineGetter__("1", function(){return this.foo;});
+ x.__defineSetter__("1", function(val){this.foo = val;});
+ x[1] = 49;
+ x[3] = 37;
+ x.length = 4;
+ Array.prototype.sort.call(x);
+ // Behavior of sort with accessors is undefined. This accessor is
+ // well-behaved (acts like a normal property), so it should work.
+ assertEquals(4, x.length, "sortaccessors length");
+ assertEquals(37, x[0], "sortaccessors first");
+ assertEquals(42, x[1], "sortaccessors second");
+ assertEquals(49, x[2], "sortaccessors third");
+ assertFalse(3 in x, "sortaccessors fourth");
+}
+
+TestNonArrayWithAccessors();
+
+
function TestInheritedElementSort(depth) {
var length = depth * 2 + 3;
var obj = {length: length};
@@ -268,7 +292,7 @@ function TestSparseInheritedElementSort(scale) {
assertEquals(i, y[i], name + "value" + i);
}
for (var i = 10; i < length; i++) {
- assertEquals(x.hasOwnProperty(i), y.hasOwnProperty(i),
+ assertEquals(x.hasOwnProperty(i), y.hasOwnProperty(i),
name + "hasundef" + i);
assertEquals(undefined, y[i], name+"undefined"+i);
if (x.hasOwnProperty(i)) {
@@ -282,7 +306,7 @@ TestSparseInheritedElementSort(100);
TestSparseInheritedElementSort(1000);
function TestSpecialCasesInheritedElementSort() {
-
+
var x = {
1:"d1",
2:"c1",
@@ -309,11 +333,11 @@ function TestSpecialCasesInheritedElementSort() {
}
};
Array.prototype.sort.call(x);
-
+
var name = "SpecialInherit-";
-
+
assertEquals(10000, x.length, name + "length");
- var sorted = ["a2", "a3", "b1", "b2", "c1", "c2", "d1", "d2", "e3",
+ var sorted = ["a2", "a3", "b1", "b2", "c1", "c2", "d1", "d2", "e3",
undefined, undefined, undefined];
for (var i = 0; i < sorted.length; i++) {
assertTrue(x.hasOwnProperty(i), name + "has" + i)
@@ -321,7 +345,6 @@ function TestSpecialCasesInheritedElementSort() {
}
assertFalse(x.hasOwnProperty(sorted.length), name + "haspost");
assertFalse(sorted.length in x, name + "haspost2");
-
assertTrue(x.hasOwnProperty(10), name + "hasundefined10");
assertEquals(undefined, x[10], name + "undefined10");
assertTrue(x.hasOwnProperty(100), name + "hasundefined100");
@@ -332,11 +355,8 @@ function TestSpecialCasesInheritedElementSort() {
assertEquals(undefined, x[2000], name + "undefined2000");
assertTrue(x.hasOwnProperty(8000), name + "hasundefined8000");
assertEquals(undefined, x[8000], name + "undefined8000");
-
assertFalse(x.hasOwnProperty(12000), name + "has12000");
assertEquals("XX", x[12000], name + "XX12000");
-
}
TestSpecialCasesInheritedElementSort();
-
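
The new accessor test drives Array.prototype.sort over a plain object; sort works on anything with indexed properties and a length. A minimal sketch of sorting an array-like, including how holes migrate to the tail in this implementation:

    var o = { 0: "b", 2: "a", length: 3 };
    Array.prototype.sort.call(o);
    // o[0] == "a", o[1] == "b", and the hole ends up last: !(2 in o)
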
diff --git a/V8Binding/v8/test/mjsunit/big-object-literal.js b/V8Binding/v8/test/mjsunit/big-object-literal.js
index 0099ce9..8417951 100644
--- a/V8Binding/v8/test/mjsunit/big-object-literal.js
+++ b/V8Binding/v8/test/mjsunit/big-object-literal.js
@@ -84,7 +84,7 @@ function testLiteral(size, array_in_middle) {
}
// The sizes to test.
-var sizes = [1, 2, 100, 200, 400];
+var sizes = [1, 2, 100, 200, 350];
// Run the test.
for (var i = 0; i < sizes.length; i++) {
diff --git a/V8Binding/v8/test/mjsunit/compare-nan.js b/V8Binding/v8/test/mjsunit/compare-nan.js
index 29818c8..fc40acc 100644
--- a/V8Binding/v8/test/mjsunit/compare-nan.js
+++ b/V8Binding/v8/test/mjsunit/compare-nan.js
@@ -28,17 +28,17 @@
var a = [NaN, -1, 0, 1, 1.2, -7.9, true, false, 'foo', '0', 'NaN' ];
for (var i in a) {
var x = a[i];
- assertFalse(NaN == x);
- assertFalse(NaN === x);
- assertFalse(NaN < x);
- assertFalse(NaN > x);
- assertFalse(NaN <= x);
- assertFalse(NaN >= x);
+ assertFalse(NaN == x, "NaN == " + x);
+ assertFalse(NaN === x, "NaN === " + x);
+ assertFalse(NaN < x, "NaN < " + x);
+ assertFalse(NaN > x, "NaN > " + x);
+ assertFalse(NaN <= x, "NaN <= " + x);
+ assertFalse(NaN >= x, "NaN >= " + x);
- assertFalse(x == NaN);
- assertFalse(x === NaN);
- assertFalse(x < NaN);
- assertFalse(x > NaN);
- assertFalse(x <= NaN);
- assertFalse(x >= NaN);
+ assertFalse(x == NaN, "" + x + " == NaN");
+ assertFalse(x === NaN, "" + x + " === NaN");
+ assertFalse(x < NaN, "" + x + " < NaN");
+ assertFalse(x > NaN, "" + x + " > NaN");
+ assertFalse(x <= NaN, "" + x + " <= NaN");
+ assertFalse(x >= NaN, "" + x + " >= NaN");
}
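
The added failure messages make it obvious which NaN comparison regressed. The semantics under test: NaN compares false with every value, itself included, which is also why self-inequality is the canonical NaN check:

    var x = NaN;
    x == x;    // false
    x === x;   // false
    function isReallyNaN(v) { return v !== v; }  // true only for NaN
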
diff --git a/V8Binding/v8/test/mjsunit/date-parse.js b/V8Binding/v8/test/mjsunit/date-parse.js
index 4464727..56ceba3 100644
--- a/V8Binding/v8/test/mjsunit/date-parse.js
+++ b/V8Binding/v8/test/mjsunit/date-parse.js
@@ -254,7 +254,7 @@ testCasesMisc.forEach(testDateParseMisc);
for (var i = 0; i < 24 * 365 * 100; i += 95) {
var ms = i * (3600 * 1000);
var s = (new Date(ms)).toString();
- assertEquals(ms, Date.parse(s), s);
+ assertEquals(ms, Date.parse(s), "parse own: " + s);
}
// Negative tests.
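
The tagged assertion above covers the round-trip property the loop verifies: any date string the engine prints must parse back to the same millisecond value. In isolation:

    var ms = 95 * 3600 * 1000;      // an arbitrary whole-second instant
    var s = new Date(ms).toString();
    Date.parse(s) === ms;           // must hold for every printed date
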
diff --git a/V8Binding/v8/test/mjsunit/debug-scopes.js b/V8Binding/v8/test/mjsunit/debug-scopes.js
new file mode 100644
index 0000000..7b477e1
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/debug-scopes.js
@@ -0,0 +1,660 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Tests the scope chain and scope contents exposed through the debugger
+// for local, with, closure and global scopes.
+
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var test_name;
+var listener_delegate;
+var listener_called;
+var exception;
+var begin_test_count = 0;
+var end_test_count = 0;
+var break_count = 0;
+
+
+// Debug event listener which delegates.
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ break_count++;
+ listener_called = true;
+ listener_delegate(exec_state)
+ }
+ } catch (e) {
+ exception = e;
+ }
+}
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+
+// Initialize for a new test.
+function BeginTest(name) {
+ test_name = name;
+ listener_delegate = null;
+ listener_called = false;
+ exception = null;
+ begin_test_count++;
+}
+
+
+// Check result of a test.
+function EndTest() {
+ assertTrue(listener_called, "listener not called for " + test_name);
+ assertNull(exception, test_name);
+ end_test_count++;
+}
+
+
+// Check that the scope chain contains the expected types of scopes.
+function CheckScopeChain(scopes, exec_state) {
+ assertEquals(scopes.length, exec_state.frame().scopeCount());
+ for (var i = 0; i < scopes.length; i++) {
+ var scope = exec_state.frame().scope(i);
+ assertTrue(scope.isScope());
+ assertEquals(scopes[i], scope.scopeType());
+
+ // Check the global object when hitting the global scope.
+ if (scopes[i] == debug.ScopeType.Global) {
+ assertEquals(this, scope.scopeObject().value());
+ }
+ }
+
+ // Get the debug command processor.
+ var dcp = exec_state.debugCommandProcessor();
+
+ // Send a scopes request and check the result.
+ var json;
+ request_json = '{"seq":0,"type":"request","command":"scopes"}'
+ var response_json = dcp.processDebugJSONRequest(request_json);
+ var response = JSON.parse(response_json);
+ assertEquals(scopes.length, response.body.scopes.length);
+ for (var i = 0; i < scopes.length; i++) {
+ assertEquals(i, response.body.scopes[i].index);
+ assertEquals(scopes[i], response.body.scopes[i].type);
+ if (scopes[i] == debug.ScopeType.Local ||
+ scopes[i] == debug.ScopeType.Closure) {
+ assertTrue(response.body.scopes[i].object.ref < 0);
+ } else {
+ assertTrue(response.body.scopes[i].object.ref >= 0);
+ }
+ var found = false;
+ for (var j = 0; j < response.refs.length && !found; j++) {
+ found = response.refs[j].handle == response.body.scopes[i].object.ref;
+ }
+ assertTrue(found, "Scope object " + response.body.scopes[i].object.ref + " not found");
+ }
+}
+
+
+// Check that the content of the scope is as expected. For functions just check
+// that there is a function.
+function CheckScopeContent(content, number, exec_state) {
+ var scope = exec_state.frame().scope(number)
+ var count = 0;
+ for (var p in content) {
+ var property_mirror = scope.scopeObject().property(p);
+ assertFalse(property_mirror.isUndefined(), 'property ' + p + ' not found in scope');
+ if (typeof(content[p]) === 'function') {
+ assertTrue(property_mirror.value().isFunction());
+ } else {
+ assertEquals(content[p], property_mirror.value().value(), 'property ' + p + ' has unexpected value');
+ }
+ count++;
+ }
+
+ // 'arguments' might be exposed in the local and closure scopes. Just
+ // ignore this.
+ var scope_size = scope.scopeObject().properties().length;
+ if (!scope.scopeObject().property('arguments').isUndefined()) {
+ scope_size--;
+ }
+ if (count != scope_size) {
+ print('Names found in scope:');
+ var names = scope.scopeObject().propertyNames();
+ for (var i = 0; i < names.length; i++) {
+ print(names[i]);
+ }
+ }
+ assertEquals(count, scope_size);
+
+ // Get the debug command processor.
+ var dcp = exec_state.debugCommandProcessor();
+
+ // Send a scope request for information on a single scope and check the
+ // result.
+ request_json = '{"seq":0,"type":"request","command":"scope","arguments":{"number":'
+ request_json += scope.scopeIndex();
+ request_json += '}}'
+ var response_json = dcp.processDebugJSONRequest(request_json);
+ var response = JSON.parse(response_json);
+ assertEquals(scope.scopeType(), response.body.type);
+ assertEquals(number, response.body.index);
+ if (scope.scopeType() == debug.ScopeType.Local ||
+ scope.scopeType() == debug.ScopeType.Closure) {
+ assertTrue(response.body.object.ref < 0);
+ } else {
+ assertTrue(response.body.object.ref >= 0);
+ }
+ var found = false;
+ for (var i = 0; i < response.refs.length && !found; i++) {
+ found = response.refs[i].handle == response.body.object.ref;
+ }
+ assertTrue(found, "Scope object " + response.body.object.ref + " not found");
+}
+
+
+// Simple empty local scope.
+BeginTest("Local 1");
+
+function local_1() {
+ debugger;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({}, 0, exec_state);
+}
+local_1()
+EndTest();
+
+
+// Local scope with a parameter.
+BeginTest("Local 2");
+
+function local_2(a) {
+ debugger;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1}, 0, exec_state);
+}
+local_2(1)
+EndTest();
+
+
+// Local scope with a parameter and a local variable.
+BeginTest("Local 3");
+
+function local_3(a) {
+ var x = 3;
+ debugger;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,x:3}, 0, exec_state);
+}
+local_3(1)
+EndTest();
+
+
+// Local scope with parameters and local variables.
+BeginTest("Local 4");
+
+function local_4(a, b) {
+ var x = 3;
+ var y = 4;
+ debugger;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4}, 0, exec_state);
+}
+local_4(1, 2)
+EndTest();
+
+
+// Empty local scope with use of eval.
+BeginTest("Local 5");
+
+function local_5() {
+ eval('');
+ debugger;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({}, 0, exec_state);
+}
+local_5()
+EndTest();
+
+
+// Local introducing local variable using eval.
+BeginTest("Local 6");
+
+function local_6() {
+ eval('var i = 5');
+ debugger;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({i:5}, 0, exec_state);
+}
+local_6()
+EndTest();
+
+
+// Local scope with parameters, local variables and local variable introduced
+// using eval.
+BeginTest("Local 7");
+
+function local_7(a, b) {
+ var x = 3;
+ var y = 4;
+ eval('var i = 5');
+ eval('var j = 6');
+ debugger;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6}, 0, exec_state);
+}
+local_7(1, 2)
+EndTest();
+
+
+// Single empty with block.
+BeginTest("With 1");
+
+function with_1() {
+ with({}) {
+ debugger;
+ }
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.With,
+ debug.ScopeType.Local,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({}, 0, exec_state);
+}
+with_1()
+EndTest();
+
+
+// Nested empty with blocks.
+BeginTest("With 2");
+
+function with_2() {
+ with({}) {
+ with({}) {
+ debugger;
+ }
+ }
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.With,
+ debug.ScopeType.With,
+ debug.ScopeType.Local,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({}, 0, exec_state);
+ CheckScopeContent({}, 1, exec_state);
+}
+with_2()
+EndTest();
+
+
+// With block using an in-place object literal.
+BeginTest("With 3");
+
+function with_3() {
+ with({a:1,b:2}) {
+ debugger;
+ }
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.With,
+ debug.ScopeType.Local,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,b:2}, 0, exec_state);
+}
+with_3()
+EndTest();
+
+
+// Nested with blocks using in-place object literals.
+BeginTest("With 4");
+
+function with_4() {
+ with({a:1,b:2}) {
+ with({a:2,b:1}) {
+ debugger;
+ }
+ }
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.With,
+ debug.ScopeType.With,
+ debug.ScopeType.Local,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:2,b:1}, 0, exec_state);
+ CheckScopeContent({a:1,b:2}, 1, exec_state);
+}
+with_4()
+EndTest();
+
+
+// Nested with blocks using existing object.
+BeginTest("With 5");
+
+var with_object = {c:3,d:4};
+function with_5() {
+ with(with_object) {
+ with(with_object) {
+ debugger;
+ }
+ }
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.With,
+ debug.ScopeType.With,
+ debug.ScopeType.Local,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent(with_object, 0, exec_state);
+ CheckScopeContent(with_object, 1, exec_state);
+ assertEquals(exec_state.frame().scope(0).scopeObject(), exec_state.frame().scope(1).scopeObject());
+ assertEquals(with_object, exec_state.frame().scope(1).scopeObject().value());
+}
+with_5()
+EndTest();
+
+
+// Simple closure formed by returning an inner function referring to the
+// outer function's arguments.
+BeginTest("Closure 1");
+
+function closure_1(a) {
+ function f() {
+ debugger;
+ return a;
+ };
+ return f;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1}, 1, exec_state);
+}
+closure_1(1)()
+EndTest();
+
+
+// Simple closure formed by returning an inner function referring to the
+// outer function's arguments. Due to VM optimizations, parts of the actual
+// closure are missing from the debugger information.
+BeginTest("Closure 2");
+
+function closure_2(a, b) {
+ var x = a + 2;
+ var y = b + 2;
+ function f() {
+ debugger;
+ return a + x;
+ };
+ return f;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,x:3}, 1, exec_state);
+}
+closure_2(1, 2)()
+EndTest();
+
+
+// Simple closure formed by returning an inner function referring to the
+// outer function's arguments. Using all arguments and locals from the
+// outer function in the inner function makes these part of the debugger
+// information on the closure.
+BeginTest("Closure 3");
+
+function closure_3(a, b) {
+ var x = a + 2;
+ var y = b + 2;
+ function f() {
+ debugger;
+ return a + b + x + y;
+ };
+ return f;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4}, 1, exec_state);
+}
+closure_3(1, 2)()
+EndTest();
+
+
+
+// Simple closure formed by returning an inner function referring to the
+// outer function's arguments. Using all arguments and locals from the
+// outer function in the inner function makes these part of the debugger
+// information on the closure. Use the inner function as well...
+BeginTest("Closure 4");
+
+function closure_4(a, b) {
+ var x = a + 2;
+ var y = b + 2;
+ function f() {
+ debugger;
+ if (f) {
+ return a + b + x + y;
+ }
+ };
+ return f;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4,f:function(){}}, 1, exec_state);
+}
+closure_4(1, 2)()
+EndTest();
+
+
+
+// Simple closure formed by returning an inner function referring to the
+// outer function's arguments. In the presence of eval, all arguments and
+// locals (including the inner function itself) from the outer function
+// become part of the debugger information on the closure.
+BeginTest("Closure 5");
+
+function closure_5(a, b) {
+ var x = 3;
+ var y = 4;
+ function f() {
+ eval('');
+ debugger;
+ return 1;
+ };
+ return f;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4,f:function(){}}, 1, exec_state);
+}
+closure_5(1, 2)()
+EndTest();
+
+
+// Two closures. Due to optimizations only the parts actually used are provided
+// through the debugger information.
+BeginTest("Closure 6");
+function closure_6(a, b) {
+ function f(a, b) {
+ var x = 3;
+ var y = 4;
+ return function() {
+ var x = 3;
+ var y = 4;
+ debugger;
+ some_global = a;
+ return f;
+ }
+ }
+ return f(a, b);
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1}, 1, exec_state);
+ CheckScopeContent({f:function(){}}, 2, exec_state);
+}
+closure_6(1, 2)()
+EndTest();
+
+
+// Two closures. In the presence of eval, all information is provided, as
+// the compiler cannot determine which parts are used.
+BeginTest("Closure 7");
+function closure_7(a, b) {
+ var x = 3;
+ var y = 4;
+ eval('var i = 5');
+ eval('var j = 6');
+ function f(a, b) {
+ var x = 3;
+ var y = 4;
+ eval('var i = 5');
+ eval('var j = 6');
+ return function() {
+ debugger;
+ some_global = a;
+ return f;
+ }
+ }
+ return f(a, b);
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({}, 0, exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6}, 1, exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6,f:function(){}}, 2, exec_state);
+}
+closure_7(1, 2)()
+EndTest();
+
+
+// Test a mixture of scopes.
+BeginTest("The full monty");
+function the_full_monty(a, b) {
+ var x = 3;
+ var y = 4;
+ eval('var i = 5');
+ eval('var j = 6');
+ function f(a, b) {
+ var x = 9;
+ var y = 10;
+ eval('var i = 11');
+ eval('var j = 12');
+ with ({j:13}){
+ return function() {
+ var x = 14;
+ with ({a:15}) {
+ with ({b:16}) {
+ debugger;
+ some_global = a;
+ return f;
+ }
+ }
+ }
+ }
+ }
+ return f(a, b);
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.With,
+ debug.ScopeType.With,
+ debug.ScopeType.Local,
+ debug.ScopeType.With,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({b:16}, 0, exec_state);
+ CheckScopeContent({a:15}, 1, exec_state);
+ CheckScopeContent({x:14}, 2, exec_state);
+ CheckScopeContent({j:13}, 3, exec_state);
+ CheckScopeContent({a:1,b:2,x:9,y:10,i:11,j:12}, 4, exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6,f:function(){}}, 5, exec_state);
+}
+the_full_monty(1, 2)()
+EndTest();
+
+// Test global scope.
+BeginTest("Global");
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Global], exec_state);
+}
+debugger;
+EndTest();
+
+assertEquals(begin_test_count, break_count, 'one or more tests did not enter the debugger');
+assertEquals(begin_test_count, end_test_count, 'one or more tests did not have its result checked');
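
Each test in debug-scopes.js follows the same shape: install a break listener once, point listener_delegate at the per-test checks, and trigger a break with a debugger statement. A stripped-down sketch of that pattern (requires the test's --expose-debug-as debug flag):

    Debug = debug.Debug;
    Debug.setListener(function(event, exec_state) {
      if (event == Debug.DebugEvent.Break) {
        // Walk the scope chain of the frame that hit the break.
        for (var i = 0; i < exec_state.frame().scopeCount(); i++) {
          print(exec_state.frame().scope(i).scopeType());
        }
      }
    });
    (function() { var local = 1; debugger; })();
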
diff --git a/V8Binding/v8/test/mjsunit/debug-sourceinfo.js b/V8Binding/v8/test/mjsunit/debug-sourceinfo.js
index 36e9f03..0235796 100644
--- a/V8Binding/v8/test/mjsunit/debug-sourceinfo.js
+++ b/V8Binding/v8/test/mjsunit/debug-sourceinfo.js
@@ -1,276 +1,352 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// For this test to work this file MUST have CR LF line endings.
-function a() { b(); };
-function b() {
- c(true);
-};
- function c(x) {
- if (x) {
- return 1;
- } else {
- return 1;
- }
- };
-
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-// This is the number of comment lines above the first test function.
-var comment_lines = 29;
-
-// This magic number is the length or the first line comment (actually number
-// of characters before 'function a(...'.
-var comment_line_length = 1726;
-var start_a = 10 + comment_line_length;
-var start_b = 37 + comment_line_length;
-var start_c = 71 + comment_line_length;
-
-assertEquals(start_a, Debug.sourcePosition(a));
-assertEquals(start_b, Debug.sourcePosition(b));
-assertEquals(start_c, Debug.sourcePosition(c));
-
-var script = Debug.findScript(a);
-assertTrue(script.data === Debug.findScript(b).data);
-assertTrue(script.data === Debug.findScript(c).data);
-assertTrue(script.source === Debug.findScript(b).source);
-assertTrue(script.source === Debug.findScript(c).source);
-
-// Test that when running through source positions the position, line and
-// column progresses as expected.
-var position;
-var line;
-var column;
-for (var p = 0; p < 100; p++) {
- var location = script.locationFromPosition(p);
- if (p > 0) {
- assertEquals(position + 1, location.position);
- if (line == location.line) {
- assertEquals(column + 1, location.column);
- } else {
- assertEquals(line + 1, location.line);
- assertEquals(0, location.column);
- }
- } else {
- assertEquals(0, location.position);
- assertEquals(0, location.line);
- assertEquals(0, location.column);
- }
-
- // Remember the location.
- position = location.position;
- line = location.line;
- column = location.column;
-}
-
-// Test first position.
-assertEquals(0, script.locationFromPosition(0).position);
-assertEquals(0, script.locationFromPosition(0).line);
-assertEquals(0, script.locationFromPosition(0).column);
-
-// Test second position.
-assertEquals(1, script.locationFromPosition(1).position);
-assertEquals(0, script.locationFromPosition(1).line);
-assertEquals(1, script.locationFromPosition(1).column);
-
-// Test first position in finction a.
-assertEquals(start_a, script.locationFromPosition(start_a).position);
-assertEquals(0, script.locationFromPosition(start_a).line - comment_lines);
-assertEquals(10, script.locationFromPosition(start_a).column);
-
-// Test first position in finction b.
-assertEquals(start_b, script.locationFromPosition(start_b).position);
-assertEquals(1, script.locationFromPosition(start_b).line - comment_lines);
-assertEquals(13, script.locationFromPosition(start_b).column);
-
-// Test first position in finction b.
-assertEquals(start_c, script.locationFromPosition(start_c).position);
-assertEquals(4, script.locationFromPosition(start_c).line - comment_lines);
-assertEquals(12, script.locationFromPosition(start_c).column);
-
-// Test first line.
-assertEquals(0, script.locationFromLine().position);
-assertEquals(0, script.locationFromLine().line);
-assertEquals(0, script.locationFromLine().column);
-assertEquals(0, script.locationFromLine(0).position);
-assertEquals(0, script.locationFromLine(0).line);
-assertEquals(0, script.locationFromLine(0).column);
-
-// Test first line column 1
-assertEquals(1, script.locationFromLine(0, 1).position);
-assertEquals(0, script.locationFromLine(0, 1).line);
-assertEquals(1, script.locationFromLine(0, 1).column);
-
-// Test first line offset 1
-assertEquals(1, script.locationFromLine(0, 0, 1).position);
-assertEquals(0, script.locationFromLine(0, 0, 1).line);
-assertEquals(1, script.locationFromLine(0, 0, 1).column);
-
-// Test offset function a
-assertEquals(start_a, script.locationFromLine(void 0, void 0, start_a).position);
-assertEquals(0, script.locationFromLine(void 0, void 0, start_a).line - comment_lines);
-assertEquals(10, script.locationFromLine(void 0, void 0, start_a).column);
-assertEquals(start_a, script.locationFromLine(0, void 0, start_a).position);
-assertEquals(0, script.locationFromLine(0, void 0, start_a).line - comment_lines);
-assertEquals(10, script.locationFromLine(0, void 0, start_a).column);
-assertEquals(start_a, script.locationFromLine(0, 0, start_a).position);
-assertEquals(0, script.locationFromLine(0, 0, start_a).line - comment_lines);
-assertEquals(10, script.locationFromLine(0, 0, start_a).column);
-
-// Test second line offset function a
-assertEquals(start_a + 14, script.locationFromLine(1, 0, start_a).position);
-assertEquals(1, script.locationFromLine(1, 0, start_a).line - comment_lines);
-assertEquals(0, script.locationFromLine(1, 0, start_a).column);
-
-// Test second line column 2 offset function a
-assertEquals(start_a + 14 + 2, script.locationFromLine(1, 2, start_a).position);
-assertEquals(1, script.locationFromLine(1, 2, start_a).line - comment_lines);
-assertEquals(2, script.locationFromLine(1, 2, start_a).column);
-
-// Test offset function b
-assertEquals(start_b, script.locationFromLine(0, 0, start_b).position);
-assertEquals(1, script.locationFromLine(0, 0, start_b).line - comment_lines);
-assertEquals(13, script.locationFromLine(0, 0, start_b).column);
-
-// Test second line offset function b
-assertEquals(start_b + 6, script.locationFromLine(1, 0, start_b).position);
-assertEquals(2, script.locationFromLine(1, 0, start_b).line - comment_lines);
-assertEquals(0, script.locationFromLine(1, 0, start_b).column);
-
-// Test second line column 11 offset function b
-assertEquals(start_b + 6 + 11, script.locationFromLine(1, 11, start_b).position);
-assertEquals(2, script.locationFromLine(1, 11, start_b).line - comment_lines);
-assertEquals(11, script.locationFromLine(1, 11, start_b).column);
-
-// Test second line column 12 offset function b. Second line in b is 11 long
-// using column 12 wraps to next line.
-assertEquals(start_b + 6 + 12, script.locationFromLine(1, 12, start_b).position);
-assertEquals(3, script.locationFromLine(1, 12, start_b).line - comment_lines);
-assertEquals(0, script.locationFromLine(1, 12, start_b).column);
-
-// Test the Debug.findSourcePosition which wraps SourceManager.
-assertEquals(0 + start_a, Debug.findFunctionSourceLocation(a, 0, 0).position);
-assertEquals(0 + start_b, Debug.findFunctionSourceLocation(b, 0, 0).position);
-assertEquals(6 + start_b, Debug.findFunctionSourceLocation(b, 1, 0).position);
-assertEquals(8 + start_b, Debug.findFunctionSourceLocation(b, 1, 2).position);
-assertEquals(18 + start_b, Debug.findFunctionSourceLocation(b, 2, 0).position);
-assertEquals(0 + start_c, Debug.findFunctionSourceLocation(c, 0, 0).position);
-assertEquals(7 + start_c, Debug.findFunctionSourceLocation(c, 1, 0).position);
-assertEquals(21 + start_c, Debug.findFunctionSourceLocation(c, 2, 0).position);
-assertEquals(38 + start_c, Debug.findFunctionSourceLocation(c, 3, 0).position);
-assertEquals(52 + start_c, Debug.findFunctionSourceLocation(c, 4, 0).position);
-assertEquals(69 + start_c, Debug.findFunctionSourceLocation(c, 5, 0).position);
-assertEquals(76 + start_c, Debug.findFunctionSourceLocation(c, 6, 0).position);
-
-// Test source line and restriction. All the following tests start from line 1
-// column 2 in function b, which is the call to c.
-// c(true);
-// ^
-
-var location;
-
-location = script.locationFromLine(1, 0, start_b);
-assertEquals(' c(true);', location.sourceText());
-
-result = ['c', ' c', ' c(', ' c(', ' c(t']
-for (var i = 1; i <= 5; i++) {
- location = script.locationFromLine(1, 2, start_b);
- location.restrict(i);
- assertEquals(result[i - 1], location.sourceText());
-}
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(1, 0);
-assertEquals('c', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(2, 0);
-assertEquals('c(', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(2, 1);
-assertEquals(' c', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(2, 2);
-assertEquals(' c', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(2, 3);
-assertEquals(' c', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(3, 1);
-assertEquals(' c(', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(5, 0);
-assertEquals('c(tru', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(5, 2);
-assertEquals(' c(t', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(5, 4);
-assertEquals(' c(t', location.sourceText());
-
-// All the following tests start from line 1 column 10 in function b, which is
-// the final character.
-// c(true);
-// ^
-
-location = script.locationFromLine(1, 10, start_b);
-location.restrict(5, 0);
-assertEquals('rue);', location.sourceText());
-
-location = script.locationFromLine(1, 10, start_b);
-location.restrict(7, 0);
-assertEquals('(true);', location.sourceText());
-
-// All the following tests start from line 1 column 0 in function b, which is
-// the first character.
-// c(true);
-//^
-
-location = script.locationFromLine(1, 0, start_b);
-location.restrict(5, 0);
-assertEquals(' c(t', location.sourceText());
-
-location = script.locationFromLine(1, 0, start_b);
-location.restrict(5, 4);
-assertEquals(' c(t', location.sourceText());
-
-location = script.locationFromLine(1, 0, start_b);
-location.restrict(7, 0);
-assertEquals(' c(tru', location.sourceText());
-
-location = script.locationFromLine(1, 0, start_b);
-location.restrict(7, 6);
-assertEquals(' c(tru', location.sourceText());
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// For this test to work this file MUST have CR LF line endings.
+function a() { b(); };
+function b() {
+ c(true);
+};
+ function c(x) {
+ if (x) {
+ return 1;
+ } else {
+ return 1;
+ }
+ };
+function d(x) {
+ x = 1 ;
+ x = 2 ;
+ x = 3 ;
+ x = 4 ;
+ x = 5 ;
+ x = 6 ;
+ x = 7 ;
+ x = 8 ;
+ x = 9 ;
+ x = 10;
+ x = 11;
+ x = 12;
+ x = 13;
+ x = 14;
+ x = 15;
+}
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+// This is the number of comment lines above the first test function.
+var comment_lines = 29;
+
+// This is the last position in the entire file (note: this equals
+// file size of <debug-sourceinfo.js> - 1, since starting at 0).
+var last_position = 14312;
+// This is the last line of entire file (note: starting at 0).
+var last_line = 351;
+// This is the last column of the last line (note: starting at 0; it is 2
+// because the line ends with a trailing <CR><LF>).
+var last_column = 2;
+
+// This magic number is the length of the first line comment (actually the
+// number of characters before 'function a(...').
+var comment_line_length = 1726;
+var start_a = 10 + comment_line_length;
+var start_b = 37 + comment_line_length;
+var start_c = 71 + comment_line_length;
+var start_d = 163 + comment_line_length;
+
+// The position of the first line of d(), i.e. "x = 1 ;".
+var start_code_d = start_d + 7;
+// The line # of the first line of d() (note: starting at 0).
+var start_line_d = 41;
+var line_length_d = 11;
+var num_lines_d = 15;
+
+assertEquals(start_a, Debug.sourcePosition(a));
+assertEquals(start_b, Debug.sourcePosition(b));
+assertEquals(start_c, Debug.sourcePosition(c));
+assertEquals(start_d, Debug.sourcePosition(d));
+
+var script = Debug.findScript(a);
+assertTrue(script.data === Debug.findScript(b).data);
+assertTrue(script.data === Debug.findScript(c).data);
+assertTrue(script.data === Debug.findScript(d).data);
+assertTrue(script.source === Debug.findScript(b).source);
+assertTrue(script.source === Debug.findScript(c).source);
+assertTrue(script.source === Debug.findScript(d).source);
+
+// Test that when running through source positions the position, line and
+// column progresses as expected.
+var position;
+var line;
+var column;
+for (var p = 0; p < 100; p++) {
+ var location = script.locationFromPosition(p);
+ if (p > 0) {
+ assertEquals(position + 1, location.position);
+ if (line == location.line) {
+ assertEquals(column + 1, location.column);
+ } else {
+ assertEquals(line + 1, location.line);
+ assertEquals(0, location.column);
+ }
+ } else {
+ assertEquals(0, location.position);
+ assertEquals(0, location.line);
+ assertEquals(0, location.column);
+ }
+
+ // Remember the location.
+ position = location.position;
+ line = location.line;
+ column = location.column;
+}
+
+// Every line of d() is the same length. Verify we can loop through all
+// positions and find the right line # for each.
+var p = start_code_d;
+for (line = 0; line < num_lines_d; line++) {
+ for (column = 0; column < line_length_d; column++) {
+ var location = script.locationFromPosition(p);
+ assertEquals(p, location.position);
+ assertEquals(start_line_d + line, location.line);
+ assertEquals(column, location.column);
+ p++;
+ }
+}
+
+// Test first position.
+assertEquals(0, script.locationFromPosition(0).position);
+assertEquals(0, script.locationFromPosition(0).line);
+assertEquals(0, script.locationFromPosition(0).column);
+
+// Test second position.
+assertEquals(1, script.locationFromPosition(1).position);
+assertEquals(0, script.locationFromPosition(1).line);
+assertEquals(1, script.locationFromPosition(1).column);
+
+// Test first position in function a().
+assertEquals(start_a, script.locationFromPosition(start_a).position);
+assertEquals(0, script.locationFromPosition(start_a).line - comment_lines);
+assertEquals(10, script.locationFromPosition(start_a).column);
+
+// Test first position in function b().
+assertEquals(start_b, script.locationFromPosition(start_b).position);
+assertEquals(1, script.locationFromPosition(start_b).line - comment_lines);
+assertEquals(13, script.locationFromPosition(start_b).column);
+
+// Test first position in function c().
+assertEquals(start_c, script.locationFromPosition(start_c).position);
+assertEquals(4, script.locationFromPosition(start_c).line - comment_lines);
+assertEquals(12, script.locationFromPosition(start_c).column);
+
+// Test first position in function d().
+assertEquals(start_d, script.locationFromPosition(start_d).position);
+assertEquals(11, script.locationFromPosition(start_d).line - comment_lines);
+assertEquals(10, script.locationFromPosition(start_d).column);
+
+// Test first line.
+assertEquals(0, script.locationFromLine().position);
+assertEquals(0, script.locationFromLine().line);
+assertEquals(0, script.locationFromLine().column);
+assertEquals(0, script.locationFromLine(0).position);
+assertEquals(0, script.locationFromLine(0).line);
+assertEquals(0, script.locationFromLine(0).column);
+
+// Test first line column 1.
+assertEquals(1, script.locationFromLine(0, 1).position);
+assertEquals(0, script.locationFromLine(0, 1).line);
+assertEquals(1, script.locationFromLine(0, 1).column);
+
+// Test first line offset 1.
+assertEquals(1, script.locationFromLine(0, 0, 1).position);
+assertEquals(0, script.locationFromLine(0, 0, 1).line);
+assertEquals(1, script.locationFromLine(0, 0, 1).column);
+
+// Test offset function a().
+assertEquals(start_a, script.locationFromLine(void 0, void 0, start_a).position);
+assertEquals(0, script.locationFromLine(void 0, void 0, start_a).line - comment_lines);
+assertEquals(10, script.locationFromLine(void 0, void 0, start_a).column);
+assertEquals(start_a, script.locationFromLine(0, void 0, start_a).position);
+assertEquals(0, script.locationFromLine(0, void 0, start_a).line - comment_lines);
+assertEquals(10, script.locationFromLine(0, void 0, start_a).column);
+assertEquals(start_a, script.locationFromLine(0, 0, start_a).position);
+assertEquals(0, script.locationFromLine(0, 0, start_a).line - comment_lines);
+assertEquals(10, script.locationFromLine(0, 0, start_a).column);
+
+// Test second line offset function a().
+assertEquals(start_a + 14, script.locationFromLine(1, 0, start_a).position);
+assertEquals(1, script.locationFromLine(1, 0, start_a).line - comment_lines);
+assertEquals(0, script.locationFromLine(1, 0, start_a).column);
+
+// Test second line column 2 offset function a().
+assertEquals(start_a + 14 + 2, script.locationFromLine(1, 2, start_a).position);
+assertEquals(1, script.locationFromLine(1, 2, start_a).line - comment_lines);
+assertEquals(2, script.locationFromLine(1, 2, start_a).column);
+
+// Test offset function b().
+assertEquals(start_b, script.locationFromLine(0, 0, start_b).position);
+assertEquals(1, script.locationFromLine(0, 0, start_b).line - comment_lines);
+assertEquals(13, script.locationFromLine(0, 0, start_b).column);
+
+// Test second line offset function b().
+assertEquals(start_b + 6, script.locationFromLine(1, 0, start_b).position);
+assertEquals(2, script.locationFromLine(1, 0, start_b).line - comment_lines);
+assertEquals(0, script.locationFromLine(1, 0, start_b).column);
+
+// Test second line column 11 offset function b().
+assertEquals(start_b + 6 + 11, script.locationFromLine(1, 11, start_b).position);
+assertEquals(2, script.locationFromLine(1, 11, start_b).line - comment_lines);
+assertEquals(11, script.locationFromLine(1, 11, start_b).column);
+
+// Test second line column 12 offset function b(). The second line in b is
+// 11 characters long, so column 12 wraps to the next line.
+assertEquals(start_b + 6 + 12, script.locationFromLine(1, 12, start_b).position);
+assertEquals(3, script.locationFromLine(1, 12, start_b).line - comment_lines);
+assertEquals(0, script.locationFromLine(1, 12, start_b).column);
+
+// Test the Debug.findSourcePosition which wraps SourceManager.
+assertEquals(0 + start_a, Debug.findFunctionSourceLocation(a, 0, 0).position);
+assertEquals(0 + start_b, Debug.findFunctionSourceLocation(b, 0, 0).position);
+assertEquals(6 + start_b, Debug.findFunctionSourceLocation(b, 1, 0).position);
+assertEquals(8 + start_b, Debug.findFunctionSourceLocation(b, 1, 2).position);
+assertEquals(18 + start_b, Debug.findFunctionSourceLocation(b, 2, 0).position);
+assertEquals(0 + start_c, Debug.findFunctionSourceLocation(c, 0, 0).position);
+assertEquals(7 + start_c, Debug.findFunctionSourceLocation(c, 1, 0).position);
+assertEquals(21 + start_c, Debug.findFunctionSourceLocation(c, 2, 0).position);
+assertEquals(38 + start_c, Debug.findFunctionSourceLocation(c, 3, 0).position);
+assertEquals(52 + start_c, Debug.findFunctionSourceLocation(c, 4, 0).position);
+assertEquals(69 + start_c, Debug.findFunctionSourceLocation(c, 5, 0).position);
+assertEquals(76 + start_c, Debug.findFunctionSourceLocation(c, 6, 0).position);
+assertEquals(0 + start_d, Debug.findFunctionSourceLocation(d, 0, 0).position);
+assertEquals(7 + start_d, Debug.findFunctionSourceLocation(d, 1, 0).position);
+for (var i = 1; i <= num_lines_d; i++) {
+ assertEquals(7 + (i * line_length_d) + start_d, Debug.findFunctionSourceLocation(d, (i + 1), 0).position);
+}
+assertEquals(175 + start_d, Debug.findFunctionSourceLocation(d, 17, 0).position);
+
+// Make sure invalid inputs are handled properly.
+assertEquals(0, script.locationFromPosition(-1).line);
+assertEquals(null, script.locationFromPosition(last_position + 1));
+
+// Test last position.
+assertEquals(last_position, script.locationFromPosition(last_position).position);
+assertEquals(last_line, script.locationFromPosition(last_position).line);
+assertEquals(last_column, script.locationFromPosition(last_position).column);
+
+// Test source line and restriction. All the following tests start from line 1
+// column 2 in function b, which is the call to c.
+// c(true);
+// ^
+
+var location;
+
+location = script.locationFromLine(1, 0, start_b);
+assertEquals(' c(true);', location.sourceText());
+
+var result = ['c', ' c', ' c(', ' c(', ' c(t'];
+for (var i = 1; i <= 5; i++) {
+ location = script.locationFromLine(1, 2, start_b);
+ location.restrict(i);
+ assertEquals(result[i - 1], location.sourceText());
+}
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(1, 0);
+assertEquals('c', location.sourceText());
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(2, 0);
+assertEquals('c(', location.sourceText());
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(2, 1);
+assertEquals(' c', location.sourceText());
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(2, 2);
+assertEquals(' c', location.sourceText());
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(2, 3);
+assertEquals(' c', location.sourceText());
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(3, 1);
+assertEquals(' c(', location.sourceText());
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(5, 0);
+assertEquals('c(tru', location.sourceText());
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(5, 2);
+assertEquals(' c(t', location.sourceText());
+
+location = script.locationFromLine(1, 2, start_b);
+location.restrict(5, 4);
+assertEquals(' c(t', location.sourceText());
+
+// All the following tests start from line 1 column 10 in function b, which is
+// the final character.
+// c(true);
+// ^
+
+location = script.locationFromLine(1, 10, start_b);
+location.restrict(5, 0);
+assertEquals('rue);', location.sourceText());
+
+location = script.locationFromLine(1, 10, start_b);
+location.restrict(7, 0);
+assertEquals('(true);', location.sourceText());
+
+// All the following tests start from line 1 column 0 in function b, which is
+// the first character.
+// c(true);
+//^
+
+location = script.locationFromLine(1, 0, start_b);
+location.restrict(5, 0);
+assertEquals(' c(t', location.sourceText());
+
+location = script.locationFromLine(1, 0, start_b);
+location.restrict(5, 4);
+assertEquals(' c(t', location.sourceText());
+
+location = script.locationFromLine(1, 0, start_b);
+location.restrict(7, 0);
+assertEquals(' c(tru', location.sourceText());
+
+location = script.locationFromLine(1, 0, start_b);
+location.restrict(7, 6);
+assertEquals(' c(tru', location.sourceText());
+
+// Test that script.sourceLine(line) works.
+for (line = 0; line < num_lines_d; line++) {
+ var line_content_regexp = new RegExp(" x = " + (line + 1));
+ assertTrue(line_content_regexp.test(script.sourceLine(start_line_d + line)));
+}
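
Because every body line of d() has the same length (including the trailing <CR><LF>), source positions inside d() map to lines and columns by plain division. A sketch of the invariant the loops above check, reusing the test's own constants:

    var p = start_code_d + 25;            // any position inside d()'s body
    var offset = p - start_code_d;
    var line = start_line_d + Math.floor(offset / line_length_d);
    var column = offset % line_length_d;
    // script.locationFromPosition(p) must report exactly this line/column.
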
diff --git a/V8Binding/v8/test/mjsunit/html-comments.js b/V8Binding/v8/test/mjsunit/html-comments.js
index f39271a..cc2315b 100644
--- a/V8Binding/v8/test/mjsunit/html-comments.js
+++ b/V8Binding/v8/test/mjsunit/html-comments.js
@@ -32,26 +32,26 @@ var x = 1;
--> so must this...
--> and this.
x-->0;
-assertEquals(0, x);
+assertEquals(0, x, 'a');
var x = 0; x <!-- x
-assertEquals(0, x);
+assertEquals(0, x, 'b');
var x = 1; x <!--x
-assertEquals(1, x);
+assertEquals(1, x, 'c');
var x = 2; x <!-- x; x = 42;
-assertEquals(2, x);
+assertEquals(2, x, 'd');
var x = 1; x <! x--;
-assertEquals(0, x);
+assertEquals(0, x, 'e');
var x = 1; x <!- x--;
-assertEquals(0, x);
+assertEquals(0, x, 'f');
var b = true <! true;
-assertFalse(b);
+assertFalse(b, 'g');
var b = true <!- true;
-assertFalse(b);
+assertFalse(b, 'h');
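
For reference, the Annex B HTML-comment rules these assertions rely on: "<!--" opens a single-line comment anywhere, while "-->" only comments out a line it begins; elsewhere it parses as decrement-then-compare. A compact sketch:

    var x = 1;
    x <!-- x;   // "<!--" starts a comment, so only the expression x remains
    x-->0;      // no comment here: parsed as (x--) > 0, leaving x == 0
    --> a line that begins with "-->" is itself a comment
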
diff --git a/V8Binding/v8/test/mjsunit/regexp-captures.js b/V8Binding/v8/test/mjsunit/regexp-captures.js
new file mode 100644
index 0000000..91548d6
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/regexp-captures.js
@@ -0,0 +1,31 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var re = /^(((N({)?)|(R)|(U)|(V)|(B)|(H)|(n((n)|(r)|(v)|(h))?)|(r(r)?)|(v)|(b((n)|(b))?)|(h))|((Y)|(A)|(E)|(o(u)?)|(p(u)?)|(q(u)?)|(s)|(t)|(u)|(w)|(x(u)?)|(y)|(z)|(a((T)|(A)|(L))?)|(c)|(e)|(f(u)?)|(g(u)?)|(i)|(j)|(l)|(m(u)?)))+/;
+var r = new RegExp(re);
+var str = "Avtnennan gunzvmu pubExnY nEvln vaTxh rmuhguhaTxnY";
+assertTrue(r.test(str));
diff --git a/V8Binding/v8/test/mjsunit/regress/regress-1919169.js b/V8Binding/v8/test/mjsunit/regress/regress-1919169.js
new file mode 100644
index 0000000..774f265
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/regress/regress-1919169.js
@@ -0,0 +1,40 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+function test() {
+ var s2 = "s2";
+ for (var i = 0; i < 2; i++) {
+ // Crashes in round i==1 with IllegalAccess in %StringAdd(x,y)
+ var res = 1 + s2;
+ s2 = 2;
+ }
+}
+
+// Crash does not occur when code is run at the top level.
+test();
+
diff --git a/V8Binding/v8/test/mjsunit/regress/regress-386.js b/V8Binding/v8/test/mjsunit/regress/regress-386.js
new file mode 100644
index 0000000..06e4b8e
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/regress/regress-386.js
@@ -0,0 +1,47 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Test for http://code.google.com/p/v8/issues/detail?id=386
+// This test creates enough properties in A so that adding i as
+// a constant function, in the first call to the constructor, leaves
+// the object's map in the fast case and adds a constant function map
+// transition.
+// Adding i in the second call to the constructor creates a real property,
+// and simultaneously converts the object from fast case to slow case
+// and changes i from a map transition to a real property. There was
+// a flaw in the code that handled this combination of events.
+
+function A() {
+ for (var i = 0; i < 13; i++) {
+ this['a' + i] = i;
+ }
+ this.i = function(){};
+};
+
+new A();
+new A();
diff --git a/V8Binding/v8/test/mjsunit/regress/regress-392.js b/V8Binding/v8/test/mjsunit/regress/regress-392.js
new file mode 100644
index 0000000..3cabcac
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/regress/regress-392.js
@@ -0,0 +1,34 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test for issue 392 reported by nth10sd; see
+// http://code.google.com/p/v8/issues/detail?id=392
+
+assertTrue(isNaN((function(){return arguments++})()));
+assertTrue(isNaN((function(){return ++arguments})()));
+assertTrue(isNaN((function(){return arguments--})()));
+assertTrue(isNaN((function(){return --arguments})()));
diff --git a/V8Binding/v8/test/mjsunit/regress/regress-6-9-regexp.js b/V8Binding/v8/test/mjsunit/regress/regress-6-9-regexp.js
new file mode 100644
index 0000000..c73b37d
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/regress/regress-6-9-regexp.js
@@ -0,0 +1,30 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check that the perfect mask check isn't overly optimistic.
+
+assertFalse(/[6-9]/.test('2'));
diff --git a/V8Binding/v8/test/mjsunit/sin-cos.js b/V8Binding/v8/test/mjsunit/sin-cos.js
new file mode 100644
index 0000000..ae02451
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/sin-cos.js
@@ -0,0 +1,45 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test Math.sin and Math.cos.
+
+var input_sin = [0, Math.PI / 2];
+var input_cos = [0, Math.PI];
+
+var output_sin = input_sin.map(Math.sin);
+var output_cos = input_cos.map(Math.cos);
+
+var expected_sin = [0, 1];
+var expected_cos = [1, -1];
+
+assertArrayEquals(expected_sin, output_sin, "sine");
+assertArrayEquals(expected_cos, output_cos, "cosine");
+
+// By accident, the slow case for sine and cosine were both sine at
+// some point. This is a regression test for that issue.
+var x = Math.pow(2, 70);
+assertTrue(Math.sin(x) != Math.cos(x));
diff --git a/V8Binding/v8/test/mjsunit/smi-ops.js b/V8Binding/v8/test/mjsunit/smi-ops.js
index 7e57136..5520327 100644
--- a/V8Binding/v8/test/mjsunit/smi-ops.js
+++ b/V8Binding/v8/test/mjsunit/smi-ops.js
@@ -196,6 +196,54 @@ assertEquals(78, Xor100Reversed(OBJ_42));
var x = 0x23; var y = 0x35;
assertEquals(0x16, x ^ y);
+
+// Bitwise not.
+var v = 0;
+assertEquals(-1, ~v);
+v = SMI_MIN;
+assertEquals(0x3fffffff, ~v);
+v = SMI_MAX;
+assertEquals(-0x40000000, ~v);
+
+// Overflowing ++ and --.
+v = SMI_MAX;
+v++;
+assertEquals(0x40000000, v);
+v = SMI_MIN;
+v--;
+assertEquals(-0x40000001, v);
+
+// Not actually Smi operations.
+// Check that relations on unary ops work.
+var v = -1.2;
+assertTrue(v == v);
+assertTrue(v === v);
+assertTrue(v <= v);
+assertTrue(v >= v);
+assertFalse(v < v);
+assertFalse(v > v);
+assertFalse(v != v);
+assertFalse(v !== v);
+
+// Right hand side of unary minus is overwritable.
+v = 1.5;
+assertEquals(-2.25, -(v * v));
+
+// Smi input to bitop gives non-smi result where the rhs is a float that
+// can be overwritten.
+var x1 = 0x10000000;
+var x2 = 0x40000002;
+var x3 = 0x40000000;
+assertEquals(0x40000000, x1 << (x2 - x3));
+
+// Smi input to bitop gives non-smi result where the rhs could be overwritten
+// if it were a float, but it isn't.
+x1 = 0x10000000;
+x2 = 4;
+x3 = 2;
+assertEquals(0x40000000, x1 << (x2 - x3));
+
+
// Test shift operators on non-smi inputs, giving smi and non-smi results.
function testShiftNonSmis() {
var pos_non_smi = 2000000000;
@@ -585,3 +633,10 @@ function testShiftNonSmis() {
}
testShiftNonSmis();
+
+
+// Verify that we handle the (optimized) corner case of shifting by
+// zero even for non-smis.
+function shiftByZero(n) { return n << 0; }
+
+assertEquals(3, shiftByZero(3.1415));
diff --git a/V8Binding/v8/test/mjsunit/stack-traces.js b/V8Binding/v8/test/mjsunit/stack-traces.js
new file mode 100644
index 0000000..6ac8b0a
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/stack-traces.js
@@ -0,0 +1,160 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Error.captureStackTraces = true;
+
+function testMethodNameInference() {
+ function Foo() { }
+ Foo.prototype.bar = function () { FAIL; };
+ (new Foo).bar();
+}
+
+function testNested() {
+ function one() {
+ function two() {
+ function three() {
+ FAIL;
+ }
+ three();
+ }
+ two();
+ }
+ one();
+}
+
+function testArrayNative() {
+ [1, 2, 3].map(function () { FAIL; });
+}
+
+function testImplicitConversion() {
+ function Nirk() { }
+ Nirk.prototype.valueOf = function () { FAIL; };
+ return 1 + (new Nirk);
+}
+
+function testEval() {
+ eval("function Doo() { FAIL; }; Doo();");
+}
+
+function testNestedEval() {
+ var x = "FAIL";
+ eval("function Outer() { eval('function Inner() { eval(x); }'); Inner(); }; Outer();");
+}
+
+function testValue() {
+ Number.prototype.causeError = function () { FAIL; };
+ (1).causeError();
+}
+
+function testConstructor() {
+ function Plonk() { FAIL; }
+ new Plonk();
+}
+
+// Utility function for testing that the expected strings occur
+// in the stack trace produced when running the given function.
+function testTrace(fun, expected) {
+ var threw = false;
+ try {
+ fun();
+ } catch (e) {
+ for (var i = 0; i < expected.length; i++) {
+ assertTrue(e.stack.indexOf(expected[i]) != -1);
+ }
+ threw = true;
+ }
+ assertTrue(threw);
+}
+
+// Test that the error constructor is not shown in the trace
+function testCallerCensorship() {
+ var threw = false;
+ try {
+ FAIL;
+ } catch (e) {
+ assertEquals(-1, e.stack.indexOf('at new ReferenceError'));
+ threw = true;
+ }
+ assertTrue(threw);
+}
+
+// Test that the explicit constructor call is shown in the trace
+function testUnintendedCallerCensorship() {
+ var threw = false;
+ try {
+ new ReferenceError({
+ toString: function () {
+ FAIL;
+ }
+ });
+ } catch (e) {
+ assertTrue(e.stack.indexOf('at new ReferenceError') != -1);
+ threw = true;
+ }
+ assertTrue(threw);
+}
+
+// If an error occurs while the stack trace is being formatted it should
+// be handled gracefully.
+function testErrorsDuringFormatting() {
+ function Nasty() { }
+ Nasty.prototype.foo = function () { throw new RangeError(); };
+ var n = new Nasty();
+ n.__defineGetter__('constructor', function () { CONS_FAIL; });
+ var threw = false;
+ try {
+ n.foo();
+ } catch (e) {
+ threw = true;
+ assertTrue(e.stack.indexOf('<error: ReferenceError') != -1);
+ }
+ assertTrue(threw);
+ threw = false;
+ // Now we can't even format the message saying that we couldn't format
+ // the stack frame. Put that in your pipe and smoke it!
+ ReferenceError.prototype.toString = function () { NESTED_FAIL; };
+ try {
+ n.foo();
+ } catch (e) {
+ threw = true;
+ assertTrue(e.stack.indexOf('<error>') != -1);
+ }
+ assertTrue(threw);
+}
+
+testTrace(testArrayNative, ["Array.map (native)"]);
+testTrace(testNested, ["at one", "at two", "at three"]);
+testTrace(testMethodNameInference, ["at Foo.bar"]);
+testTrace(testImplicitConversion, ["at Nirk.valueOf"]);
+testTrace(testEval, ["at Doo (eval at testEval"]);
+testTrace(testNestedEval, ["at eval (eval at Inner (eval at Outer"]);
+testTrace(testValue, ["at Number.causeError"]);
+testTrace(testConstructor, ["new Plonk"]);
+
+testCallerCensorship();
+testUnintendedCallerCensorship();
+testErrorsDuringFormatting();
diff --git a/V8Binding/v8/test/mjsunit/toint32.js b/V8Binding/v8/test/mjsunit/toint32.js
index a558295..9dad9c9 100644
--- a/V8Binding/v8/test/mjsunit/toint32.js
+++ b/V8Binding/v8/test/mjsunit/toint32.js
@@ -29,19 +29,19 @@ function toInt32(x) {
return x | 0;
}
-assertEquals(0, toInt32(Infinity));
-assertEquals(0, toInt32(-Infinity));
-assertEquals(0, toInt32(NaN));
-assertEquals(0, toInt32(0.0));
-assertEquals(0, toInt32(-0.0));
+assertEquals(0, toInt32(Infinity), "Inf");
+assertEquals(0, toInt32(-Infinity), "-Inf");
+assertEquals(0, toInt32(NaN), "NaN");
+assertEquals(0, toInt32(0.0), "zero");
+assertEquals(0, toInt32(-0.0), "-zero");
assertEquals(0, toInt32(Number.MIN_VALUE));
assertEquals(0, toInt32(-Number.MIN_VALUE));
assertEquals(0, toInt32(0.1));
assertEquals(0, toInt32(-0.1));
-assertEquals(1, toInt32(1));
-assertEquals(1, toInt32(1.1));
-assertEquals(-1, toInt32(-1));
+assertEquals(1, toInt32(1), "one");
+assertEquals(1, toInt32(1.1), "onepointone");
+assertEquals(-1, toInt32(-1), "-one");
assertEquals(0, toInt32(0.6), "truncate positive (0.6)");
assertEquals(1, toInt32(1.6), "truncate positive (1.6)");
assertEquals(0, toInt32(-0.6), "truncate negative (-0.6)");
diff --git a/V8Binding/v8/test/mjsunit/tools/logreader.js b/V8Binding/v8/test/mjsunit/tools/logreader.js
new file mode 100644
index 0000000..dfd7f9f
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/tools/logreader.js
@@ -0,0 +1,82 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Load CSV Parser and Log Reader implementations from <project root>/tools.
+// Files: tools/csvparser.js tools/logreader.js
+
+
+(function testAddressParser() {
+ var reader = new devtools.profiler.LogReader({});
+ var parser = reader.createAddressParser('test');
+
+ // Test that 0x values are parsed, and prevAddresses_ are untouched.
+ assertFalse('test' in reader.prevAddresses_);
+ assertEquals(0, parser('0x0'));
+ assertFalse('test' in reader.prevAddresses_);
+ assertEquals(0x100, parser('0x100'));
+ assertFalse('test' in reader.prevAddresses_);
+ assertEquals(0xffffffff, parser('0xffffffff'));
+ assertFalse('test' in reader.prevAddresses_);
+
+ // Test that values that have no '+' or '-' prefix are parsed
+ // and saved to prevAddresses_.
+ assertEquals(0, parser('0'));
+ assertEquals(0, reader.prevAddresses_.test);
+ assertEquals(0x100, parser('100'));
+ assertEquals(0x100, reader.prevAddresses_.test);
+ assertEquals(0xffffffff, parser('ffffffff'));
+ assertEquals(0xffffffff, reader.prevAddresses_.test);
+
+ // Test that values prefixed with '+' or '-' are treated as deltas,
+ // and prevAddresses_ is updated.
+ // Set base value.
+ assertEquals(0x100, parser('100'));
+ assertEquals(0x100, reader.prevAddresses_.test);
+ assertEquals(0x200, parser('+100'));
+ assertEquals(0x200, reader.prevAddresses_.test);
+ assertEquals(0x100, parser('-100'));
+ assertEquals(0x100, reader.prevAddresses_.test);
+})();
+
+
+(function testStackProcessing() {
+ var reader = new devtools.profiler.LogReader({});
+
+ assertEquals([0x10000000, 0x10001000, 0xffff000, 0x10000000],
+ reader.processStack(0x10000000, ['overflow',
+ '+1000', '-2000', '+1000']));
+})();
+
+
+(function testExpandBackRef() {
+ var reader = new devtools.profiler.LogReader({});
+
+ assertEquals('aaaaaaaa', reader.expandBackRef_('aaaaaaaa'));
+ assertEquals('aaaaaaaa', reader.expandBackRef_('#1'));
+ assertEquals('bbbbaaaa', reader.expandBackRef_('bbbb#2:4'));
+ assertEquals('"#1:1"', reader.expandBackRef_('"#1:1"'));
+})();
diff --git a/V8Binding/v8/test/mozilla/mozilla.status b/V8Binding/v8/test/mozilla/mozilla.status
index 97182f3..760ed41 100644
--- a/V8Binding/v8/test/mozilla/mozilla.status
+++ b/V8Binding/v8/test/mozilla/mozilla.status
@@ -88,17 +88,18 @@ js1_5/GC/regress-348532: SLOW
##################### FLAKY TESTS #####################
# These tests time out in debug mode but pass in product mode
+js1_5/Regress/regress-360969-03: PASS || TIMEOUT if $mode == debug
+js1_5/Regress/regress-360969-04: PASS || TIMEOUT if $mode == debug
+js1_5/Regress/regress-360969-05: PASS || TIMEOUT if $mode == debug
+js1_5/Regress/regress-360969-06: PASS || TIMEOUT if $mode == debug
+js1_5/extensions/regress-365527: PASS || TIMEOUT if $mode == debug
+
js1_5/Regress/regress-280769-3: PASS || FAIL if $mode == debug
js1_5/Regress/regress-203278-1: PASS || FAIL if $mode == debug
js1_5/GC/regress-203278-2: PASS || FAIL if $mode == debug
js1_5/Regress/regress-244470: PASS || FAIL if $mode == debug
ecma_3/RegExp/regress-209067: PASS || FAIL if $mode == debug
js1_5/GC/regress-278725: PASS || FAIL if $mode == debug
-js1_5/Regress/regress-360969-03: PASS || FAIL if $mode == debug
-js1_5/Regress/regress-360969-04: PASS || FAIL if $mode == debug
-js1_5/Regress/regress-360969-05: PASS || FAIL if $mode == debug
-js1_5/Regress/regress-360969-06: PASS || FAIL if $mode == debug
-js1_5/extensions/regress-365527: PASS || FAIL if $mode == debug
# http://b/issue?id=1206983
js1_5/Regress/regress-367561-03: PASS || FAIL if $mode == debug
ecma/Date/15.9.5.10-2: PASS || FAIL if $mode == debug
@@ -148,7 +149,7 @@ js1_5/String/regress-322772: PASS || FAIL
js1_5/Array/regress-99120-01: PASS || FAIL
js1_5/Array/regress-99120-02: PASS || FAIL
js1_5/Regress/regress-347306-01: PASS || FAIL
-js1_5/Regress/regress-416628: PASS || FAIL
+js1_5/Regress/regress-416628: PASS || FAIL || TIMEOUT if $mode == debug
# The following two tests assume that daylight savings time starts first Sunday
@@ -203,7 +204,7 @@ ecma/String/15.5.4.12-4: FAIL_OK
ecma/String/15.5.4.12-5: FAIL_OK
# Creates a linked list of arrays until we run out of memory or timeout.
-js1_5/Regress/regress-312588: FAIL_OK
+js1_5/Regress/regress-312588: FAIL || TIMEOUT
# Runs out of memory because it compiles huge functions.
@@ -247,14 +248,14 @@ js1_5/extensions/regress-459606: PASS || FAIL_OK
# PCRE's match limit is reached. SpiderMonkey hangs on the first one,
# JSC returns true somehow. Maybe they up the match limit? There is
# an open V8 bug 676063 about this.
-ecma_3/RegExp/regress-330684: FAIL_OK
+ecma_3/RegExp/regress-330684: TIMEOUT
# This test contains a regexp that runs exponentially long. Spidermonkey
# standalone will hang, though apparently inside Firefox it will trigger a
# long-running-script timeout. JSCRE passes by hitting the matchLimit and
# just pretending that an exhaustive search found no match.
-ecma_3/RegExp/regress-307456: PASS || FAIL_OK
+ecma_3/RegExp/regress-307456: PASS || TIMEOUT
# We do not detect overflow in bounds for back references and {}
@@ -594,7 +595,7 @@ js1_5/Regress/regress-306633: FAIL
# This test seems designed to fail (it produces a 700Mbyte string).
# We fail on out of memory. The important thing is not to crash.
-js1_5/Regress/regress-303213: FAIL
+js1_5/Regress/regress-303213: FAIL || TIMEOUT if $mode == debug
# Bug 1202592: New ecma_3/String/15.5.4.11 is failing.
@@ -630,7 +631,6 @@ js1_5/extensions/regress-313803: FAIL_OK
js1_5/extensions/regress-314874: FAIL_OK
js1_5/extensions/regress-322957: FAIL_OK
js1_5/extensions/regress-328556: FAIL_OK
-js1_5/extensions/regress-330569: FAIL_OK
js1_5/extensions/regress-333541: FAIL_OK
js1_5/extensions/regress-335700: FAIL_OK
js1_5/extensions/regress-336409-1: FAIL_OK
@@ -640,7 +640,6 @@ js1_5/extensions/regress-336410-2: FAIL_OK
js1_5/extensions/regress-341956-01: FAIL_OK
js1_5/extensions/regress-341956-02: FAIL_OK
js1_5/extensions/regress-341956-03: FAIL_OK
-js1_5/extensions/regress-342960: FAIL_OK
js1_5/extensions/regress-345967: FAIL_OK
js1_5/extensions/regress-346494-01: FAIL_OK
js1_5/extensions/regress-346494: FAIL_OK
@@ -653,7 +652,6 @@ js1_5/extensions/regress-350531: FAIL_OK
js1_5/extensions/regress-351102-01: FAIL_OK
js1_5/extensions/regress-351102-02: FAIL_OK
js1_5/extensions/regress-351102-06: FAIL_OK
-js1_5/extensions/regress-351448: FAIL_OK
js1_5/extensions/regress-351973: FAIL_OK
js1_5/extensions/regress-352060: FAIL_OK
js1_5/extensions/regress-352094: FAIL_OK
@@ -716,6 +714,10 @@ js1_5/extensions/scope-001: FAIL_OK
js1_5/extensions/toLocaleFormat-01: FAIL_OK
js1_5/extensions/toLocaleFormat-02: FAIL_OK
+js1_5/extensions/regress-330569: TIMEOUT
+js1_5/extensions/regress-351448: TIMEOUT
+js1_5/extensions/regress-342960: FAIL_OK || TIMEOUT if $mode == debug
+
##################### DECOMPILATION TESTS #####################
@@ -776,13 +778,11 @@ js1_5/decompilation/regress-383721: PASS || FAIL
js1_5/decompilation/regress-406555: PASS || FAIL
-[ $FAST == yes ]
-
# These tests take an unreasonable amount of time so we skip them
# in fast mode.
-js1_5/Regress/regress-312588: SKIP
-js1_5/Regress/regress-271716-n: SKIP
+js1_5/Regress/regress-312588: TIMEOUT || SKIP if $FAST == yes
+js1_5/Regress/regress-271716-n: PASS || SKIP if $FAST == yes
[ $FAST == yes && $ARCH == arm ]
diff --git a/V8Binding/v8/tools/codemap.js b/V8Binding/v8/tools/codemap.js
index 3766db0..d6df7fa 100644
--- a/V8Binding/v8/tools/codemap.js
+++ b/V8Binding/v8/tools/codemap.js
@@ -126,7 +126,7 @@ devtools.profiler.CodeMap.prototype.addStaticCode = function(
devtools.profiler.CodeMap.prototype.markPages_ = function(start, end) {
for (var addr = start; addr <= end;
addr += devtools.profiler.CodeMap.PAGE_SIZE) {
- this.pages_[addr >> devtools.profiler.CodeMap.PAGE_ALIGNMENT] = 1;
+ this.pages_[addr >>> devtools.profiler.CodeMap.PAGE_ALIGNMENT] = 1;
}
};
@@ -155,7 +155,7 @@ devtools.profiler.CodeMap.prototype.findInTree_ = function(tree, addr) {
* @param {number} addr Address.
*/
devtools.profiler.CodeMap.prototype.findEntry = function(addr) {
- var pageAddr = addr >> devtools.profiler.CodeMap.PAGE_ALIGNMENT;
+ var pageAddr = addr >>> devtools.profiler.CodeMap.PAGE_ALIGNMENT;
if (pageAddr in this.pages_) {
return this.findInTree_(this.statics_, addr);
}
diff --git a/V8Binding/v8/tools/gyp/v8.gyp b/V8Binding/v8/tools/gyp/v8.gyp
index 66e1bb6..8815456 100644
--- a/V8Binding/v8/tools/gyp/v8.gyp
+++ b/V8Binding/v8/tools/gyp/v8.gyp
@@ -164,6 +164,7 @@
'../../src/list-inl.h',
'../../src/list.h',
'../../src/log.cc',
+ '../../src/log-inl.h',
'../../src/log.h',
'../../src/log-utils.cc',
'../../src/log-utils.h',
diff --git a/V8Binding/v8/tools/linux-tick-processor b/V8Binding/v8/tools/linux-tick-processor
index 968c241..c5130ff 100644
--- a/V8Binding/v8/tools/linux-tick-processor
+++ b/V8Binding/v8/tools/linux-tick-processor
@@ -1,15 +1,23 @@
#!/bin/sh
tools_path=`cd $(dirname "$0");pwd`
+if [ ! "$D8_PATH" ]; then
+ d8_public=`which d8`
+ if [ "$d8_public" ]; then D8_PATH=$(dirname "$d8_public"); fi
+fi
[ "$D8_PATH" ] || D8_PATH=$tools_path/..
d8_exec=$D8_PATH/d8
+if [ "$1" == "--no-build" ]; then
+ shift
+else
# compile d8 if it doesn't exist, assuming this script
# resides in the repository.
-[ -x $d8_exec ] || scons -j4 -C $D8_PATH -Y $tools_path/.. d8
+ [ -x $d8_exec ] || scons -j4 -C $D8_PATH -Y $tools_path/.. d8
+fi
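+
+# Typical use: 'linux-tick-processor v8.log', or pass --no-build as the
+# first argument to skip the scons step when d8 is already built.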
# nm spits out 'no symbols found' messages to stderr.
$d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
$tools_path/csvparser.js $tools_path/consarray.js \
$tools_path/profile.js $tools_path/profile_view.js \
- $tools_path/tickprocessor.js -- $@ 2>/dev/null
+ $tools_path/logreader.js $tools_path/tickprocessor.js -- $@ 2>/dev/null
diff --git a/V8Binding/v8/tools/logreader.js b/V8Binding/v8/tools/logreader.js
new file mode 100644
index 0000000..78085a4
--- /dev/null
+++ b/V8Binding/v8/tools/logreader.js
@@ -0,0 +1,317 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/**
+ * @fileoverview Log Reader is used to process log files produced by V8.
+ */
+
+// Initialize namespaces.
+var devtools = devtools || {};
+devtools.profiler = devtools.profiler || {};
+
+
+/**
+ * Base class for processing log files.
+ *
+ * @param {Array.<Object>} dispatchTable A table used for parsing and processing
+ * log records.
+ * @constructor
+ */
+devtools.profiler.LogReader = function(dispatchTable) {
+ /**
+ * @type {Array.<Object>}
+ */
+ this.dispatchTable_ = dispatchTable;
+ this.dispatchTable_['alias'] =
+ { parsers: [null, null], processor: this.processAlias_ };
+ this.dispatchTable_['repeat'] =
+ { parsers: [parseInt, 'var-args'], processor: this.processRepeat_,
+ backrefs: true };
+
+ /**
+ * A key-value map for aliases. Translates short name -> full name.
+ * @type {Object}
+ */
+ this.aliases_ = {};
+
+ /**
+ * A key-value map for previous address values.
+ * @type {Object}
+ */
+ this.prevAddresses_ = {};
+
+ /**
+ * A key-value map for events than can be backreference-compressed.
+ * @type {Object}
+ */
+ this.backRefsCommands_ = {};
+ this.initBackRefsCommands_();
+
+ /**
+ * Back references for decompression.
+ * @type {Array.<string>}
+ */
+ this.backRefs_ = [];
+};
+
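+
+// A minimal, illustrative dispatch table (the 'my-event' row type below is
+// hypothetical, not a real V8 log record):
+//
+//   var reader = new devtools.profiler.LogReader({
+//     'my-event': { parsers: [parseInt, null],
+//                   processor: function(count, name) { print(name); } }
+//   });
+//   reader.processLogChunk('my-event,3,foo\n');
+//
+// A null parser passes the raw field through, a function parses it, and the
+// string 'var-args' collects all remaining fields into an array.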
+
+/**
+ * Creates a parser for an address entry.
+ *
+ * @param {string} addressTag Address tag to perform offset decoding.
+ * @return {function(string):number} Address parser.
+ */
+devtools.profiler.LogReader.prototype.createAddressParser = function(
+ addressTag) {
+ var self = this;
+ return (function (str) {
+ var value = parseInt(str, 16);
+ var firstChar = str.charAt(0);
+ if (firstChar == '+' || firstChar == '-') {
+ var addr = self.prevAddresses_[addressTag];
+ addr += value;
+ self.prevAddresses_[addressTag] = addr;
+ return addr;
+ } else if (firstChar != '0' || str.charAt(1) != 'x') {
+ self.prevAddresses_[addressTag] = value;
+ }
+ return value;
+ });
+};
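+// For example, a parser created with tag 'code' maps '100' to 0x100 and
+// remembers it; '+20' then yields 0x120 and '-20' yields 0x100 again, while
+// '0x'-prefixed values are parsed but never remembered.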
+
+
+/**
+ * Expands an alias symbol, if applicable.
+ *
+ * @param {string} symbol Symbol to expand.
+ * @return {string} Expanded symbol, or the input symbol itself.
+ */
+devtools.profiler.LogReader.prototype.expandAlias = function(symbol) {
+ return symbol in this.aliases_ ? this.aliases_[symbol] : symbol;
+};
+
+
+/**
+ * Used for printing error messages.
+ *
+ * @param {string} str Error message.
+ */
+devtools.profiler.LogReader.prototype.printError = function(str) {
+ // Do nothing.
+};
+
+
+/**
+ * Processes a portion of V8 profiler event log.
+ *
+ * @param {string} chunk A portion of log.
+ */
+devtools.profiler.LogReader.prototype.processLogChunk = function(chunk) {
+ this.processLog_(chunk.split('\n'));
+};
+
+
+/**
+ * Processes stack record.
+ *
+ * @param {number} pc Program counter.
+ * @param {Array.<string>} stack String representation of a stack.
+ * @return {Array.<number>} Processed stack.
+ */
+devtools.profiler.LogReader.prototype.processStack = function(pc, stack) {
+ var fullStack = [pc];
+ var prevFrame = pc;
+ for (var i = 0, n = stack.length; i < n; ++i) {
+ var frame = stack[i];
+ var firstChar = frame.charAt(0);
+ if (firstChar == '+' || firstChar == '-') {
+ // An offset from the previous frame.
+ prevFrame += parseInt(frame, 16);
+ fullStack.push(prevFrame);
+ // Filter out possible 'overflow' string.
+ } else if (firstChar != 'o') {
+ fullStack.push(parseInt(frame, 16));
+ }
+ }
+ return fullStack;
+};
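+// For example, processStack(0x100, ['+10', 'overflow', '200']) yields
+// [0x100, 0x110, 0x200]: offsets accumulate from the previous frame and
+// the 'overflow' marker is dropped.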
+
+
+/**
+ * Returns whether a particular dispatch must be skipped.
+ *
+ * @param {!Object} dispatch Dispatch record.
+ * @return {boolean} True if dispatch must be skipped.
+ */
+devtools.profiler.LogReader.prototype.skipDispatch = function(dispatch) {
+ return false;
+};
+
+
+/**
+ * Dispatches a log record.
+ *
+ * @param {Array.<string>} fields Log record.
+ * @private
+ */
+devtools.profiler.LogReader.prototype.dispatchLogRow_ = function(fields) {
+ // Obtain the dispatch.
+ var command = fields[0];
+ if (!(command in this.dispatchTable_)) {
+ throw new Error('unknown command: ' + command);
+ }
+ var dispatch = this.dispatchTable_[command];
+
+ if (dispatch === null || this.skipDispatch(dispatch)) {
+ return;
+ }
+
+ // Parse fields.
+ var parsedFields = [];
+ for (var i = 0; i < dispatch.parsers.length; ++i) {
+ var parser = dispatch.parsers[i];
+ if (parser === null) {
+ parsedFields.push(fields[1 + i]);
+ } else if (typeof parser == 'function') {
+ parsedFields.push(parser(fields[1 + i]));
+ } else {
+ // var-args
+ parsedFields.push(fields.slice(1 + i));
+ break;
+ }
+ }
+
+ // Run the processor.
+ dispatch.processor.apply(this, parsedFields);
+};
+
+
+/**
+ * Decompresses a line if it was backreference-compressed.
+ *
+ * @param {string} line Possibly compressed line.
+ * @return {string} Decompressed line.
+ * @private
+ */
+devtools.profiler.LogReader.prototype.expandBackRef_ = function(line) {
+ var backRefPos;
+ // Filter out the case when a regexp containing '#' is created.
+ if (line.charAt(line.length - 1) != '"'
+ && (backRefPos = line.lastIndexOf('#')) != -1) {
+ var backRef = line.substr(backRefPos + 1);
+ var backRefIdx = parseInt(backRef, 10) - 1;
+ var colonPos = backRef.indexOf(':');
+ var backRefStart =
+ colonPos != -1 ? parseInt(backRef.substr(colonPos + 1), 10) : 0;
+ line = line.substr(0, backRefPos) +
+ this.backRefs_[backRefIdx].substr(backRefStart);
+ }
+ this.backRefs_.unshift(line);
+ if (this.backRefs_.length > 10) {
+ this.backRefs_.length = 10;
+ }
+ return line;
+};
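+// For example, if the previous line was 'aaaa,bbbb', then '#1' expands to
+// 'aaaa,bbbb' and 'cc,#1:5' expands to 'cc,bbbb' (back reference 1, taken
+// from position 5).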
+
+
+/**
+ * Initializes the map of backward reference compressible commands.
+ * @private
+ */
+devtools.profiler.LogReader.prototype.initBackRefsCommands_ = function() {
+ for (var event in this.dispatchTable_) {
+ var dispatch = this.dispatchTable_[event];
+ if (dispatch && dispatch.backrefs) {
+ this.backRefsCommands_[event] = true;
+ }
+ }
+};
+
+
+/**
+ * Processes alias log record. Adds an alias to a corresponding map.
+ *
+ * @param {string} symbol Short name.
+ * @param {string} expansion Long name.
+ * @private
+ */
+devtools.profiler.LogReader.prototype.processAlias_ = function(
+ symbol, expansion) {
+ if (expansion in this.dispatchTable_) {
+ this.dispatchTable_[symbol] = this.dispatchTable_[expansion];
+ if (expansion in this.backRefsCommands_) {
+ this.backRefsCommands_[symbol] = true;
+ }
+ } else {
+ this.aliases_[symbol] = expansion;
+ }
+};
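+// For example, 'alias,cc,code-creation' makes subsequent 'cc' rows dispatch
+// as 'code-creation' rows (in a table that defines that command), while an
+// expansion that is not a command is recorded for expandAlias() lookups.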
+
+
+/**
+ * Processes log lines.
+ *
+ * @param {Array.<string>} lines Log lines.
+ * @private
+ */
+devtools.profiler.LogReader.prototype.processLog_ = function(lines) {
+ var csvParser = new devtools.profiler.CsvParser();
+ try {
+ for (var i = 0, n = lines.length; i < n; ++i) {
+ var line = lines[i];
+ if (!line) {
+ continue;
+ }
+ if (line.charAt(0) == '#' ||
+ line.substr(0, line.indexOf(',')) in this.backRefsCommands_) {
+ line = this.expandBackRef_(line);
+ }
+ var fields = csvParser.parseLine(line);
+ this.dispatchLogRow_(fields);
+ }
+ } catch (e) {
+ this.printError('line ' + (i + 1) + ': ' + (e.message || e));
+ throw e;
+ }
+};
+
+
+/**
+ * Processes a repeat log record. Expands it according to the call count
+ * and invokes processing.
+ *
+ * @param {number} count Count.
+ * @param {Array.<string>} cmd Parsed command.
+ * @private
+ */
+devtools.profiler.LogReader.prototype.processRepeat_ = function(count, cmd) {
+ // Replace the repeat-prefixed command in the backrefs list with a non-prefixed one.
+ this.backRefs_[0] = cmd.join(',');
+ for (var i = 0; i < count; ++i) {
+ this.dispatchLogRow_(cmd);
+ }
+};
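+// For example, 'repeat,3,tick,100,0,0' dispatches the 'tick,100,0,0' row
+// three times; the newest back reference is rewritten so later '#1' refs
+// resolve against the expanded row rather than the 'repeat,' prefix.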
diff --git a/V8Binding/v8/tools/oprofile/annotate b/V8Binding/v8/tools/oprofile/annotate
new file mode 100644
index 0000000..a6a8545
--- /dev/null
+++ b/V8Binding/v8/tools/oprofile/annotate
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# Source common stuff.
+. `cd $(dirname "$0");pwd`/common
+
+opannotate --assembly --session-dir="$OPROFILE_SESSION_DIR" "$shell_exec" "$@"
+
diff --git a/V8Binding/v8/tools/oprofile/common b/V8Binding/v8/tools/oprofile/common
new file mode 100644
index 0000000..fd00207
--- /dev/null
+++ b/V8Binding/v8/tools/oprofile/common
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+# Determine the session directory to use for oprofile.
+[ "$OPROFILE_SESSION_DIR" ] || OPROFILE_SESSION_DIR=/tmp/oprofv8
+
+# If no executable passed as the first parameter assume V8 release mode shell.
+if [ -x "$1" ]
+then
+ shell_exec=`readlink -f "$1"`
+ # Any additional parameters are for the oprofile command.
+ shift
+else
+ oprofile_tools_path=`cd $(dirname "$0");pwd`
+ [ "$V8_SHELL_DIR" ] || V8_SHELL_DIR=$oprofile_tools_path/../..
+ shell_exec=$V8_SHELL_DIR/shell
+fi
+
+alias sudo_opcontrol='sudo opcontrol --session-dir="$OPROFILE_SESSION_DIR"'
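+
+# Scripts sourcing this file accept an optional executable as their first
+# argument; otherwise the V8 'shell' binary under V8_SHELL_DIR is profiled.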
+
diff --git a/V8Binding/v8/tools/oprofile/dump b/V8Binding/v8/tools/oprofile/dump
new file mode 100644
index 0000000..17bb0a1
--- /dev/null
+++ b/V8Binding/v8/tools/oprofile/dump
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# Source common stuff.
+. `cd $(dirname "$0");pwd`/common
+
+sudo_opcontrol --dump "@$"
+
diff --git a/V8Binding/v8/tools/oprofile/report b/V8Binding/v8/tools/oprofile/report
new file mode 100644
index 0000000..b7f28b9
--- /dev/null
+++ b/V8Binding/v8/tools/oprofile/report
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# Source common stuff.
+. `cd $(dirname "$0");pwd`/common
+
+opreport --symbols --session-dir="$OPROFILE_SESSION_DIR" "$shell_exec" "$@"
+
diff --git a/V8Binding/v8/tools/oprofile/reset b/V8Binding/v8/tools/oprofile/reset
new file mode 100644
index 0000000..edb7071
--- /dev/null
+++ b/V8Binding/v8/tools/oprofile/reset
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# Source common stuff.
+. `cd $(dirname "$0");pwd`/common
+
+sudo_opcontrol --reset "$@"
+
diff --git a/V8Binding/v8/tools/oprofile/run b/V8Binding/v8/tools/oprofile/run
new file mode 100644
index 0000000..0a92470
--- /dev/null
+++ b/V8Binding/v8/tools/oprofile/run
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+# Source common stuff.
+. `cd $(dirname "$0");pwd`/common
+
+# Reset oprofile samples.
+sudo_opcontrol --reset
+
+# Run the executable to profile with the correct arguments.
+"$shell_exec" --oprofile "$@"
+
+# Flush oprofile data including the generated code into ELF binaries.
+sudo_opcontrol --dump
+
diff --git a/V8Binding/v8/tools/oprofile/shutdown b/V8Binding/v8/tools/oprofile/shutdown
new file mode 100644
index 0000000..8ebb72f
--- /dev/null
+++ b/V8Binding/v8/tools/oprofile/shutdown
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# Source common stuff.
+. `cd $(dirname "$0");pwd`/common
+
+sudo_opcontrol --shutdown "$@"
+
diff --git a/V8Binding/v8/tools/oprofile/start b/V8Binding/v8/tools/oprofile/start
new file mode 100644
index 0000000..059e4b8
--- /dev/null
+++ b/V8Binding/v8/tools/oprofile/start
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# Source common stuff.
+. `cd $(dirname "$0");pwd`/common
+
+sudo_opcontrol --start --no-vmlinux "$@"
+
diff --git a/V8Binding/v8/tools/test.py b/V8Binding/v8/tools/test.py
index 6bd536b..f701ceb 100755
--- a/V8Binding/v8/tools/test.py
+++ b/V8Binding/v8/tools/test.py
@@ -372,6 +372,8 @@ class TestOutput(object):
def UnexpectedOutput(self):
if self.HasCrashed():
outcome = CRASH
+ elif self.HasTimedOut():
+ outcome = TIMEOUT
elif self.HasFailed():
outcome = FAIL
else:
@@ -390,7 +392,7 @@ class TestOutput(object):
def HasTimedOut(self):
return self.output.timed_out;
-
+
def HasFailed(self):
execution_failed = self.test.DidFail(self.output)
if self.test.IsNegative():
diff --git a/V8Binding/v8/tools/tickprocessor.js b/V8Binding/v8/tools/tickprocessor.js
index 477ab26..4afc69f 100644
--- a/V8Binding/v8/tools/tickprocessor.js
+++ b/V8Binding/v8/tools/tickprocessor.js
@@ -52,8 +52,35 @@ function readFile(fileName) {
}
+function inherits(childCtor, parentCtor) {
+ function tempCtor() {};
+ tempCtor.prototype = parentCtor.prototype;
+ childCtor.prototype = new tempCtor();
+};
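+// Classic prototype-chain inheritance: after inherits(Child, Parent), Child
+// instances see Parent.prototype methods, and properties later assigned to
+// Child.prototype override them.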
+
+
function TickProcessor(
cppEntriesProvider, separateIc, ignoreUnknown, stateFilter) {
+ devtools.profiler.LogReader.call(this, {
+ 'shared-library': { parsers: [null, parseInt, parseInt],
+ processor: this.processSharedLibrary },
+ 'code-creation': {
+ parsers: [null, this.createAddressParser('code'), parseInt, null],
+ processor: this.processCodeCreation, backrefs: true },
+ 'code-move': { parsers: [this.createAddressParser('code'),
+ this.createAddressParser('code-move-to')],
+ processor: this.processCodeMove, backrefs: true },
+ 'code-delete': { parsers: [this.createAddressParser('code')],
+ processor: this.processCodeDelete, backrefs: true },
+ 'tick': { parsers: [this.createAddressParser('code'),
+ this.createAddressParser('stack'), parseInt, 'var-args'],
+ processor: this.processTick, backrefs: true },
+ 'profiler': null,
+ // Obsolete row types.
+ 'code-allocate': null,
+ 'begin-code-region': null,
+ 'end-code-region': null });
+
this.cppEntriesProvider_ = cppEntriesProvider;
this.ignoreUnknown_ = ignoreUnknown;
this.stateFilter_ = stateFilter;
@@ -87,6 +114,7 @@ function TickProcessor(
this.viewBuilder_ = new devtools.profiler.ViewBuilder(1);
this.lastLogFileName_ = null;
};
+inherits(TickProcessor, devtools.profiler.LogReader);
TickProcessor.VmStates = {
@@ -106,25 +134,15 @@ TickProcessor.CodeTypes = {
// codeTypes_ map because there can be zillions of them.
-TickProcessor.RecordsDispatch = {
- 'shared-library': { parsers: [null, parseInt, parseInt],
- processor: 'processSharedLibrary' },
- 'code-creation': { parsers: [null, parseInt, parseInt, null],
- processor: 'processCodeCreation' },
- 'code-move': { parsers: [parseInt, parseInt],
- processor: 'processCodeMove' },
- 'code-delete': { parsers: [parseInt], processor: 'processCodeDelete' },
- 'tick': { parsers: [parseInt, parseInt, parseInt, 'var-args'],
- processor: 'processTick' },
- 'profiler': null,
- // Obsolete row types.
- 'code-allocate': null,
- 'begin-code-region': null,
- 'end-code-region': null
-};
+TickProcessor.CALL_PROFILE_CUTOFF_PCT = 2.0;
-TickProcessor.CALL_PROFILE_CUTOFF_PCT = 2.0;
+/**
+ * @override
+ */
+TickProcessor.prototype.printError = function(str) {
+ print(str);
+};
TickProcessor.prototype.setCodeType = function(name, type) {
@@ -150,57 +168,7 @@ TickProcessor.prototype.isJsCode = function(name) {
TickProcessor.prototype.processLogFile = function(fileName) {
this.lastLogFileName_ = fileName;
var contents = readFile(fileName);
- this.processLog(contents.split('\n'));
-};
-
-
-TickProcessor.prototype.processLog = function(lines) {
- var csvParser = new devtools.profiler.CsvParser();
- try {
- for (var i = 0, n = lines.length; i < n; ++i) {
- var line = lines[i];
- if (!line) {
- continue;
- }
- var fields = csvParser.parseLine(line);
- this.dispatchLogRow(fields);
- }
- } catch (e) {
- print('line ' + (i + 1) + ': ' + (e.message || e));
- throw e;
- }
-};
-
-
-TickProcessor.prototype.dispatchLogRow = function(fields) {
- // Obtain the dispatch.
- var command = fields[0];
- if (!(command in TickProcessor.RecordsDispatch)) {
- throw new Error('unknown command: ' + command);
- }
- var dispatch = TickProcessor.RecordsDispatch[command];
-
- if (dispatch === null) {
- return;
- }
-
- // Parse fields.
- var parsedFields = [];
- for (var i = 0; i < dispatch.parsers.length; ++i) {
- var parser = dispatch.parsers[i];
- if (parser === null) {
- parsedFields.push(fields[1 + i]);
- } else if (typeof parser == 'function') {
- parsedFields.push(parser(fields[1 + i]));
- } else {
- // var-args
- parsedFields.push(fields.slice(1 + i));
- break;
- }
- }
-
- // Run the processor.
- this[dispatch.processor].apply(this, parsedFields);
+ this.processLogChunk(contents);
};
@@ -220,7 +188,8 @@ TickProcessor.prototype.processSharedLibrary = function(
TickProcessor.prototype.processCodeCreation = function(
type, start, size, name) {
- var entry = this.profile_.addCode(type, name, start, size);
+ var entry = this.profile_.addCode(
+ this.expandAlias(type), name, start, size);
};
@@ -247,15 +216,7 @@ TickProcessor.prototype.processTick = function(pc, sp, vmState, stack) {
return;
}
- var fullStack = [pc];
- for (var i = 0, n = stack.length; i < n; ++i) {
- var frame = stack[i];
- // Leave only numbers starting with 0x. Filter possible 'overflow' string.
- if (frame.charAt(0) == '0') {
- fullStack.push(parseInt(frame, 16));
- }
- }
- this.profile_.recordTick(fullStack);
+ this.profile_.recordTick(this.processStack(pc, stack));
};
@@ -418,7 +379,9 @@ CppEntriesProvider.prototype.parseVmSymbols = function(
function addPrevEntry(end) {
// Several functions can be mapped onto the same address. To avoid
// creating zero-sized entries, skip such duplicates.
- if (prevEntry && prevEntry.start != end) {
+ // Also double-check that function belongs to the library address space.
+ if (prevEntry && prevEntry.start < end &&
+ prevEntry.start >= libStart && end <= libEnd) {
processorFunc(prevEntry.name, prevEntry.start, end);
}
}
@@ -449,29 +412,28 @@ CppEntriesProvider.prototype.parseNextLine = function() {
};
-function inherits(childCtor, parentCtor) {
- function tempCtor() {};
- tempCtor.prototype = parentCtor.prototype;
- childCtor.prototype = new tempCtor();
-};
-
-
-function UnixCppEntriesProvider() {
+function UnixCppEntriesProvider(nmExec) {
this.symbols = [];
this.parsePos = 0;
+ this.nmExec = nmExec;
};
inherits(UnixCppEntriesProvider, CppEntriesProvider);
-UnixCppEntriesProvider.FUNC_RE = /^([0-9a-fA-F]{8}) . (.*)$/;
+UnixCppEntriesProvider.FUNC_RE = /^([0-9a-fA-F]{8}) [tTwW] (.*)$/;
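+// Only text ('t'/'T') and weak ('w'/'W') nm symbols are treated as
+// functions; matching any type letter would also pick up data symbols.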
UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
- this.symbols = [
- os.system('nm', ['-C', '-n', libName], -1, -1),
- os.system('nm', ['-C', '-n', '-D', libName], -1, -1)
- ];
this.parsePos = 0;
+ try {
+ this.symbols = [
+ os.system(this.nmExec, ['-C', '-n', libName], -1, -1),
+ os.system(this.nmExec, ['-C', '-n', '-D', libName], -1, -1)
+ ];
+ } catch (e) {
+ // If the library cannot be found on this system let's not panic.
+ this.symbols = ['', ''];
+ }
};
@@ -564,7 +526,8 @@ function processArguments(args) {
platform: 'unix',
stateFilter: null,
ignoreUnknown: false,
- separateIc: false
+ separateIc: false,
+ nm: 'nm'
};
var argsDispatch = {
'-j': ['stateFilter', TickProcessor.VmStates.JS,
@@ -584,7 +547,9 @@ function processArguments(args) {
'--unix': ['platform', 'unix',
'Specify that we are running on *nix platform'],
'--windows': ['platform', 'windows',
- 'Specify that we are running on Windows platform']
+ 'Specify that we are running on Windows platform'],
+ '--nm': ['nm', 'nm',
+ 'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)']
};
argsDispatch['--js'] = argsDispatch['-j'];
argsDispatch['--gc'] = argsDispatch['-g'];
@@ -616,9 +581,15 @@ function processArguments(args) {
break;
}
args.shift();
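+ // Split e.g. '--nm=/my_dir/nm' into the dispatch key '--nm' and the
+ // user-supplied value '/my_dir/nm'.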
+ var userValue = null;
+ var eqPos = arg.indexOf('=');
+ if (eqPos != -1) {
+ userValue = arg.substr(eqPos + 1);
+ arg = arg.substr(0, eqPos);
+ }
if (arg in argsDispatch) {
var dispatch = argsDispatch[arg];
- result[dispatch[0]] = dispatch[1];
+ result[dispatch[0]] = userValue == null ? dispatch[1] : userValue;
} else {
printUsageAndExit();
}
@@ -633,7 +604,7 @@ function processArguments(args) {
var params = processArguments(arguments);
var tickProcessor = new TickProcessor(
- params.platform == 'unix' ? new UnixCppEntriesProvider() :
+ params.platform == 'unix' ? new UnixCppEntriesProvider(params.nm) :
new WindowsCppEntriesProvider(),
params.separateIc,
params.ignoreUnknown,
diff --git a/V8Binding/v8/tools/v8.xcodeproj/project.pbxproj b/V8Binding/v8/tools/v8.xcodeproj/project.pbxproj
index 2a7cb2d..6e3d276 100755
--- a/V8Binding/v8/tools/v8.xcodeproj/project.pbxproj
+++ b/V8Binding/v8/tools/v8.xcodeproj/project.pbxproj
@@ -273,6 +273,7 @@
/* End PBXContainerItemProxy section */
/* Begin PBXFileReference section */
+ 22A76C900FF259E600FDC694 /* log-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "log-inl.h"; sourceTree = "<group>"; };
58242A1E0FA1F14D00BD6F59 /* json-delay.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = "json-delay.js"; sourceTree = "<group>"; };
58950D4E0F55514900F3E8BA /* jump-target-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "jump-target-arm.cc"; path = "arm/jump-target-arm.cc"; sourceTree = "<group>"; };
58950D4F0F55514900F3E8BA /* jump-target-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "jump-target-ia32.cc"; path = "ia32/jump-target-ia32.cc"; sourceTree = "<group>"; };
@@ -622,6 +623,7 @@
897FF0D70E719AB300D62E90 /* C++ */ = {
isa = PBXGroup;
children = (
+ 22A76C900FF259E600FDC694 /* log-inl.h */,
897FF0F60E719B8F00D62E90 /* accessors.cc */,
897FF0F70E719B8F00D62E90 /* accessors.h */,
897FF0F80E719B8F00D62E90 /* allocation.cc */,
diff --git a/V8Binding/v8/tools/visual_studio/v8_base.vcproj b/V8Binding/v8/tools/visual_studio/v8_base.vcproj
index afd73f4..bfdcec9 100644
--- a/V8Binding/v8/tools/visual_studio/v8_base.vcproj
+++ b/V8Binding/v8/tools/visual_studio/v8_base.vcproj
@@ -541,6 +541,10 @@
>
</File>
<File
+ RelativePath="..\..\src\log-inl.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\log.h"
>
</File>
diff --git a/V8Binding/v8/tools/visual_studio/v8_base_arm.vcproj b/V8Binding/v8/tools/visual_studio/v8_base_arm.vcproj
index ca0a2da..8ebe386 100644
--- a/V8Binding/v8/tools/visual_studio/v8_base_arm.vcproj
+++ b/V8Binding/v8/tools/visual_studio/v8_base_arm.vcproj
@@ -537,6 +537,10 @@
>
</File>
<File
+ RelativePath="..\..\src\log-inl.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\log.h"
>
</File>
diff --git a/V8Binding/v8/tools/windows-tick-processor.bat b/V8Binding/v8/tools/windows-tick-processor.bat
index 52454e3..67cbe98 100644
--- a/V8Binding/v8/tools/windows-tick-processor.bat
+++ b/V8Binding/v8/tools/windows-tick-processor.bat
@@ -2,4 +2,4 @@
SET tools_dir=%~dp0
-%tools_dir%..\d8 %tools_dir%splaytree.js %tools_dir%codemap.js %tools_dir%csvparser.js %tools_dir%consarray.js %tools_dir%profile.js %tools_dir%profile_view.js %tools_dir%tickprocessor.js -- --windows %*
+%tools_dir%..\d8 %tools_dir%splaytree.js %tools_dir%codemap.js %tools_dir%csvparser.js %tools_dir%consarray.js %tools_dir%profile.js %tools_dir%profile_view.js %tools_dir%logreader.js %tools_dir%tickprocessor.js -- --windows %*
diff --git a/WEBKIT_MERGE_REVISION b/WEBKIT_MERGE_REVISION
index acaed73..60d833e 100644
--- a/WEBKIT_MERGE_REVISION
+++ b/WEBKIT_MERGE_REVISION
@@ -2,4 +2,4 @@ We sync with Chromium release revision, which has both webkit revision and V8 re
http://src.chromium.org/svn/branches/187/src@18043
http://svn.webkit.org/repository/webkit/trunk@44544
- http://v8.googlecode.com/svn/trunk@2121
+ http://v8.googlecode.com/svn/branches/bleeding_edge@2313