-rw-r--r--  V8Binding/Android.v8common.mk | 5
-rw-r--r--  V8Binding/v8/AUTHORS | 1
-rw-r--r--  V8Binding/v8/ChangeLog | 49
-rw-r--r--  V8Binding/v8/LICENSE | 20
-rw-r--r--  V8Binding/v8/SConstruct | 34
-rw-r--r--  V8Binding/v8/include/v8.h | 103
-rwxr-xr-x  V8Binding/v8/src/SConscript | 48
-rw-r--r--  V8Binding/v8/src/api.cc | 93
-rw-r--r--  V8Binding/v8/src/api.h | 85
-rw-r--r--  V8Binding/v8/src/arguments.h | 26
-rw-r--r--  V8Binding/v8/src/arm/assembler-arm-inl.h | 29
-rw-r--r--  V8Binding/v8/src/arm/assembler-arm.cc | 33
-rw-r--r--  V8Binding/v8/src/arm/assembler-arm.h | 28
-rw-r--r--  V8Binding/v8/src/arm/builtins-arm.cc | 425
-rw-r--r--  V8Binding/v8/src/arm/cfg-arm.cc | 301
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm.cc | 185
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm.h | 6
-rw-r--r--  V8Binding/v8/src/arm/constants-arm.cc | 92
-rw-r--r--  V8Binding/v8/src/arm/constants-arm.h | 52
-rw-r--r--  V8Binding/v8/src/arm/debug-arm.cc | 54
-rw-r--r--  V8Binding/v8/src/arm/disasm-arm.cc | 19
-rw-r--r--  V8Binding/v8/src/arm/ic-arm.cc | 18
-rw-r--r--  V8Binding/v8/src/arm/macro-assembler-arm.cc | 137
-rw-r--r--  V8Binding/v8/src/arm/macro-assembler-arm.h | 77
-rw-r--r--  V8Binding/v8/src/arm/simulator-arm.cc | 59
-rw-r--r--  V8Binding/v8/src/arm/simulator-arm.h | 37
-rw-r--r--  V8Binding/v8/src/arm/stub-cache-arm.cc | 20
-rw-r--r--  V8Binding/v8/src/arm/virtual-frame-arm.cc | 12
-rw-r--r--  V8Binding/v8/src/array.js | 4
-rw-r--r--  V8Binding/v8/src/assembler.cc | 2
-rw-r--r--  V8Binding/v8/src/ast.cc | 1
-rw-r--r--  V8Binding/v8/src/ast.h | 49
-rw-r--r--  V8Binding/v8/src/bootstrapper.cc | 12
-rw-r--r--  V8Binding/v8/src/bootstrapper.h | 1
-rw-r--r--  V8Binding/v8/src/builtins.cc | 15
-rw-r--r--  V8Binding/v8/src/builtins.h | 12
-rw-r--r--  V8Binding/v8/src/cfg.cc | 763
-rw-r--r--  V8Binding/v8/src/cfg.h | 871
-rw-r--r--  V8Binding/v8/src/codegen.cc | 45
-rw-r--r--  V8Binding/v8/src/codegen.h | 12
-rw-r--r--  V8Binding/v8/src/compiler.cc | 21
-rw-r--r--  V8Binding/v8/src/d8.js | 7
-rw-r--r--  V8Binding/v8/src/debug-agent.cc | 5
-rw-r--r--  V8Binding/v8/src/debug-agent.h | 5
-rw-r--r--  V8Binding/v8/src/debug-delay.js | 24
-rw-r--r--  V8Binding/v8/src/debug.cc | 91
-rw-r--r--  V8Binding/v8/src/debug.h | 23
-rw-r--r--  V8Binding/v8/src/execution.cc | 114
-rw-r--r--  V8Binding/v8/src/execution.h | 64
-rw-r--r--  V8Binding/v8/src/factory.cc | 15
-rw-r--r--  V8Binding/v8/src/factory.h | 2
-rw-r--r--  V8Binding/v8/src/flag-definitions.h | 2
-rw-r--r--  V8Binding/v8/src/handles.cc | 115
-rw-r--r--  V8Binding/v8/src/handles.h | 6
-rw-r--r--  V8Binding/v8/src/heap-profiler.cc | 626
-rw-r--r--  V8Binding/v8/src/heap-profiler.h | 263
-rw-r--r--  V8Binding/v8/src/heap.cc | 334
-rw-r--r--  V8Binding/v8/src/heap.h | 17
-rw-r--r--  V8Binding/v8/src/ia32/assembler-ia32.cc | 46
-rw-r--r--  V8Binding/v8/src/ia32/assembler-ia32.h | 5
-rw-r--r--  V8Binding/v8/src/ia32/builtins-ia32.cc | 488
-rw-r--r--  V8Binding/v8/src/ia32/cfg-ia32.cc | 315
-rw-r--r--  V8Binding/v8/src/ia32/codegen-ia32.cc | 359
-rw-r--r--  V8Binding/v8/src/ia32/codegen-ia32.h | 2
-rw-r--r--  V8Binding/v8/src/ia32/debug-ia32.cc | 21
-rw-r--r--  V8Binding/v8/src/ia32/ic-ia32.cc | 37
-rw-r--r--  V8Binding/v8/src/ia32/macro-assembler-ia32.cc | 53
-rw-r--r--  V8Binding/v8/src/ia32/macro-assembler-ia32.h | 68
-rw-r--r--  V8Binding/v8/src/ia32/simulator-ia32.h | 19
-rw-r--r--  V8Binding/v8/src/ia32/stub-cache-ia32.cc | 26
-rw-r--r--  V8Binding/v8/src/ic-inl.h | 2
-rw-r--r--  V8Binding/v8/src/ic.cc | 2
-rw-r--r--  V8Binding/v8/src/ic.h | 2
-rw-r--r--  V8Binding/v8/src/list.h | 7
-rw-r--r--  V8Binding/v8/src/log-utils.cc | 16
-rw-r--r--  V8Binding/v8/src/log-utils.h | 9
-rw-r--r--  V8Binding/v8/src/log.cc | 45
-rw-r--r--  V8Binding/v8/src/log.h | 2
-rw-r--r--  V8Binding/v8/src/macro-assembler.h | 2
-rw-r--r--  V8Binding/v8/src/mark-compact.cc | 60
-rw-r--r--  V8Binding/v8/src/messages.js | 20
-rw-r--r--  V8Binding/v8/src/mirror-delay.js | 5
-rw-r--r--  V8Binding/v8/src/objects-debug.cc | 1
-rw-r--r--  V8Binding/v8/src/objects-inl.h | 10
-rw-r--r--  V8Binding/v8/src/objects.cc | 268
-rw-r--r--  V8Binding/v8/src/objects.h | 103
-rw-r--r--  V8Binding/v8/src/parser.cc | 183
-rw-r--r--  V8Binding/v8/src/platform-freebsd.cc | 20
-rw-r--r--  V8Binding/v8/src/platform-linux.cc | 6
-rw-r--r--  V8Binding/v8/src/platform-macos.cc | 22
-rw-r--r--  V8Binding/v8/src/prettyprinter.cc | 10
-rw-r--r--  V8Binding/v8/src/regexp-stack.cc | 8
-rw-r--r--  V8Binding/v8/src/regexp-stack.h | 2
-rw-r--r--  V8Binding/v8/src/rewriter.cc | 12
-rw-r--r--  V8Binding/v8/src/runtime.cc | 223
-rw-r--r--  V8Binding/v8/src/runtime.h | 420
-rw-r--r--  V8Binding/v8/src/serialize.cc | 49
-rw-r--r--  V8Binding/v8/src/serialize.h | 5
-rw-r--r--  V8Binding/v8/src/spaces.cc | 150
-rw-r--r--  V8Binding/v8/src/spaces.h | 86
-rw-r--r--  V8Binding/v8/src/string-stream.cc | 2
-rw-r--r--  V8Binding/v8/src/string-stream.h | 2
-rw-r--r--  V8Binding/v8/src/string.js | 78
-rw-r--r--  V8Binding/v8/src/stub-cache.cc | 41
-rw-r--r--  V8Binding/v8/src/third_party/dtoa/dtoa.c | 18
-rw-r--r--  V8Binding/v8/src/top.cc | 33
-rw-r--r--  V8Binding/v8/src/top.h | 7
-rw-r--r--  V8Binding/v8/src/uri.js | 10
-rw-r--r--  V8Binding/v8/src/usage-analyzer.cc | 53
-rw-r--r--  V8Binding/v8/src/utils.cc | 2
-rw-r--r--  V8Binding/v8/src/v8-counters.h | 2
-rw-r--r--  V8Binding/v8/src/v8.cc | 14
-rw-r--r--  V8Binding/v8/src/v8.h | 5
-rw-r--r--  V8Binding/v8/src/v8natives.js | 18
-rw-r--r--  V8Binding/v8/src/v8threads.cc | 34
-rw-r--r--  V8Binding/v8/src/v8threads.h | 1
-rw-r--r--  V8Binding/v8/src/variables.h | 8
-rw-r--r--  V8Binding/v8/src/version.cc | 2
-rw-r--r--  V8Binding/v8/src/x64/assembler-x64.cc | 48
-rw-r--r--  V8Binding/v8/src/x64/assembler-x64.h | 21
-rw-r--r--  V8Binding/v8/src/x64/builtins-x64.cc | 505
-rw-r--r--  V8Binding/v8/src/x64/cfg-x64.cc | 322
-rw-r--r--  V8Binding/v8/src/x64/codegen-x64.cc | 825
-rw-r--r--  V8Binding/v8/src/x64/codegen-x64.h | 2
-rw-r--r--  V8Binding/v8/src/x64/debug-x64.cc | 14
-rw-r--r--  V8Binding/v8/src/x64/ic-x64.cc | 151
-rw-r--r--  V8Binding/v8/src/x64/macro-assembler-x64.cc | 880
-rw-r--r--  V8Binding/v8/src/x64/macro-assembler-x64.h | 325
-rw-r--r--  V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc | 2
-rw-r--r--  V8Binding/v8/src/x64/simulator-x64.h | 19
-rw-r--r--  V8Binding/v8/src/x64/stub-cache-x64.cc | 89
-rw-r--r--  V8Binding/v8/src/x64/virtual-frame-x64.cc | 4
-rw-r--r--  V8Binding/v8/src/zone-inl.h | 19
-rw-r--r--  V8Binding/v8/src/zone.h | 8
-rw-r--r--  V8Binding/v8/test/cctest/SConscript | 1
-rw-r--r--  V8Binding/v8/test/cctest/cctest.status | 2
-rw-r--r--  V8Binding/v8/test/cctest/test-alloc.cc | 69
-rw-r--r--  V8Binding/v8/test/cctest/test-api.cc | 116
-rw-r--r--  V8Binding/v8/test/cctest/test-assembler-arm.cc | 6
-rw-r--r--  V8Binding/v8/test/cctest/test-conversions.cc | 8
-rw-r--r--  V8Binding/v8/test/cctest/test-debug.cc | 12
-rw-r--r--  V8Binding/v8/test/cctest/test-heap-profiler.cc | 396
-rw-r--r--  V8Binding/v8/test/cctest/test-log.cc | 7
-rw-r--r--  V8Binding/v8/test/cctest/test-sockets.cc | 1
-rw-r--r--  V8Binding/v8/test/cctest/test-strings.cc | 28
-rw-r--r--  V8Binding/v8/test/es5conform/README | 14
-rw-r--r--  V8Binding/v8/test/es5conform/es5conform.status | 68
-rw-r--r--  V8Binding/v8/test/es5conform/harness-adapt.js | 74
-rw-r--r--  V8Binding/v8/test/es5conform/testcfg.py | 108
-rw-r--r--  V8Binding/v8/test/mjsunit/arguments-enum.js | 14
-rw-r--r--  V8Binding/v8/test/mjsunit/array-constructor.js | 119
-rw-r--r--  V8Binding/v8/test/mjsunit/array-splice.js | 3
-rw-r--r--  V8Binding/v8/test/mjsunit/class-of-builtins.js | 2
-rw-r--r--  V8Binding/v8/test/mjsunit/debug-compile-event.js | 4
-rw-r--r--  V8Binding/v8/test/mjsunit/debug-scopes.js | 101
-rw-r--r--  V8Binding/v8/test/mjsunit/debug-stepout-recursive-function.js | 106
-rw-r--r--  V8Binding/v8/test/mjsunit/debug-stepout-to-builtin.js | 84
-rw-r--r--  V8Binding/v8/test/mjsunit/function-prototype.js | 5
-rw-r--r--  V8Binding/v8/test/mjsunit/invalid-lhs.js | 11
-rw-r--r--  V8Binding/v8/test/mjsunit/invalid-source-element.js (renamed from V8Binding/v8/test/mjsunit/array-splice-webkit.js) | 37
-rw-r--r--  V8Binding/v8/test/mjsunit/mirror-script.js | 16
-rw-r--r--  V8Binding/v8/test/mjsunit/mjsunit.status | 24
-rw-r--r--  V8Binding/v8/test/mjsunit/regress/regress-220.js | 2
-rw-r--r--  V8Binding/v8/test/mjsunit/smi-negative-zero.js | 60
-rw-r--r--  V8Binding/v8/test/mjsunit/switch.js | 4
-rw-r--r--  V8Binding/v8/test/mjsunit/testcfg.py | 3
-rw-r--r--  V8Binding/v8/test/mjsunit/third_party/array-splice-webkit.js | 62
-rw-r--r--  V8Binding/v8/test/mjsunit/third_party/object-keys.js | 68
-rw-r--r--  V8Binding/v8/test/mjsunit/third_party/regexp-pcre.js (renamed from V8Binding/v8/test/mjsunit/regexp-pcre.js) | 0
-rw-r--r--  V8Binding/v8/test/mozilla/mozilla.status | 7
-rw-r--r--  V8Binding/v8/tools/gyp/v8.gyp | 13
-rwxr-xr-x  V8Binding/v8/tools/js2c.py | 20
-rw-r--r--  V8Binding/v8/tools/jsmin.py | 496
-rwxr-xr-x  V8Binding/v8/tools/presubmit.py | 11
-rwxr-xr-x  V8Binding/v8/tools/run-valgrind.py | 2
-rwxr-xr-x  V8Binding/v8/tools/test.py | 6
-rw-r--r--  V8Binding/v8/tools/v8.xcodeproj/project.pbxproj | 12
-rw-r--r--  V8Binding/v8/tools/visual_studio/common.vsprops | 2
-rw-r--r--  V8Binding/v8/tools/visual_studio/v8_base.vcproj | 20
-rw-r--r--  V8Binding/v8/tools/visual_studio/v8_base_arm.vcproj | 24
-rw-r--r--  V8Binding/v8/tools/visual_studio/v8_base_x64.vcproj | 20
-rw-r--r--  V8Binding/v8/tools/visual_studio/v8_cctest.vcproj | 4
-rw-r--r--  V8Binding/v8/tools/visual_studio/v8_cctest_arm.vcproj | 4
-rw-r--r--  V8Binding/v8/tools/visual_studio/v8_cctest_x64.vcproj | 4
-rw-r--r--  WEBKIT_MERGE_REVISION | 2
185 files changed, 8577 insertions, 6027 deletions
diff --git a/V8Binding/Android.v8common.mk b/V8Binding/Android.v8common.mk
index c8781df..f64a5af 100644
--- a/V8Binding/Android.v8common.mk
+++ b/V8Binding/Android.v8common.mk
@@ -8,7 +8,6 @@ V8_LOCAL_SRC_FILES := \
src/ast.cc \
src/bootstrapper.cc \
src/builtins.cc \
- src/cfg.cc \
src/checks.cc \
src/code-stubs.cc \
src/codegen.cc \
@@ -72,7 +71,6 @@ ifeq ($(TARGET_ARCH),arm)
V8_LOCAL_SRC_FILES += \
src/arm/assembler-arm.cc \
src/arm/builtins-arm.cc \
- src/arm/cfg-arm.cc \
src/arm/codegen-arm.cc \
src/arm/cpu-arm.cc \
src/arm/disasm-arm.cc \
@@ -90,7 +88,6 @@ ifeq ($(TARGET_ARCH),x86)
V8_LOCAL_SRC_FILES += \
src/ia32/assembler-ia32.cc \
src/ia32/builtins-ia32.cc \
- src/ia32/cfg-ia32.cc \
src/ia32/codegen-ia32.cc \
src/ia32/cpu-ia32.cc \
src/ia32/disasm-ia32.cc \
@@ -128,5 +125,3 @@ V8_LOCAL_JS_LIBRARY_FILES := \
src/regexp-delay.js \
src/json-delay.js \
src/macros.py
-
-
diff --git a/V8Binding/v8/AUTHORS b/V8Binding/v8/AUTHORS
index 5c5ae4e..de8cabb 100644
--- a/V8Binding/v8/AUTHORS
+++ b/V8Binding/v8/AUTHORS
@@ -10,6 +10,7 @@ Alexandre Vassalotti <avassalotti@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel James <dnljms@gmail.com>
+Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
Joel Stanley <joel.stan@gmail.com>
Matt Hanselman <mjhanselman@gmail.com>
diff --git a/V8Binding/v8/ChangeLog b/V8Binding/v8/ChangeLog
index b07e7cc..8c74591 100644
--- a/V8Binding/v8/ChangeLog
+++ b/V8Binding/v8/ChangeLog
@@ -1,3 +1,52 @@
+2009-09-23: Version 1.3.13
+
+ Fixed uninitialized memory problem.
+
+ Improved heap profiler support.
+
+
+2009-09-22: Version 1.3.12
+
+ Changed behavior of |function|.toString() on built-in functions to
+ be compatible with other implementations. Patch by Jan de Mooij.
+
+ Added Object::IsDirty in the API.
+
+ Optimized array construction; it is now handled purely in native
+ code.
+
+ [ES5] Made properties of the arguments array enumerable.
+
+ [ES5] Added test suite adapter for the es5conform test suite.
+
+ [ES5] Added Object.keys function.
+
+
+2009-09-15: Version 1.3.11
+
+ Fixed crash in error reporting during bootstrapping.
+
+ Optimized generated IA32 math code by using SSE2 instructions when
+ available.
+
+ Implemented missing pieces of debugger infrastructure on ARM. The
+ debugger is now fully functional on ARM.
+
+ Make 'hidden' the default visibility for gcc.
+
+
+2009-09-09: Version 1.3.10
+
+ Fixed profiler on Mac in 64-bit mode.
+
+ Optimized creation of objects from simple constructor functions on
+ ARM.
+
+ Fixed a number of debugger issues.
+
+ Reduced the amount of memory consumed by V8.
+
+
2009-09-02: Version 1.3.9
Optimized stack guard checks on ARM.
diff --git a/V8Binding/v8/LICENSE b/V8Binding/v8/LICENSE
index 553cf47..e3ed242 100644
--- a/V8Binding/v8/LICENSE
+++ b/V8Binding/v8/LICENSE
@@ -2,10 +2,15 @@ This license applies to all parts of V8 that are not externally
maintained libraries. The externally maintained libraries used by V8
are:
- - PCRE test suite, located in test/mjsunit/regexp-pcre.js. This is
- based on the test suite from PCRE-7.3, which is copyrighted by the
- University of Cambridge and Google, Inc. The copyright notice and
- license are embedded in regexp-pcre.js.
+ - PCRE test suite, located in
+ test/mjsunit/third_party/regexp-pcre.js. This is based on the
+ test suite from PCRE-7.3, which is copyrighted by the University
+ of Cambridge and Google, Inc. The copyright notice and license
+ are embedded in regexp-pcre.js.
+
+ - Layout tests, located in test/mjsunit/third_party. These are
+ based on layout tests from webkit.org which are copyrighted by
+ Apple Computer, Inc. and released under a 3-clause BSD license.
- Dtoa, located under third_party/dtoa. This code is copyrighted by
David M. Gay and released under an MIT license.
@@ -16,13 +21,6 @@ are:
This code is copyrighted by Sun Microsystems Inc. and released
under a 3-clause BSD license.
- - JSMin JavaScript minifier, located at tools/jsmin.py. This code is
- copyrighted by Douglas Crockford and Baruch Even and released under
- an MIT license.
-
- - Valgrind client API header, located at third_party/valgrind/valgrind.h
- This is release under the BSD license.
-
- Valgrind client API header, located at third_party/valgrind/valgrind.h
This is release under the BSD license.
diff --git a/V8Binding/v8/SConstruct b/V8Binding/v8/SConstruct
index ddd0190..b5aa7ab 100644
--- a/V8Binding/v8/SConstruct
+++ b/V8Binding/v8/SConstruct
@@ -96,13 +96,18 @@ ANDROID_LINKFLAGS = ['-nostdlib',
LIBRARY_FLAGS = {
'all': {
- 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
'CPPPATH': [join(root_dir, 'src')],
'regexp:native': {
'CPPDEFINES': ['V8_NATIVE_REGEXP']
},
'mode:debug': {
'CPPDEFINES': ['V8_ENABLE_CHECKS']
+ },
+ 'profilingsupport:on': {
+ 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
+ },
+ 'debuggersupport:on': {
+ 'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT'],
}
},
'gcc': {
@@ -110,11 +115,14 @@ LIBRARY_FLAGS = {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
},
+ 'visibility:hidden': {
+ # Use visibility=default to disable this.
+ 'CXXFLAGS': ['-fvisibility=hidden']
+ },
'mode:debug': {
'CCFLAGS': ['-g', '-O0'],
'CPPDEFINES': ['ENABLE_DISASSEMBLER', 'DEBUG'],
'os:android': {
- 'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT'],
'CCFLAGS': ['-mthumb']
}
},
@@ -123,7 +131,7 @@ LIBRARY_FLAGS = {
'-ffunction-sections'],
'os:android': {
'CCFLAGS': ['-mthumb', '-Os'],
- 'CPPDEFINES': ['SK_RELEASE', 'NDEBUG', 'ENABLE_DEBUGGER_SUPPORT']
+ 'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
}
},
'os:linux': {
@@ -229,7 +237,6 @@ LIBRARY_FLAGS = {
V8_EXTRA_FLAGS = {
'gcc': {
'all': {
- 'CXXFLAGS': [], #['-fvisibility=hidden'],
'WARNINGFLAGS': ['-Wall',
'-Werror',
'-W',
@@ -576,6 +583,16 @@ SIMPLE_OPTIONS = {
'default': 'static',
'help': 'the type of library to produce'
},
+ 'profilingsupport': {
+ 'values': ['on', 'off'],
+ 'default': 'on',
+ 'help': 'enable profiling of JavaScript code'
+ },
+ 'debuggersupport': {
+ 'values': ['on', 'off'],
+ 'default': 'on',
+ 'help': 'enable debugging of JavaScript code'
+ },
'soname': {
'values': ['on', 'off'],
'default': 'off',
@@ -615,6 +632,11 @@ SIMPLE_OPTIONS = {
'values': ['on', 'off'],
'default': 'off',
'help': 'more output from compiler and linker'
+ },
+ 'visibility': {
+ 'values': ['default', 'hidden'],
+ 'default': 'hidden',
+ 'help': 'shared library symbol visibility'
}
}
@@ -794,6 +816,10 @@ def PostprocessOptions(options):
# Print a warning if arch has explicitly been set
print "Warning: forcing architecture to match simulator (%s)" % options['simulator']
options['arch'] = options['simulator']
+ if (options['prof'] != 'off') and (options['profilingsupport'] == 'off'):
+ # Print a warning if profiling is enabled without profiling support
+ print "Warning: forcing profilingsupport on when prof is on"
+ options['profilingsupport'] = 'on'
def ParseEnvOverrides(arg, imports):
diff --git a/V8Binding/v8/include/v8.h b/V8Binding/v8/include/v8.h
index 8cd49f8..4992d75 100644
--- a/V8Binding/v8/include/v8.h
+++ b/V8Binding/v8/include/v8.h
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2007-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -130,6 +130,7 @@ class Data;
namespace internal {
class Object;
+class Arguments;
}
@@ -1024,8 +1025,8 @@ class V8EXPORT String : public Primitive {
public:
explicit Value(Handle<v8::Value> obj);
~Value();
- uint16_t* operator*() const { return str_; }
- const uint16_t* operator*() { return str_; }
+ uint16_t* operator*() { return str_; }
+ const uint16_t* operator*() const { return str_; }
int length() const { return length_; }
private:
uint16_t* str_;
@@ -1205,7 +1206,14 @@ class V8EXPORT Object : public Value {
* If result.IsEmpty() no real property was located in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
- Handle<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key);
+ Local<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key);
+
+ /**
+ * If result.IsEmpty() no real property was located on the object or
+ * in the prototype chain.
+ * This means interceptors in the prototype chain are not called.
+ */
+ Local<Value> GetRealNamedProperty(Handle<String> key);
/** Tests for a named lookup interceptor.*/
bool HasNamedLookupInterceptor();
@@ -1238,6 +1246,15 @@ class V8EXPORT Object : public Value {
bool SetHiddenValue(Handle<String> key, Handle<Value> value);
Local<Value> GetHiddenValue(Handle<String> key);
bool DeleteHiddenValue(Handle<String> key);
+
+ /**
+ * Returns true if this is an instance of an api function (one
+ * created from a function created from a function template) and has
+ * been modified since it was created. Note that this method is
+ * conservative and may return true for objects that haven't actually
+ * been modified.
+ */
+ bool IsDirty();
/**
* Clone this object with a fast but shallow copy. Values will point
@@ -1392,17 +1409,13 @@ class V8EXPORT Arguments {
*/
class V8EXPORT AccessorInfo {
public:
- inline AccessorInfo(Local<Object> self,
- Local<Value> data,
- Local<Object> holder)
- : self_(self), data_(data), holder_(holder) { }
+ inline AccessorInfo(internal::Object** args)
+ : args_(args) { }
inline Local<Value> Data() const;
inline Local<Object> This() const;
inline Local<Object> Holder() const;
private:
- Local<Object> self_;
- Local<Value> data_;
- Local<Object> holder_;
+ internal::Object** args_;
};
@@ -1537,9 +1550,9 @@ enum AccessType {
/**
* Returns true if cross-context access should be allowed to the named
- * property with the given key on the global object.
+ * property with the given key on the host object.
*/
-typedef bool (*NamedSecurityCallback)(Local<Object> global,
+typedef bool (*NamedSecurityCallback)(Local<Object> host,
Local<Value> key,
AccessType type,
Local<Value> data);
@@ -1547,9 +1560,9 @@ typedef bool (*NamedSecurityCallback)(Local<Object> global,
/**
* Returns true if cross-context access should be allowed to the indexed
- * property with the given index on the global object.
+ * property with the given index on the host object.
*/
-typedef bool (*IndexedSecurityCallback)(Local<Object> global,
+typedef bool (*IndexedSecurityCallback)(Local<Object> host,
uint32_t index,
AccessType type,
Local<Value> data);
@@ -1558,7 +1571,10 @@ typedef bool (*IndexedSecurityCallback)(Local<Object> global,
/**
* A FunctionTemplate is used to create functions at runtime. There
* can only be one function created from a FunctionTemplate in a
- * context.
+ * context. The lifetime of the created function is equal to the
+ * lifetime of the context. So if the embedder needs to create
+ * temporary functions that can be collected, using Scripts is
+ * preferred.
*
* A FunctionTemplate can have properties, these properties are added to the
* function object when it is created.
@@ -1965,8 +1981,13 @@ Handle<Boolean> V8EXPORT False();
/**
- * A set of constraints that specifies the limits of the runtime's
- * memory use.
+ * A set of constraints that specifies the limits of the runtime's memory use.
+ * You must set the heap size before initializing the VM - the size cannot be
+ * adjusted after the VM is initialized.
+ *
+ * If you are using threads then you should hold the V8::Locker lock while
+ * setting the stack limit and you must set a non-default stack limit separately
+ * for each thread.
*/
class V8EXPORT ResourceConstraints {
public:
@@ -1976,6 +1997,7 @@ class V8EXPORT ResourceConstraints {
int max_old_space_size() const { return max_old_space_size_; }
void set_max_old_space_size(int value) { max_old_space_size_ = value; }
uint32_t* stack_limit() const { return stack_limit_; }
+ // Sets an address beyond which the VM's stack may not grow.
void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
private:
int max_young_space_size_;
@@ -2183,7 +2205,8 @@ class V8EXPORT V8 {
/**
* Initializes from snapshot if possible. Otherwise, attempts to
- * initialize from scratch.
+ * initialize from scratch. This function is called implicitly if
+ * you use the API without calling it first.
*/
static bool Initialize();
@@ -2725,23 +2748,23 @@ class Internals {
// These constants are compiler dependent so their values must be
// defined within the implementation.
- static int kJSObjectType;
- static int kFirstNonstringType;
- static int kProxyType;
+ V8EXPORT static int kJSObjectType;
+ V8EXPORT static int kFirstNonstringType;
+ V8EXPORT static int kProxyType;
static inline bool HasHeapObjectTag(internal::Object* value) {
return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
kHeapObjectTag);
}
-
+
static inline bool HasSmiTag(internal::Object* value) {
return ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag);
}
-
+
static inline int SmiValue(internal::Object* value) {
return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> kSmiTagSize;
}
-
+
static inline bool IsExternalTwoByteString(int instance_type) {
int representation = (instance_type & kFullStringRepresentationMask);
return representation == kExternalTwoByteRepresentationTag;
@@ -2854,21 +2877,6 @@ int Arguments::Length() const {
}
-Local<Value> AccessorInfo::Data() const {
- return data_;
-}
-
-
-Local<Object> AccessorInfo::This() const {
- return self_;
-}
-
-
-Local<Object> AccessorInfo::Holder() const {
- return holder_;
-}
-
-
template <class T>
Local<T> HandleScope::Close(Handle<T> value) {
internal::Object** before = reinterpret_cast<internal::Object**>(*value);
@@ -3066,6 +3074,21 @@ External* External::Cast(v8::Value* value) {
}
+Local<Value> AccessorInfo::Data() const {
+ return Local<Value>(reinterpret_cast<Value*>(&args_[-3]));
+}
+
+
+Local<Object> AccessorInfo::This() const {
+ return Local<Object>(reinterpret_cast<Object*>(&args_[0]));
+}
+
+
+Local<Object> AccessorInfo::Holder() const {
+ return Local<Object>(reinterpret_cast<Object*>(&args_[-1]));
+}
+
+
/**
* \example shell.cc
* A simple shell that takes a list of expressions on the
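
The v8.h changes above add Object::GetRealNamedProperty(), which looks up a property on the object and its prototype chain without ever invoking interceptors, and which returns an empty handle when no real property exists. A minimal embedder-side sketch of how it might be used (assumes an entered Context and an open HandleScope; the helper name and property name are illustrative, not from the patch):

    #include <v8.h>

    v8::Local<v8::Value> LookupIgnoringInterceptors(v8::Handle<v8::Object> obj) {
      // Bypasses named interceptors; only real (non-interceptor) properties count.
      v8::Local<v8::Value> value = obj->GetRealNamedProperty(v8::String::New("name"));
      if (value.IsEmpty()) {
        // No real property on the object or anywhere in its prototype chain.
        return v8::Local<v8::Value>();
      }
      return value;
    }
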
diff --git a/V8Binding/v8/src/SConscript b/V8Binding/v8/src/SConscript
index fee3fab..b6c2b4d 100755
--- a/V8Binding/v8/src/SConscript
+++ b/V8Binding/v8/src/SConscript
@@ -36,16 +36,16 @@ Import('context')
SOURCES = {
'all': [
'accessors.cc', 'allocation.cc', 'api.cc', 'assembler.cc', 'ast.cc',
- 'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'cfg.cc',
- 'code-stubs.cc', 'codegen.cc', 'compilation-cache.cc', 'compiler.cc',
- 'contexts.cc', 'conversions.cc', 'counters.cc', 'dateparser.cc',
- 'debug.cc', 'debug-agent.cc', 'disassembler.cc', 'execution.cc',
- 'factory.cc', 'flags.cc', 'frame-element.cc', 'frames.cc',
- 'func-name-inferrer.cc', 'global-handles.cc', 'handles.cc',
- 'hashmap.cc', 'heap.cc', 'ic.cc', 'interpreter-irregexp.cc',
- 'jsregexp.cc', 'jump-target.cc', 'log.cc', 'log-utils.cc',
- 'mark-compact.cc', 'messages.cc', 'objects.cc', 'oprofile-agent.cc',
- 'parser.cc', 'property.cc', 'regexp-macro-assembler.cc',
+ 'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'code-stubs.cc',
+ 'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc',
+ 'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc',
+ 'debug-agent.cc', 'disassembler.cc', 'execution.cc', 'factory.cc',
+ 'flags.cc', 'frame-element.cc', 'frames.cc', 'func-name-inferrer.cc',
+ 'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc',
+ 'heap-profiler.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc',
+ 'jump-target.cc', 'log.cc', 'log-utils.cc', 'mark-compact.cc',
+ 'messages.cc', 'objects.cc', 'oprofile-agent.cc', 'parser.cc',
+ 'property.cc', 'regexp-macro-assembler.cc',
'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
@@ -55,29 +55,29 @@ SOURCES = {
'virtual-frame.cc', 'zone.cc'
],
'arch:arm': [
- 'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/cfg-arm.cc',
- 'arm/codegen-arm.cc', 'arm/cpu-arm.cc', 'arm/disasm-arm.cc',
+ 'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/codegen-arm.cc',
+ 'arm/constants-arm.cc', 'arm/cpu-arm.cc', 'arm/disasm-arm.cc',
'arm/debug-arm.cc', 'arm/frames-arm.cc', 'arm/ic-arm.cc',
'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc',
- 'arm/regexp-macro-assembler-arm.cc',
- 'arm/register-allocator-arm.cc', 'arm/stub-cache-arm.cc',
- 'arm/virtual-frame-arm.cc'
+ 'arm/regexp-macro-assembler-arm.cc', 'arm/register-allocator-arm.cc',
+ 'arm/stub-cache-arm.cc', 'arm/virtual-frame-arm.cc'
],
'arch:ia32': [
- 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/cfg-ia32.cc',
+ 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc',
'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
- 'ia32/regexp-macro-assembler-ia32.cc', 'ia32/register-allocator-ia32.cc',
- 'ia32/stub-cache-ia32.cc', 'ia32/virtual-frame-ia32.cc'
+ 'ia32/regexp-macro-assembler-ia32.cc',
+ 'ia32/register-allocator-ia32.cc', 'ia32/stub-cache-ia32.cc',
+ 'ia32/virtual-frame-ia32.cc'
],
'arch:x64': [
- 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/cfg-x64.cc',
- 'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc',
- 'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
- 'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
- 'x64/regexp-macro-assembler-x64.cc', 'x64/register-allocator-x64.cc',
- 'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc'
+ 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/codegen-x64.cc',
+ 'x64/cpu-x64.cc', 'x64/disasm-x64.cc', 'x64/debug-x64.cc',
+ 'x64/frames-x64.cc', 'x64/ic-x64.cc', 'x64/jump-target-x64.cc',
+ 'x64/macro-assembler-x64.cc', 'x64/regexp-macro-assembler-x64.cc',
+ 'x64/register-allocator-x64.cc', 'x64/stub-cache-x64.cc',
+ 'x64/virtual-frame-x64.cc'
],
'simulator:arm': ['arm/simulator-arm.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
diff --git a/V8Binding/v8/src/api.cc b/V8Binding/v8/src/api.cc
index 1128d3e..fd3d921 100644
--- a/V8Binding/v8/src/api.cc
+++ b/V8Binding/v8/src/api.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "api.h"
+#include "arguments.h"
#include "bootstrapper.h"
#include "compiler.h"
#include "debug.h"
@@ -71,7 +72,7 @@ namespace v8 {
thread_local.DecrementCallDepth(); \
if (has_pending_exception) { \
if (thread_local.CallDepthIsZero() && i::Top::is_out_of_memory()) { \
- if (!thread_local.IgnoreOutOfMemory()) \
+ if (!thread_local.ignore_out_of_memory()) \
i::V8::FatalProcessOutOfMemory(NULL); \
} \
bool call_depth_is_zero = thread_local.CallDepthIsZero(); \
@@ -341,9 +342,12 @@ ResourceConstraints::ResourceConstraints()
bool SetResourceConstraints(ResourceConstraints* constraints) {
- bool result = i::Heap::ConfigureHeap(constraints->max_young_space_size(),
- constraints->max_old_space_size());
- if (!result) return false;
+ int semispace_size = constraints->max_young_space_size();
+ int old_gen_size = constraints->max_old_space_size();
+ if (semispace_size != 0 || old_gen_size != 0) {
+ bool result = i::Heap::ConfigureHeap(semispace_size, old_gen_size);
+ if (!result) return false;
+ }
if (constraints->stack_limit() != NULL) {
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
i::StackGuard::SetStackLimit(limit);
@@ -1191,6 +1195,7 @@ v8::TryCatch::TryCatch()
exception_(i::Heap::the_hole_value()),
message_(i::Smi::FromInt(0)),
is_verbose_(false),
+ can_continue_(true),
capture_message_(true),
js_handler_(NULL) {
i::Top::RegisterTryCatchHandler(this);
@@ -1897,6 +1902,7 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
v8::PropertyAttribute attribs) {
ON_BAILOUT("v8::Object::Set()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::Object> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
@@ -1917,6 +1923,7 @@ bool v8::Object::ForceSet(v8::Handle<Value> key,
v8::PropertyAttribute attribs) {
ON_BAILOUT("v8::Object::ForceSet()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
@@ -1935,6 +1942,7 @@ bool v8::Object::ForceSet(v8::Handle<Value> key,
bool v8::Object::ForceDelete(v8::Handle<Value> key) {
ON_BAILOUT("v8::Object::ForceDelete()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
EXCEPTION_PREAMBLE();
@@ -1988,7 +1996,8 @@ Local<Array> v8::Object::GetPropertyNames() {
ENTER_V8;
v8::HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::FixedArray> value = i::GetKeysInFixedArrayFor(self);
+ i::Handle<i::FixedArray> value =
+ i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS);
// Because we use caching to speed up enumeration it is important
// to never change the result of the basic enumeration function so
// we clone the result.
@@ -2119,7 +2128,7 @@ bool v8::Object::HasIndexedLookupInterceptor() {
}
-Handle<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
+Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
Handle<String> key) {
ON_BAILOUT("v8::Object::GetRealNamedPropertyInPrototypeChain()",
return Local<Value>());
@@ -2140,12 +2149,32 @@ Handle<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
}
+Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
+ ON_BAILOUT("v8::Object::GetRealNamedProperty()", return Local<Value>());
+ ENTER_V8;
+ i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
+ i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+ i::LookupResult lookup;
+ self_obj->LookupRealNamedProperty(*key_obj, &lookup);
+ if (lookup.IsValid()) {
+ PropertyAttributes attributes;
+ i::Handle<i::Object> result(self_obj->GetProperty(*self_obj,
+ &lookup,
+ *key_obj,
+ &attributes));
+ return Utils::ToLocal(result);
+ }
+ return Local<Value>(); // No real property was found in prototype chain.
+}
+
+
// Turns on access checks by copying the map and setting the check flag.
// Because the object gets a new map, existing inline cache caching
// the old map of this object will fail.
void v8::Object::TurnOnAccessCheck() {
ON_BAILOUT("v8::Object::TurnOnAccessCheck()", return);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Handle<i::Map> new_map =
@@ -2155,6 +2184,11 @@ void v8::Object::TurnOnAccessCheck() {
}
+bool v8::Object::IsDirty() {
+ return Utils::OpenHandle(this)->IsDirty();
+}
+
+
Local<v8::Object> v8::Object::Clone() {
ON_BAILOUT("v8::Object::Clone()", return Local<Object>());
ENTER_V8;
@@ -2170,6 +2204,7 @@ Local<v8::Object> v8::Object::Clone() {
int v8::Object::GetIdentityHash() {
ON_BAILOUT("v8::Object::GetIdentityHash()", return 0);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
i::Handle<i::Object> hash_symbol = i::Factory::identity_hash_symbol();
@@ -2199,6 +2234,7 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
v8::Handle<v8::Value> value) {
ON_BAILOUT("v8::Object::SetHiddenValue()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
@@ -2238,6 +2274,7 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
ON_BAILOUT("v8::DeleteHiddenValue()", return false);
ENTER_V8;
+ HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, false));
if (hidden_props->IsUndefined()) {
@@ -2252,6 +2289,7 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
ON_BAILOUT("v8::SetElementsToPixelData()", return);
ENTER_V8;
+ HandleScope scope;
if (!ApiCheck(i::Smi::IsValid(length),
"v8::Object::SetIndexedPropertiesToPixelData()",
"length exceeds max acceptable value")) {
@@ -2412,20 +2450,14 @@ int String::Write(uint16_t* buffer, int start, int length) const {
ENTER_V8;
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
- // Flatten the string for efficiency. This applies whether we are
- // using StringInputBuffer or Get(i) to access the characters.
- str->TryFlattenIfNotFlat();
int end = length;
if ( (length == -1) || (length > str->length() - start) )
end = str->length() - start;
if (end < 0) return 0;
- write_input_buffer.Reset(start, *str);
- int i;
- for (i = 0; i < end; i++)
- buffer[i] = write_input_buffer.GetNext();
- if (length == -1 || i < length)
- buffer[i] = '\0';
- return i;
+ i::String::WriteToFlat(*str, buffer, start, end);
+ if (length == -1 || end < length)
+ buffer[end] = '\0';
+ return end;
}
@@ -2571,7 +2603,9 @@ bool v8::V8::Dispose() {
bool v8::V8::IdleNotification(bool is_high_priority) {
- if (!i::V8::IsRunning()) return false;
+ // Returning true tells the caller that it need not
+ // continue to call IdleNotification.
+ if (!i::V8::IsRunning()) return true;
return i::V8::IdleNotification(is_high_priority);
}
@@ -2672,9 +2706,7 @@ Persistent<Context> v8::Context::New(
}
// Leave V8.
- if (!ApiCheck(!env.is_null(),
- "v8::Context::New()",
- "Could not initialize environment"))
+ if (env.is_null())
return Persistent<Context>();
return Persistent<Context>(Utils::ToLocal(env));
}
@@ -2735,7 +2767,9 @@ v8::Local<v8::Context> Context::GetCurrent() {
v8::Local<v8::Context> Context::GetCalling() {
if (IsDeadCheck("v8::Context::GetCalling()")) return Local<Context>();
- i::Handle<i::Context> context(i::Top::GetCallingGlobalContext());
+ i::Handle<i::Object> calling = i::Top::GetCallingGlobalContext();
+ if (calling.is_null()) return Local<Context>();
+ i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
return Utils::ToLocal(context);
}
@@ -3182,7 +3216,7 @@ Local<Integer> v8::Integer::New(int32_t value) {
void V8::IgnoreOutOfMemoryException() {
- thread_local.SetIgnoreOutOfMemory(true);
+ thread_local.set_ignore_out_of_memory(true);
}
@@ -3664,6 +3698,11 @@ HandleScopeImplementer* HandleScopeImplementer::instance() {
}
+void HandleScopeImplementer::FreeThreadResources() {
+ thread_local.Free();
+}
+
+
char* HandleScopeImplementer::ArchiveThread(char* storage) {
return thread_local.ArchiveThreadHelper(storage);
}
@@ -3675,7 +3714,7 @@ char* HandleScopeImplementer::ArchiveThreadHelper(char* storage) {
handle_scope_data_ = *current;
memcpy(storage, this, sizeof(*this));
- Initialize();
+ ResetAfterArchive();
current->Initialize();
return storage + ArchiveSpacePerThread();
@@ -3701,14 +3740,14 @@ char* HandleScopeImplementer::RestoreThreadHelper(char* storage) {
void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
// Iterate over all handles in the blocks except for the last.
- for (int i = Blocks()->length() - 2; i >= 0; --i) {
- Object** block = Blocks()->at(i);
+ for (int i = blocks()->length() - 2; i >= 0; --i) {
+ Object** block = blocks()->at(i);
v->VisitPointers(block, &block[kHandleBlockSize]);
}
// Iterate over live handles in the last block (if any).
- if (!Blocks()->is_empty()) {
- v->VisitPointers(Blocks()->last(), handle_scope_data_.next);
+ if (!blocks()->is_empty()) {
+ v->VisitPointers(blocks()->last(), handle_scope_data_.next);
}
if (!saved_contexts_.is_empty()) {
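
One effect of the SetResourceConstraints() change above is that Heap::ConfigureHeap() is only called when a space size was actually specified, so an embedder can now install just a stack limit and leave the heap sizes at their defaults. A rough sketch under those assumptions (not from the patch; the 512 KB budget is arbitrary and the pointer arithmetic assumes a downward-growing stack):

    #include <v8.h>

    // Hypothetical helper: an address roughly 512 KB below the current frame,
    // beyond which V8-generated code must not grow the stack.
    static uint32_t* RoughStackLimit() {
      static const int kStackBudget = 512 * 1024;
      uint32_t dummy;
      return &dummy - kStackBudget / sizeof(uint32_t);
    }

    int main() {
      v8::ResourceConstraints constraints;        // young/old space sizes stay 0
      constraints.set_stack_limit(RoughStackLimit());
      v8::SetResourceConstraints(&constraints);   // no longer forces ConfigureHeap(0, 0)
      // ... initialize and use V8 as usual; per v8.h, hold a v8::Locker and set a
      // separate limit for each additional thread.
      return 0;
    }
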
diff --git a/V8Binding/v8/src/api.h b/V8Binding/v8/src/api.h
index 9ae6307..1221f35 100644
--- a/V8Binding/v8/src/api.h
+++ b/V8Binding/v8/src/api.h
@@ -311,20 +311,12 @@ class HandleScopeImplementer {
public:
HandleScopeImplementer()
- : blocks(0),
+ : blocks_(0),
entered_contexts_(0),
- saved_contexts_(0) {
- Initialize();
- }
-
- void Initialize() {
- blocks.Initialize(0);
- entered_contexts_.Initialize(0);
- saved_contexts_.Initialize(0);
- spare = NULL;
- ignore_out_of_memory = false;
- call_depth = 0;
- }
+ saved_contexts_(0),
+ spare_(NULL),
+ ignore_out_of_memory_(false),
+ call_depth_(0) { }
static HandleScopeImplementer* instance();
@@ -332,6 +324,7 @@ class HandleScopeImplementer {
static int ArchiveSpacePerThread();
static char* RestoreThread(char* from);
static char* ArchiveThread(char* to);
+ static void FreeThreadResources();
// Garbage collection support.
static void Iterate(v8::internal::ObjectVisitor* v);
@@ -341,9 +334,9 @@ class HandleScopeImplementer {
inline internal::Object** GetSpareOrNewBlock();
inline void DeleteExtensions(int extensions);
- inline void IncrementCallDepth() {call_depth++;}
- inline void DecrementCallDepth() {call_depth--;}
- inline bool CallDepthIsZero() { return call_depth == 0; }
+ inline void IncrementCallDepth() {call_depth_++;}
+ inline void DecrementCallDepth() {call_depth_--;}
+ inline bool CallDepthIsZero() { return call_depth_ == 0; }
inline void EnterContext(Handle<Object> context);
inline bool LeaveLastContext();
@@ -356,20 +349,44 @@ class HandleScopeImplementer {
inline Context* RestoreContext();
inline bool HasSavedContexts();
- inline List<internal::Object**>* Blocks() { return &blocks; }
-
- inline bool IgnoreOutOfMemory() { return ignore_out_of_memory; }
- inline void SetIgnoreOutOfMemory(bool value) { ignore_out_of_memory = value; }
+ inline List<internal::Object**>* blocks() { return &blocks_; }
+ inline bool ignore_out_of_memory() { return ignore_out_of_memory_; }
+ inline void set_ignore_out_of_memory(bool value) {
+ ignore_out_of_memory_ = value;
+ }
private:
- List<internal::Object**> blocks;
- Object** spare;
- int call_depth;
+ void ResetAfterArchive() {
+ blocks_.Initialize(0);
+ entered_contexts_.Initialize(0);
+ saved_contexts_.Initialize(0);
+ spare_ = NULL;
+ ignore_out_of_memory_ = false;
+ call_depth_ = 0;
+ }
+
+ void Free() {
+ ASSERT(blocks_.length() == 0);
+ ASSERT(entered_contexts_.length() == 0);
+ ASSERT(saved_contexts_.length() == 0);
+ blocks_.Free();
+ entered_contexts_.Free();
+ saved_contexts_.Free();
+ if (spare_ != NULL) {
+ DeleteArray(spare_);
+ spare_ = NULL;
+ }
+ ASSERT(call_depth_ == 0);
+ }
+
+ List<internal::Object**> blocks_;
// Used as a stack to keep track of entered contexts.
List<Handle<Object> > entered_contexts_;
// Used as a stack to keep track of saved contexts.
List<Context*> saved_contexts_;
- bool ignore_out_of_memory;
+ Object** spare_;
+ bool ignore_out_of_memory_;
+ int call_depth_;
// This is only used for threading support.
v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
@@ -419,32 +436,32 @@ Handle<Object> HandleScopeImplementer::LastEnteredContext() {
// If there's a spare block, use it for growing the current scope.
internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
- internal::Object** block = (spare != NULL) ?
- spare :
+ internal::Object** block = (spare_ != NULL) ?
+ spare_ :
NewArray<internal::Object*>(kHandleBlockSize);
- spare = NULL;
+ spare_ = NULL;
return block;
}
void HandleScopeImplementer::DeleteExtensions(int extensions) {
- if (spare != NULL) {
- DeleteArray(spare);
- spare = NULL;
+ if (spare_ != NULL) {
+ DeleteArray(spare_);
+ spare_ = NULL;
}
for (int i = extensions; i > 1; --i) {
- internal::Object** block = blocks.RemoveLast();
+ internal::Object** block = blocks_.RemoveLast();
#ifdef DEBUG
v8::ImplementationUtilities::ZapHandleRange(block,
&block[kHandleBlockSize]);
#endif
DeleteArray(block);
}
- spare = blocks.RemoveLast();
+ spare_ = blocks_.RemoveLast();
#ifdef DEBUG
v8::ImplementationUtilities::ZapHandleRange(
- spare,
- &spare[kHandleBlockSize]);
+ spare_,
+ &spare_[kHandleBlockSize]);
#endif
}
diff --git a/V8Binding/v8/src/arguments.h b/V8Binding/v8/src/arguments.h
index 80f9006..d2f1bfc 100644
--- a/V8Binding/v8/src/arguments.h
+++ b/V8Binding/v8/src/arguments.h
@@ -45,6 +45,9 @@ namespace internal {
class Arguments BASE_EMBEDDED {
public:
+ Arguments(int length, Object** arguments)
+ : length_(length), arguments_(arguments) { }
+
Object*& operator[] (int index) {
ASSERT(0 <= index && index < length_);
return arguments_[-index];
@@ -61,11 +64,34 @@ class Arguments BASE_EMBEDDED {
// Get the total number of arguments including the receiver.
int length() const { return length_; }
+ Object** arguments() { return arguments_; }
+
private:
int length_;
Object** arguments_;
};
+
+// Custom arguments replicate a small segment of stack that can be
+// accessed through an Arguments object the same way the actual stack
+// can.
+class CustomArguments : public Relocatable {
+ public:
+ inline CustomArguments(Object *data,
+ JSObject *self,
+ JSObject *holder) {
+ values_[3] = self;
+ values_[2] = holder;
+ values_[1] = Smi::FromInt(0);
+ values_[0] = data;
+ }
+ void IterateInstance(ObjectVisitor* v);
+ Object** end() { return values_ + 3; }
+ private:
+ Object* values_[4];
+};
+
+
} } // namespace v8::internal
#endif // V8_ARGUMENTS_H_
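
The CustomArguments slots above line up with the offsets the new AccessorInfo implementation in v8.h reads (args_[0], args_[-1], args_[-3]): end() hands out a pointer to the last slot, so negative indices reach back into the replicated stack segment. A standalone model of that layout (not part of the patch; plain pointers stand in for the real Object* values):

    #include <cassert>

    int main() {
      void* data   = reinterpret_cast<void*>(0x1);  // values_[0]: callback data
      void* smi0   = 0;                             // values_[1]: Smi::FromInt(0) placeholder
      void* holder = reinterpret_cast<void*>(0x2);  // values_[2]: holder object
      void* self   = reinterpret_cast<void*>(0x3);  // values_[3]: receiver
      void* values[4] = { data, smi0, holder, self };
      void** args = &values[3];           // what CustomArguments::end() returns
      assert(args[0]  == self);           // AccessorInfo::This()
      assert(args[-1] == holder);         // AccessorInfo::Holder()
      assert(args[-3] == data);           // AccessorInfo::Data()
      return 0;
    }
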
diff --git a/V8Binding/v8/src/arm/assembler-arm-inl.h b/V8Binding/v8/src/arm/assembler-arm-inl.h
index cb5faa2..cd5a1bb 100644
--- a/V8Binding/v8/src/arm/assembler-arm-inl.h
+++ b/V8Binding/v8/src/arm/assembler-arm-inl.h
@@ -105,40 +105,45 @@ Address* RelocInfo::target_reference_address() {
Address RelocInfo::call_address() {
ASSERT(IsCallInstruction());
- UNIMPLEMENTED();
- return NULL;
+ // The 2 instructions offset assumes patched return sequence.
+ ASSERT(IsJSReturn(rmode()));
+ return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
}
void RelocInfo::set_call_address(Address target) {
ASSERT(IsCallInstruction());
- UNIMPLEMENTED();
+ // The 2 instructions offset assumes patched return sequence.
+ ASSERT(IsJSReturn(rmode()));
+ Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
}
Object* RelocInfo::call_object() {
- ASSERT(IsCallInstruction());
- UNIMPLEMENTED();
- return NULL;
+ return *call_object_address();
}
Object** RelocInfo::call_object_address() {
ASSERT(IsCallInstruction());
- UNIMPLEMENTED();
- return NULL;
+ // The 2 instructions offset assumes patched return sequence.
+ ASSERT(IsJSReturn(rmode()));
+ return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}
void RelocInfo::set_call_object(Object* target) {
- ASSERT(IsCallInstruction());
- UNIMPLEMENTED();
+ *call_object_address() = target;
}
bool RelocInfo::IsCallInstruction() {
- UNIMPLEMENTED();
- return false;
+ // On ARM a "call instruction" is actually two instructions.
+ // mov lr, pc
+ // ldr pc, [pc, #XXX]
+ return (Assembler::instr_at(pc_) == kMovLrPc)
+ && ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
+ == kLdrPCPattern);
}
diff --git a/V8Binding/v8/src/arm/assembler-arm.cc b/V8Binding/v8/src/arm/assembler-arm.cc
index 8bd06db..bc3b8e6 100644
--- a/V8Binding/v8/src/arm/assembler-arm.cc
+++ b/V8Binding/v8/src/arm/assembler-arm.cc
@@ -93,7 +93,14 @@ const int RelocInfo::kApplyMask = 0;
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Patch the code at the current address with the supplied instructions.
- UNIMPLEMENTED();
+ Instr* pc = reinterpret_cast<Instr*>(pc_);
+ Instr* instr = reinterpret_cast<Instr*>(instructions);
+ for (int i = 0; i < instruction_count; i++) {
+ *(pc + i) = *(instr + i);
+ }
+
+ // Indicate that code has changed.
+ CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}
@@ -232,6 +239,10 @@ static const Instr kPushRegPattern =
// register r is not encoded.
static const Instr kPopRegPattern =
al | B26 | L | 4 | PostIndex | sp.code() * B16;
+// mov lr, pc
+const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
+// ldr pc, [pc, #XXX]
+const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
// spare_buffer_
static const int kMinimalBufferSize = 4*KB;
@@ -1301,6 +1312,13 @@ void Assembler::lea(Register dst,
// Debugging
+void Assembler::RecordJSReturn() {
+ WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
void Assembler::RecordComment(const char* msg) {
if (FLAG_debug_code) {
CheckBuffer();
@@ -1387,16 +1405,20 @@ void Assembler::GrowBuffer() {
RelocInfo& rinfo = prinfo_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION);
- rinfo.set_pc(rinfo.pc() + pc_delta);
+ if (rinfo.rmode() != RelocInfo::JS_RETURN) {
+ rinfo.set_pc(rinfo.pc() + pc_delta);
+ }
}
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
- if (rmode >= RelocInfo::COMMENT && rmode <= RelocInfo::STATEMENT_POSITION) {
- // adjust code for new modes
- ASSERT(RelocInfo::IsComment(rmode) || RelocInfo::IsPosition(rmode));
+ if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+ // Adjust code for new modes
+ ASSERT(RelocInfo::IsJSReturn(rmode)
+ || RelocInfo::IsComment(rmode)
+ || RelocInfo::IsPosition(rmode));
// these modes do not need an entry in the constant pool
} else {
ASSERT(num_prinfo_ < kMaxNumPRInfo);
@@ -1490,6 +1512,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
rinfo.rmode() != RelocInfo::POSITION &&
rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
Instr instr = instr_at(rinfo.pc());
+
// Instruction to patch must be a ldr/str [pc, #offset]
// P and U set, B and W clear, Rn == pc, offset12 still 0
ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
diff --git a/V8Binding/v8/src/arm/assembler-arm.h b/V8Binding/v8/src/arm/assembler-arm.h
index 63f0447..d1df08c 100644
--- a/V8Binding/v8/src/arm/assembler-arm.h
+++ b/V8Binding/v8/src/arm/assembler-arm.h
@@ -376,6 +376,10 @@ class MemOperand BASE_EMBEDDED {
typedef int32_t Instr;
+extern const Instr kMovLrPc;
+extern const Instr kLdrPCPattern;
+
+
class Assembler : public Malloced {
public:
// Create an assembler. Instructions and relocation information are emitted
@@ -433,12 +437,16 @@ class Assembler : public Malloced {
INLINE(static Address target_address_at(Address pc));
INLINE(static void set_target_address_at(Address pc, Address target));
+ // Size of an instruction.
+ static const int kInstrSize = sizeof(Instr);
+
// Distance between the instruction referring to the address of the call
// target (ldr pc, [target addr in const pool]) and the return address
- static const int kPatchReturnSequenceLength = sizeof(Instr);
+ static const int kCallTargetAddressOffset = kInstrSize;
+
// Distance between start of patched return sequence and the emitted address
// to jump to.
- static const int kPatchReturnSequenceAddressOffset = 1;
+ static const int kPatchReturnSequenceAddressOffset = kInstrSize;
// Difference between address of current opcode and value read from pc
// register.
@@ -637,8 +645,8 @@ class Assembler : public Malloced {
str(src, MemOperand(sp, 4, NegPreIndex), cond);
}
- void pop(Register dst) {
- ldr(dst, MemOperand(sp, 4, PostIndex), al);
+ void pop(Register dst, Condition cond = al) {
+ ldr(dst, MemOperand(sp, 4, PostIndex), cond);
}
void pop() {
@@ -652,9 +660,16 @@ class Assembler : public Malloced {
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
+ // Check the code size generated from label to here.
+ int InstructionsGeneratedSince(Label* l) {
+ return (pc_offset() - l->pos()) / kInstrSize;
+ }
// Debugging
+ // Mark address of the ExitJSFrame code.
+ void RecordJSReturn();
+
// Record a comment relocation entry that can be used by a disassembler.
// Use --debug_code to enable.
void RecordComment(const char* msg);
@@ -671,7 +686,7 @@ class Assembler : public Malloced {
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Read/patch instructions
- Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+ static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
@@ -708,7 +723,6 @@ class Assembler : public Malloced {
int next_buffer_check_; // pc offset of next buffer check
// Code generation
- static const int kInstrSize = sizeof(Instr); // signed size
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large
@@ -795,6 +809,8 @@ class Assembler : public Malloced {
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
friend class RegExpMacroAssemblerARM;
+ friend class RelocInfo;
+ friend class CodePatcher;
};
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/builtins-arm.cc b/V8Binding/v8/src/arm/builtins-arm.cc
index 920110f..d7afb37 100644
--- a/V8Binding/v8/src/arm/builtins-arm.cc
+++ b/V8Binding/v8/src/arm/builtins-arm.cc
@@ -44,10 +44,417 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
__ str(r1, MemOperand(ip, 0));
// The actual argument count has already been loaded into register
- // r0, but JumpToBuiltin expects r0 to contain the number of
+ // r0, but JumpToRuntime expects r0 to contain the number of
// arguments including the receiver.
__ add(r0, r0, Operand(1));
- __ JumpToBuiltin(ExternalReference(id));
+ __ JumpToRuntime(ExternalReference(id));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the global context.
+
+ __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(result,
+ FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+ // Load the Array function from the global context.
+ __ ldr(result,
+ MemOperand(result,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// This constant has the same value as JSArray::kPreallocatedArrayElements and
+// if JSArray::kPreallocatedArrayElements is changed handling of loop unfolding
+// below should be reconsidered.
+static const int kLoopUnfoldLimit = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. An elements backing store is allocated with size initial_capacity
+// and filled with the hole values.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+ Register array_function,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int initial_capacity,
+ Label* gc_required) {
+ ASSERT(initial_capacity > 0);
+ // Load the initial map from the array function.
+ __ ldr(scratch1, FieldMemOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Allocate the JSArray object together with space for a fixed array with the
+ // requested elements.
+ int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+ __ AllocateInNewSpace(size / kPointerSize,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // scratch1: initial map
+ // scratch2: start of next object
+ __ str(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
+ // Field JSArray::kElementsOffset is initialized later.
+ __ mov(scratch3, Operand(0));
+ __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // scratch2: start of next object
+ __ lea(scratch1, MemOperand(result, JSArray::kSize));
+ __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+
+ // Clear the heap tag on the elements array.
+ __ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));
+
+ // Initialize the FixedArray and fill it with holes. FixedArray length is not
+ // stored as a smi.
+ // result: JSObject
+ // scratch1: elements array (untagged)
+ // scratch2: start of next object
+ __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+ __ mov(scratch3, Operand(initial_capacity));
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+
+ // Fill the FixedArray with the hole value.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ ASSERT(initial_capacity <= kLoopUnfoldLimit);
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ for (int i = 0; i < initial_capacity; i++) {
+ __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+ }
+}
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register and beginning and end of the FixedArray elements
+// storage is put into registers elements_array_storage and elements_array_end
+// (see below for when that is not the case). If the parameter fill_with_holes
+// is true the allocated elements backing store is filled with the hole values
+// otherwise it is left uninitialized. When the backing store is filled the
+// register elements_array_storage is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+ Register array_function, // Array function.
+ Register array_size, // As a smi.
+ Register result,
+ Register elements_array_storage,
+ Register elements_array_end,
+ Register scratch1,
+ Register scratch2,
+ bool fill_with_hole,
+ Label* gc_required) {
+ Label not_empty, allocated;
+
+ // Load the initial map from the array function.
+ __ ldr(elements_array_storage,
+ FieldMemOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check whether an empty sized array is requested.
+ __ tst(array_size, array_size);
+ __ b(nz, &not_empty);
+
+ // If an empty array is requested allocate a small elements array anyway. This
+ // keeps the code below free of special casing for the empty array.
+ int size = JSArray::kSize +
+ FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size / kPointerSize,
+ result,
+ elements_array_end,
+ scratch1,
+ gc_required,
+ TAG_OBJECT);
+ __ jmp(&allocated);
+
+ // Allocate the JSArray object together with space for a FixedArray with the
+ // requested number of elements.
+ __ bind(&not_empty);
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ mov(elements_array_end,
+ Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
+ __ add(elements_array_end,
+ elements_array_end,
+ Operand(array_size, ASR, kSmiTagSize));
+ __ AllocateInNewSpace(elements_array_end,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // elements_array_storage: initial map
+ // array_size: size of array (smi)
+ __ bind(&allocated);
+ __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
+ __ str(elements_array_storage,
+ FieldMemOperand(result, JSArray::kPropertiesOffset));
+ // Field JSArray::kElementsOffset is initialized later.
+ __ str(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // array_size: size of array (smi)
+ __ add(elements_array_storage, result, Operand(JSArray::kSize));
+ __ str(elements_array_storage,
+ FieldMemOperand(result, JSArray::kElementsOffset));
+
+ // Clear the heap tag on the elements array.
+ __ and_(elements_array_storage,
+ elements_array_storage,
+ Operand(~kHeapObjectTagMask));
+ // Initialize the fixed array and fill it with holes. FixedArray length is not
+ // stored as a smi.
+ // result: JSObject
+ // elements_array_storage: elements array (untagged)
+ // array_size: size of array (smi)
+ ASSERT(kSmiTag == 0);
+ __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
+ // Convert array_size from smi to value.
+ __ mov(array_size,
+ Operand(array_size, ASR, kSmiTagSize));
+ __ tst(array_size, array_size);
+ // The length of the FixedArray is the number of pre-allocated elements if
+ // the actual JSArray has length 0, and the requested array size for non-empty
+ // JSArrays. The length of a FixedArray is not stored as a smi.
+ __ mov(array_size, Operand(JSArray::kPreallocatedArrayElements), LeaveCC, eq);
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ str(array_size,
+ MemOperand(elements_array_storage, kPointerSize, PostIndex));
+
+ // Calculate elements array and elements array end.
+ // result: JSObject
+ // elements_array_storage: elements array element storage
+ // array_size: size of elements array
+ __ add(elements_array_end,
+ elements_array_storage,
+ Operand(array_size, LSL, kPointerSizeLog2));
+
+ // Fill the allocated FixedArray with the hole value if requested.
+ // result: JSObject
+ // elements_array_storage: elements array element storage
+ // elements_array_end: start of next object
+ if (fill_with_hole) {
+ Label loop, entry;
+ __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ str(scratch1,
+ MemOperand(elements_array_storage, kPointerSize, PostIndex));
+ __ bind(&entry);
+ __ cmp(elements_array_storage, elements_array_end);
+ __ b(lt, &loop);
+ }
+}
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the runtime is called. This
+// function assumes the following state:
+// r0: argc
+// r1: constructor (built-in Array function)
+// lr: return address
+// sp[0]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in r1 needs to be preserved for
+// entering the generic code. In both cases argc in r0 needs to be preserved.
+ // Both registers are preserved by this code, so there is no need to
+ // differentiate between a construct call and a normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+ Label *call_generic_code) {
+ Label argc_one_or_more, argc_two_or_more;
+
+ // Check for array construction with zero arguments or one.
+ __ cmp(r0, Operand(0));
+ __ b(ne, &argc_one_or_more);
+
+ // Handle construction of an empty array.
+ AllocateEmptyJSArray(masm,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ JSArray::kPreallocatedArrayElements,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1, r3, r4);
+ // Setup return value, remove receiver from stack and return.
+ __ mov(r0, r2);
+ __ add(sp, sp, Operand(kPointerSize));
+ __ Jump(lr);
+
+ // Check for one argument. Bail out if the argument is not a smi or if it is
+ // negative.
+ __ bind(&argc_one_or_more);
+ __ cmp(r0, Operand(1));
+ __ b(ne, &argc_two_or_more);
+ ASSERT(kSmiTag == 0);
+ __ ldr(r2, MemOperand(sp)); // Get the argument from the stack.
+ __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
+ __ b(ne, call_generic_code);
+
+ // Handle construction of an empty array of a certain size. Bail out if size
+ // is too large to actually allocate an elements array.
+ ASSERT(kSmiTag == 0);
+ __ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
+ __ b(ge, call_generic_code);
+
+ // r0: argc
+ // r1: constructor
+ // r2: array_size (smi)
+ // sp[0]: argument
+ AllocateJSArray(masm,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ true,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1, r2, r4);
+ // Setup return value, remove receiver and argument from stack and return.
+ __ mov(r0, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Jump(lr);
+
+ // Handle construction of an array from a list of arguments.
+ __ bind(&argc_two_or_more);
+ __ mov(r2, Operand(r0, LSL, kSmiTagSize)); // Convert argc to a smi.
+
+ // r0: argc
+ // r1: constructor
+ // r2: array_size (smi)
+ // sp[0]: last argument
+ AllocateJSArray(masm,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ false,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1, r2, r6);
+
+ // Fill arguments as array elements. Copy from the top of the stack (last
+ // element) to the array backing store filling it backwards. Note:
+ // elements_array_end points after the backing store therefore PreIndex is
+ // used when filling the backing store.
+ // r0: argc
+ // r3: JSArray
+ // r4: elements_array storage start (untagged)
+ // r5: elements_array_end (untagged)
+ // sp[0]: last argument
+ Label loop, entry;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ ldr(r2, MemOperand(sp, kPointerSize, PostIndex));
+ __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
+ __ bind(&entry);
+ __ cmp(r4, r5);
+ __ b(lt, &loop);
+
+ // Remove caller arguments and receiver from the stack, setup return value and
+ // return.
+ // r0: argc
+ // r3: JSArray
+ // sp[0]: receiver
+ __ add(sp, sp, Operand(kPointerSize));
+ __ mov(r0, r3);
+ __ Jump(lr);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, r1);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array function should be a map.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for Array function");
+ __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ Assert(eq, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code if the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
+ Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
+ Handle<Code> array_code(code);
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- r1 : constructor function
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_constructor;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the builtin Array function, which
+ // always has a map.
+ GenerateLoadArrayFunction(masm, r2);
+ __ cmp(r1, r2);
+ __ Assert(eq, "Unexpected Array function");
+ // Initial map for the builtin Array function should be a map.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for Array function");
+ __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ Assert(eq, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as a constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Handle<Code> generic_construct_stub(code);
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
}
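For orientation, the word count that AllocateJSArray above hands to AllocateInNewSpace can be summarised as a plain C++ helper. This is an illustrative sketch only; the helper name is not part of the patch, and the constants are the ones already used in the diff:

  // Illustrative only: words reserved in new space for a requested length,
  // mirroring the elements_array_end computation in AllocateJSArray.
  static int ArrayAllocationSizeInWords(int length) {
    if (length == 0) {
      // An empty array still gets a small preallocated backing store.
      return (JSArray::kSize +
              FixedArray::SizeFor(JSArray::kPreallocatedArrayElements)) /
             kPointerSize;
    }
    // Header words for the JSArray and the FixedArray plus one word per element.
    return (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize + length;
  }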
@@ -133,7 +540,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// r2: initial map
// r7: undefined
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateObjectInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
// map and properties and elements are set to empty fixed array.
@@ -204,12 +611,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// r5: start of next object
// r7: undefined
__ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateObjectInNewSpace(r0,
- r5,
- r6,
- r2,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
+ __ AllocateInNewSpace(r0,
+ r5,
+ r6,
+ r2,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
// Initialize the FixedArray.
// r1: constructor
diff --git a/V8Binding/v8/src/arm/cfg-arm.cc b/V8Binding/v8/src/arm/cfg-arm.cc
deleted file mode 100644
index e0e563c..0000000
--- a/V8Binding/v8/src/arm/cfg-arm.cc
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cfg.h"
-#include "codegen-inl.h"
-#include "codegen-arm.h" // Include after codegen-inl.h.
-#include "macro-assembler-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void InstructionBlock::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- {
- Comment cmt(masm, "[ InstructionBlock");
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- // If the location of the current instruction is a temp, then the
- // instruction cannot be in tail position in the block. Allocate the
- // temp based on peeking ahead to the next instruction.
- Instruction* instr = instructions_[i];
- Location* loc = instr->location();
- if (loc->is_temporary()) {
- instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
- }
- instructions_[i]->Compile(masm);
- }
- }
- successor_->Compile(masm);
-}
-
-
-void EntryNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- {
- Comment cmnt(masm, "[ EntryNode");
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(2 * kPointerSize));
- int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
- if (count > 0) {
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < count; i++) {
- __ push(ip);
- }
- }
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- if (FLAG_check_stack) {
- StackCheckStub stub;
- __ CallStub(&stub);
- }
- }
- successor_->Compile(masm);
-}
-
-
-void ExitNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- Comment cmnt(masm, "[ ExitNode");
- if (FLAG_trace) {
- __ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- __ add(sp, sp, Operand((count + 1) * kPointerSize));
- __ Jump(lr);
-}
-
-
-void PropLoadInstr::Compile(MacroAssembler* masm) {
- // The key should not be on the stack---if it is a compiler-generated
- // temporary it is in the accumulator.
- ASSERT(!key()->is_on_stack());
-
- Comment cmnt(masm, "[ Load from Property");
- // If the key is known at compile-time we may be able to use a load IC.
- bool is_keyed_load = true;
- if (key()->is_constant()) {
- // Still use the keyed load IC if the key can be parsed as an integer so
- // we will get into the case that handles [] on string objects.
- Handle<Object> key_val = Constant::cast(key())->handle();
- uint32_t ignored;
- if (key_val->IsSymbol() &&
- !String::cast(*key_val)->AsArrayIndex(&ignored)) {
- is_keyed_load = false;
- }
- }
-
- if (!object()->is_on_stack()) object()->Push(masm);
-
- if (is_keyed_load) {
- key()->Push(masm);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // Discard key and receiver.
- __ add(sp, sp, Operand(2 * kPointerSize));
- } else {
- key()->Get(masm, r2);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ pop(); // Discard receiver.
- }
- location()->Set(masm, r0);
-}
-
-
-void BinaryOpInstr::Compile(MacroAssembler* masm) {
- // The right-hand value should not be on the stack---if it is a
- // compiler-generated temporary it is in the accumulator.
- ASSERT(!right()->is_on_stack());
-
- Comment cmnt(masm, "[ BinaryOpInstr");
- // We can overwrite one of the operands if it is a temporary.
- OverwriteMode mode = NO_OVERWRITE;
- if (left()->is_temporary()) {
- mode = OVERWRITE_LEFT;
- } else if (right()->is_temporary()) {
- mode = OVERWRITE_RIGHT;
- }
-
- // Move left to r1 and right to r0.
- left()->Get(masm, r1);
- right()->Get(masm, r0);
- GenericBinaryOpStub stub(op(), mode);
- __ CallStub(&stub);
- location()->Set(masm, r0);
-}
-
-
-void ReturnInstr::Compile(MacroAssembler* masm) {
- // The location should be 'Effect'. As a side effect, move the value to
- // the accumulator.
- Comment cmnt(masm, "[ ReturnInstr");
- value()->Get(masm, r0);
-}
-
-
-void Constant::Get(MacroAssembler* masm, Register reg) {
- __ mov(reg, Operand(handle_));
-}
-
-
-void Constant::Push(MacroAssembler* masm) {
- __ mov(ip, Operand(handle_));
- __ push(ip);
-}
-
-
-static MemOperand ToMemOperand(SlotLocation* loc) {
- switch (loc->type()) {
- case Slot::PARAMETER: {
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- return MemOperand(fp, (1 + count - loc->index()) * kPointerSize);
- }
- case Slot::LOCAL: {
- const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
- return MemOperand(fp, kOffset - loc->index() * kPointerSize);
- }
- default:
- UNREACHABLE();
- return MemOperand(r0);
- }
-}
-
-
-void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ mov(ip, Operand(handle_));
- __ str(ip, ToMemOperand(loc));
-}
-
-
-void SlotLocation::Get(MacroAssembler* masm, Register reg) {
- __ ldr(reg, ToMemOperand(this));
-}
-
-
-void SlotLocation::Set(MacroAssembler* masm, Register reg) {
- __ str(reg, ToMemOperand(this));
-}
-
-
-void SlotLocation::Push(MacroAssembler* masm) {
- __ ldr(ip, ToMemOperand(this));
- __ push(ip); // Push will not destroy ip.
-}
-
-
-void SlotLocation::Move(MacroAssembler* masm, Value* value) {
- // Double dispatch.
- value->MoveToSlot(masm, this);
-}
-
-
-void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ ldr(ip, ToMemOperand(this));
- __ str(ip, ToMemOperand(loc));
-}
-
-
-void TempLocation::Get(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(r0)) __ mov(reg, r0);
- break;
- case STACK:
- __ pop(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Set(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(r0)) __ mov(r0, reg);
- break;
- case STACK:
- __ push(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Push(MacroAssembler* masm) {
- switch (where_) {
- case ACCUMULATOR:
- __ push(r0);
- break;
- case STACK:
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Move(MacroAssembler* masm, Value* value) {
- switch (where_) {
- case ACCUMULATOR:
- value->Get(masm, r0);
- case STACK:
- value->Push(masm);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- switch (where_) {
- case ACCUMULATOR:
- __ str(r0, ToMemOperand(loc));
- case STACK:
- __ pop(ip);
- __ str(ip, ToMemOperand(loc));
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/codegen-arm.cc b/V8Binding/v8/src/arm/codegen-arm.cc
index 4c87e06..cdd32f3 100644
--- a/V8Binding/v8/src/arm/codegen-arm.cc
+++ b/V8Binding/v8/src/arm/codegen-arm.cc
@@ -299,7 +299,10 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
}
// Generate the return sequence if necessary.
- if (frame_ != NULL || function_return_.is_linked()) {
+ if (has_valid_frame() || function_return_.is_linked()) {
+ if (!function_return_.is_linked()) {
+ CodeForReturnPosition(fun);
+ }
// exit
// r0: result
// sp: stack pointer
@@ -315,12 +318,23 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
frame_->CallRuntime(Runtime::kTraceExit, 1);
}
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+
// Tear down the frame which will restore the caller's frame pointer and
// the link register.
frame_->Exit();
- __ add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
- __ Jump(lr);
+ // Here we use masm_-> instead of the __ macro to prevent the code coverage
+ // tool from instrumenting it, as we rely on the code size here.
+ masm_->add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
+ masm_->Jump(lr);
+
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger.
+ ASSERT_EQ(kJSReturnSequenceLength,
+ masm_->InstructionsGeneratedSince(&check_exit_codesize));
}
// Code generation state must be reset.
@@ -1111,10 +1125,10 @@ void CodeGenerator::CheckStack() {
if (FLAG_check_stack) {
Comment cmnt(masm_, "[ check stack");
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
- // Put the lr setup instruction in the delay slot. The 'sizeof(Instr)' is
- // added to the implicit 8 byte offset that always applies to operations
- // with pc and gives a return address 12 bytes down.
- masm_->add(lr, pc, Operand(sizeof(Instr)));
+ // Put the lr setup instruction in the delay slot. kInstrSize is added to
+ // the implicit 8 byte offset that always applies to operations with pc and
+ // gives a return address 12 bytes down.
+ masm_->add(lr, pc, Operand(Assembler::kInstrSize));
masm_->cmp(sp, Operand(ip));
StackCheckStub stub;
// Call the stub if lower.
@@ -1174,7 +1188,6 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Declaration");
- CodeForStatementPosition(node);
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
Slot* slot = var->slot();
@@ -1380,16 +1393,12 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ReturnStatement");
+ CodeForStatementPosition(node);
+ LoadAndSpill(node->expression());
if (function_return_is_shadowed_) {
- CodeForStatementPosition(node);
- LoadAndSpill(node->expression());
frame_->EmitPop(r0);
function_return_.Jump();
} else {
- // Load the returned value.
- CodeForStatementPosition(node);
- LoadAndSpill(node->expression());
-
// Pop the result from the frame and prepare the frame for
// returning thus making it easier to merge.
frame_->EmitPop(r0);
@@ -2801,7 +2810,6 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Assignment");
- CodeForStatementPosition(node);
{ Reference target(this, node->target());
if (target.is_illegal()) {
@@ -2899,13 +2907,11 @@ void CodeGenerator::VisitCall(Call* node) {
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Call");
+ Expression* function = node->expression();
ZoneList<Expression*>* args = node->arguments();
- CodeForStatementPosition(node);
// Standard function call.
-
// Check if the function is a variable or a property.
- Expression* function = node->expression();
Variable* var = function->AsVariableProxy()->AsVariable();
Property* property = function->AsProperty();
@@ -2918,7 +2924,56 @@ void CodeGenerator::VisitCall(Call* node) {
// is resolved in cache misses (this also holds for megamorphic calls).
// ------------------------------------------------------------------------
- if (var != NULL && !var->is_this() && var->is_global()) {
+ if (var != NULL && var->is_possibly_eval()) {
+ // ----------------------------------
+ // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
+ // ----------------------------------
+
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to
+ // resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+ // Prepare stack for call to resolved function.
+ LoadAndSpill(function);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ frame_->EmitPush(r2); // Slot for receiver
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
+ }
+
+ // Prepare stack for call to ResolvePossiblyDirectEval.
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
+ frame_->EmitPush(r1);
+ if (arg_count > 0) {
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+ frame_->EmitPush(r1);
+ } else {
+ frame_->EmitPush(r2);
+ }
+
+ // Resolve the call.
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+ // Touch up stack with the right values for the function and the receiver.
+ __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
+ __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
+ __ str(r1, MemOperand(sp, arg_count * kPointerSize));
+
+ // Call the function.
+ CodeForSourcePosition(node->position());
+
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
+ frame_->CallStub(&call_function, arg_count + 1);
+
+ __ ldr(cp, frame_->Context());
+ // Remove the function from the stack.
+ frame_->Drop();
+ frame_->EmitPush(r0);
+
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is global
// ----------------------------------
@@ -3043,72 +3098,12 @@ void CodeGenerator::VisitCall(Call* node) {
}
-void CodeGenerator::VisitCallEval(CallEval* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ CallEval");
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
- // the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
-
- ZoneList<Expression*>* args = node->arguments();
- Expression* function = node->expression();
-
- CodeForStatementPosition(node);
-
- // Prepare stack for call to resolved function.
- LoadAndSpill(function);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(r2); // Slot for receiver
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
- }
-
- // Prepare stack for call to ResolvePossiblyDirectEval.
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
- frame_->EmitPush(r1);
- if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- frame_->EmitPush(r1);
- } else {
- frame_->EmitPush(r2);
- }
-
- // Resolve the call.
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
-
- // Touch up stack with the right values for the function and the receiver.
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
- __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
- __ str(r1, MemOperand(sp, arg_count * kPointerSize));
-
- // Call the function.
- CodeForSourcePosition(node->position());
-
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
- frame_->CallStub(&call_function, arg_count + 1);
-
- __ ldr(cp, frame_->Context());
- // Remove the function from the stack.
- frame_->Drop();
- frame_->EmitPush(r0);
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
void CodeGenerator::VisitCallNew(CallNew* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ CallNew");
- CodeForStatementPosition(node);
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
@@ -4325,7 +4320,7 @@ static void CountLeadingZeros(
Register source,
Register scratch,
Register zeros) {
-#ifdef __ARM_ARCH_5__
+#ifdef CAN_USE_ARMV5_INSTRUCTIONS
__ clz(zeros, source); // This instruction is only supported after ARM5.
#else
__ mov(zeros, Operand(0));
@@ -4950,12 +4945,12 @@ static void AllocateHeapNumber(
Register scratch2) { // Another scratch register.
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
- __ AllocateObjectInNewSpace(HeapNumber::kSize / kPointerSize,
- result,
- scratch1,
- scratch2,
- need_gc,
- TAG_OBJECT);
+ __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
+ result,
+ scratch1,
+ scratch2,
+ need_gc,
+ TAG_OBJECT);
// Get heap number map and store it in the allocated object.
__ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
@@ -5066,11 +5061,14 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// r5: Address of heap number for result.
__ push(lr); // For later.
__ push(r5); // Address of heap number that is answer.
+ __ AlignStack(0);
// Call C routine that may not cause GC or other trouble.
__ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
__ Call(r5);
+ __ pop(r4); // Address of heap number.
+ __ cmp(r4, Operand(Smi::FromInt(0)));
+ __ pop(r4, eq); // Conditional pop instruction to get rid of alignment push.
// Store answer in the overwritable heap number.
- __ pop(r4);
#if !defined(USE_ARM_EABI)
// Double returned in fp coprocessor register 0 and 1, encoded as register
// cr8. Offsets must be divisible by 4 for coprocessor so we need to
@@ -5623,7 +5621,7 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
// argument, so give it a Smi.
__ mov(r0, Operand(Smi::FromInt(0)));
__ push(r0);
- __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1);
+ __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
__ StubReturn(1);
}
@@ -5678,6 +5676,13 @@ void UnarySubStub::Generate(MacroAssembler* masm) {
}
+int CEntryStub::MinorKey() {
+ ASSERT(result_size_ <= 2);
+ // Result returned in r0 or r0+r1 by default.
+ return 0;
+}
+
+
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// r0 holds the exception.
@@ -6195,7 +6200,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(r1);
- __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1);
+ __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
}
@@ -6216,7 +6221,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3);
+ __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
}
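Regarding the extra argument now passed to TailCallRuntime throughout this file (and in ic-arm.cc below), here is a hedged sketch of a call site with the meaning of each count spelled out; the comments are editorial, not part of the patch:

  __ mov(r0, Operand(Smi::FromInt(0)));
  __ push(r0);                                   // one argument on the stack
  __ TailCallRuntime(ExternalReference(Runtime::kStackGuard),
                     1,   // number of arguments for the runtime call
                     1);  // result size in words; CEntryStub::MinorKey above
                          // asserts this is at most 2 (r0 or r0+r1)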
diff --git a/V8Binding/v8/src/arm/codegen-arm.h b/V8Binding/v8/src/arm/codegen-arm.h
index 70a7b27..1eb0932 100644
--- a/V8Binding/v8/src/arm/codegen-arm.h
+++ b/V8Binding/v8/src/arm/codegen-arm.h
@@ -180,6 +180,10 @@ class CodeGenerator: public AstVisitor {
static const int kUnknownIntValue = -1;
+ // Number of instructions used for the JS return sequence. The constant is
+ // used by the debugger to patch the JS return sequence.
+ static const int kJSReturnSequenceLength = 4;
+
private:
// Construction/Destruction
CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
@@ -366,7 +370,7 @@ class CodeGenerator: public AstVisitor {
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(AstNode* node);
+ void CodeForStatementPosition(Statement* node);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
diff --git a/V8Binding/v8/src/arm/constants-arm.cc b/V8Binding/v8/src/arm/constants-arm.cc
new file mode 100644
index 0000000..964bfe1
--- /dev/null
+++ b/V8Binding/v8/src/arm/constants-arm.cc
@@ -0,0 +1,92 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "constants-arm.h"
+
+
+namespace assembler {
+namespace arm {
+
+namespace v8i = v8::internal;
+
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* Registers::names_[kNumRegisters] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc",
+};
+
+
+// List of alias names which can be used when referring to ARM registers.
+const Registers::RegisterAlias Registers::aliases_[] = {
+ {10, "sl"},
+ {11, "r11"},
+ {12, "r12"},
+ {13, "r13"},
+ {14, "r14"},
+ {15, "r15"},
+ {kNoRegister, NULL}
+};
+
+
+const char* Registers::Name(int reg) {
+ const char* result;
+ if ((0 <= reg) && (reg < kNumRegisters)) {
+ result = names_[reg];
+ } else {
+ result = "noreg";
+ }
+ return result;
+}
+
+
+int Registers::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].reg != kNoRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].reg;
+ }
+ i++;
+ }
+
+ // No register with the requested name found.
+ return kNoRegister;
+}
+
+
+} } // namespace assembler::arm
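A minimal usage sketch of the new lookup helpers, assuming only the tables defined in this file:

  const char* name = assembler::arm::Registers::Name(11);   // "fp"
  int sl = assembler::arm::Registers::Number("sl");         // 10, via the alias table
  int bad = assembler::arm::Registers::Number("bogus");     // kNoRegister (-1)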
diff --git a/V8Binding/v8/src/arm/constants-arm.h b/V8Binding/v8/src/arm/constants-arm.h
index f0311df..6bd0d00 100644
--- a/V8Binding/v8/src/arm/constants-arm.h
+++ b/V8Binding/v8/src/arm/constants-arm.h
@@ -43,15 +43,42 @@
# define USE_THUMB_INTERWORK 1
#endif
+#if defined(__ARM_ARCH_5T__) || \
+ defined(__ARM_ARCH_5TE__) || \
+ defined(__ARM_ARCH_6__) || \
+ defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7__)
+# define CAN_USE_ARMV5_INSTRUCTIONS 1
+# define CAN_USE_THUMB_INSTRUCTIONS 1
+#endif
+
+#if defined(__ARM_ARCH_6__) || \
+ defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7__)
+# define CAN_USE_ARMV6_INSTRUCTIONS 1
+#endif
+
+#if defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7__)
+# define CAN_USE_ARMV7_INSTRUCTIONS 1
+#endif
+
// Simulator should support ARM5 instructions.
#if !defined(__arm__)
-# define __ARM_ARCH_5__ 1
-# define __ARM_ARCH_5T__ 1
+# define CAN_USE_ARMV5_INSTRUCTIONS 1
+# define CAN_USE_THUMB_INSTRUCTIONS 1
#endif
namespace assembler {
namespace arm {
+// Number of registers in normal ARM mode.
+static const int kNumRegisters = 16;
+
+// PC is register 15.
+static const int kPCRegister = 15;
+static const int kNoRegister = -1;
+
// Defines constants and accessor classes to assemble, disassemble and
// simulate ARM instructions.
//
@@ -269,6 +296,27 @@ class Instr {
};
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int reg;
+ const char *name;
+ };
+
+ private:
+ static const char* names_[kNumRegisters];
+ static const RegisterAlias aliases_[];
+};
+
+
+
} } // namespace assembler::arm
#endif // V8_ARM_CONSTANTS_ARM_H_
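These capability macros replace direct __ARM_ARCH_* checks; a minimal sketch of the intended use, mirroring the CountLeadingZeros change in codegen-arm.cc above (register names are placeholders):

  #ifdef CAN_USE_ARMV5_INSTRUCTIONS
    __ clz(zeros, source);   // single-instruction count-leading-zeros, ARMv5 and up
  #else
    // Older cores fall back to the shift-and-test loop kept in codegen-arm.cc.
  #endif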
diff --git a/V8Binding/v8/src/arm/debug-arm.cc b/V8Binding/v8/src/arm/debug-arm.cc
index bcfab6c..4f45175 100644
--- a/V8Binding/v8/src/arm/debug-arm.cc
+++ b/V8Binding/v8/src/arm/debug-arm.cc
@@ -34,28 +34,41 @@ namespace v8 {
namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
-// Currently debug break is not supported in frame exit code on ARM.
bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return false;
+ return Debug::IsDebugBreakAtReturn(rinfo());
}
-// Currently debug break is not supported in frame exit code on ARM.
void BreakLocationIterator::SetDebugBreakAtReturn() {
- UNIMPLEMENTED();
+ // Patch the code changing the return from JS function sequence from
+ // mov sp, fp
+ // ldmia sp!, {fp, lr}
+ // add sp, sp, #4
+ // bx lr
+ // to a call to the debug break return code.
+ // mov lr, pc
+ // ldr pc, [pc, #-4]
+ // <debug break return code entry point address>
+ // bkpt 0
+ CodePatcher patcher(rinfo()->pc(), 4);
+ patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
+ patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
+ patcher.Emit(Debug::debug_break_return()->entry());
+ patcher.masm()->bkpt(0);
}
-// Currently debug break is not supported in frame exit code on ARM.
+// Restore the JS frame exit code.
void BreakLocationIterator::ClearDebugBreakAtReturn() {
- UNIMPLEMENTED();
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ CodeGenerator::kJSReturnSequenceLength);
}
+// A debug break in the exit code is identified by a call.
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
- // Currently debug break is not supported in frame exit code on ARM.
- return false;
+ return rinfo->IsCallInstruction();
}
@@ -95,8 +108,6 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ LeaveInternalFrame();
- // Inlined ExitJSFrame ends here.
-
// Finally restore all registers.
__ RestoreRegistersFromMemory(kJSCallerSaved);
@@ -138,12 +149,20 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // Keyed load IC not implemented on ARM.
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[4] : receiver
+ Generate_DebugBreakCallHelper(masm, 0);
}
void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // Keyed store IC not implemented on ARM.
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[4] : receiver
+ Generate_DebugBreakCallHelper(masm, 0);
}
@@ -179,14 +198,11 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateReturnDebugBreakEntry(MacroAssembler* masm) {
- // Generate nothing as this handling of debug break return is not done this
- // way on ARM - yet.
-}
-
-
void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
- // Generate nothing as CodeStub CallFunction is not used on ARM.
+ // ----------- S t a t e -------------
+ // No registers used on entry.
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, 0);
}
diff --git a/V8Binding/v8/src/arm/disasm-arm.cc b/V8Binding/v8/src/arm/disasm-arm.cc
index 2638409..6431483 100644
--- a/V8Binding/v8/src/arm/disasm-arm.cc
+++ b/V8Binding/v8/src/arm/disasm-arm.cc
@@ -57,6 +57,7 @@
#include "v8.h"
+#include "constants-arm.h"
#include "disasm.h"
#include "macro-assembler.h"
#include "platform.h"
@@ -898,16 +899,6 @@ namespace disasm {
namespace v8i = v8::internal;
-static const int kMaxRegisters = 16;
-
-// These register names are defined in a way to match the native disassembler
-// formatting. See for example the command "objdump -d <binary file>".
-static const char* reg_names[kMaxRegisters] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc",
-};
-
-
const char* NameConverter::NameOfAddress(byte* addr) const {
static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
@@ -921,13 +912,7 @@ const char* NameConverter::NameOfConstant(byte* addr) const {
const char* NameConverter::NameOfCPURegister(int reg) const {
- const char* result;
- if ((0 <= reg) && (reg < kMaxRegisters)) {
- result = reg_names[reg];
- } else {
- result = "noreg";
- }
- return result;
+ return assembler::arm::Registers::Name(reg);
}
diff --git a/V8Binding/v8/src/arm/ic-arm.cc b/V8Binding/v8/src/arm/ic-arm.cc
index 848d04b..d230b45 100644
--- a/V8Binding/v8/src/arm/ic-arm.cc
+++ b/V8Binding/v8/src/arm/ic-arm.cc
@@ -391,7 +391,7 @@ void CallIC::Generate(MacroAssembler* masm,
__ mov(r0, Operand(2));
__ mov(r1, Operand(f));
- CEntryStub stub;
+ CEntryStub stub(1);
__ CallStub(&stub);
// Move result to r1 and leave the internal frame.
@@ -503,7 +503,7 @@ void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
__ stm(db_w, sp, r2.bit() | r3.bit());
// Perform tail call to the entry.
- __ TailCallRuntime(f, 2);
+ __ TailCallRuntime(f, 2, 1);
}
@@ -543,7 +543,7 @@ void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r2.bit() | r3.bit());
- __ TailCallRuntime(f, 2);
+ __ TailCallRuntime(f, 2, 1);
}
@@ -599,7 +599,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ldm(ia, sp, r0.bit() | r1.bit());
__ stm(db_w, sp, r0.bit() | r1.bit());
// Do tail-call to runtime routine.
- __ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2);
+ __ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2, 1);
// Fast case: Do the load.
__ bind(&fast);
@@ -626,7 +626,7 @@ void KeyedStoreIC::Generate(MacroAssembler* masm,
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
- __ TailCallRuntime(f, 3);
+ __ TailCallRuntime(f, 3, 1);
}
@@ -684,7 +684,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object
__ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
// Do tail-call to runtime routine.
- __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3);
+ __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -761,7 +761,7 @@ void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
// Perform tail call to the entry.
__ TailCallRuntime(
- ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);
+ ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
}
@@ -798,7 +798,7 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
// Perform tail call to the entry.
__ TailCallRuntime(
- ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);
+ ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
}
@@ -814,7 +814,7 @@ void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
__ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
// Perform tail call to the entry.
- __ TailCallRuntime(f, 3);
+ __ TailCallRuntime(f, 3, 1);
}
diff --git a/V8Binding/v8/src/arm/macro-assembler-arm.cc b/V8Binding/v8/src/arm/macro-assembler-arm.cc
index c77209e..cf46773 100644
--- a/V8Binding/v8/src/arm/macro-assembler-arm.cc
+++ b/V8Binding/v8/src/arm/macro-assembler-arm.cc
@@ -52,20 +52,15 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
// We do not support thumb inter-working with an arm architecture not supporting
-// the blx instruction (below v5t)
-#if defined(USE_THUMB_INTERWORK)
-#if !defined(__ARM_ARCH_5T__) && \
- !defined(__ARM_ARCH_5TE__) && \
- !defined(__ARM_ARCH_7A__) && \
- !defined(__ARM_ARCH_7__)
-// add tests for other versions above v5t as required
-#error "for thumb inter-working we require architecture v5t or above"
-#endif
+// the blx instruction (below v5t). If you know what CPU you are compiling for
+// you can use -march=armv7 or similar.
+#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
+# error "For thumb inter-working we require an architecture which supports blx"
#endif
// Using blx may yield better code, so use it when required or when available
-#if defined(USE_THUMB_INTERWORK) || defined(__ARM_ARCH_5__)
+#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
#define USE_BLX 1
#endif
@@ -132,7 +127,7 @@ void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
// and the target address of the call would be referenced by the first
// instruction rather than the second one, which would make it harder to patch
// (two instructions before the return address, instead of one).
- ASSERT(kPatchReturnSequenceLength == sizeof(Instr));
+ ASSERT(kCallTargetAddressOffset == kInstrSize);
}
@@ -166,7 +161,7 @@ void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
add(pc, pc, Operand(index,
LSL,
assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
- BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * sizeof(Instr));
+ BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
nop(); // Jump table alignment.
for (int i = 0; i < targets.length(); i++) {
b(targets[i]);
@@ -296,27 +291,8 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
// Align the stack at this point. After this point we have 5 pushes,
// so in fact we have to unalign here! See also the assert on the
- // alignment immediately below.
-#if defined(V8_HOST_ARCH_ARM)
- // Running on the real platform. Use the alignment as mandated by the local
- // environment.
- // Note: This will break if we ever start generating snapshots on one ARM
- // platform for another ARM platform with a different alignment.
- int activation_frame_alignment = OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_ARM)
- // If we are using the simulator then we should always align to the expected
- // alignment. As the simulator is used to generate snapshots we do not know
- // if the target platform will need alignment, so we will always align at
- // this point here.
- int activation_frame_alignment = 2 * kPointerSize;
-#endif // defined(V8_HOST_ARCH_ARM)
- if (activation_frame_alignment != kPointerSize) {
- // This code needs to be made more general if this assert doesn't hold.
- ASSERT(activation_frame_alignment == 2 * kPointerSize);
- mov(r7, Operand(Smi::FromInt(0)));
- tst(sp, Operand(activation_frame_alignment - 1));
- push(r7, eq); // Conditional push instruction.
- }
+ // alignment in AlignStack.
+ AlignStack(1);
// Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
@@ -348,6 +324,30 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
}
+void MacroAssembler::AlignStack(int offset) {
+#if defined(V8_HOST_ARCH_ARM)
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one ARM
+ // platform for another ARM platform with a different alignment.
+ int activation_frame_alignment = OS::ActivationFrameAlignment();
+#else // defined(V8_HOST_ARCH_ARM)
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so we will always align at
+ // this point here.
+ int activation_frame_alignment = 2 * kPointerSize;
+#endif // defined(V8_HOST_ARCH_ARM)
+ if (activation_frame_alignment != kPointerSize) {
+ // This code needs to be made more general if this assert doesn't hold.
+ ASSERT(activation_frame_alignment == 2 * kPointerSize);
+ mov(r7, Operand(Smi::FromInt(0)));
+ tst(sp, Operand(activation_frame_alignment - offset));
+ push(r7, eq); // Conditional push instruction.
+ }
+}
+
+
void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
@@ -768,12 +768,12 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
-void MacroAssembler::AllocateObjectInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(scratch1));
ASSERT(!scratch1.is(scratch2));
@@ -818,12 +818,12 @@ void MacroAssembler::AllocateObjectInNewSpace(int object_size,
}
-void MacroAssembler::AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(scratch1));
ASSERT(!scratch1.is(scratch2));
@@ -999,23 +999,24 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
- int num_arguments) {
+ int num_arguments,
+ int result_size) {
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
mov(r0, Operand(num_arguments));
- JumpToBuiltin(ext);
+ JumpToRuntime(ext);
}
-void MacroAssembler::JumpToBuiltin(const ExternalReference& builtin) {
+void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
#if defined(__thumb__)
// Thumb mode builtin.
ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
#endif
mov(r1, Operand(builtin));
- CEntryStub stub;
+ CEntryStub stub(1);
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -1052,7 +1053,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
Bootstrapper::FixupFlagsUseCodeObject::encode(false);
- Unresolved entry = { pc_offset() - sizeof(Instr), flags, name };
+ Unresolved entry = { pc_offset() - kInstrSize, flags, name };
unresolved_.Add(entry);
}
}
@@ -1070,7 +1071,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
Bootstrapper::FixupFlagsUseCodeObject::encode(true);
- Unresolved entry = { pc_offset() - sizeof(Instr), flags, name };
+ Unresolved entry = { pc_offset() - kInstrSize, flags, name };
unresolved_.Add(entry);
}
@@ -1151,4 +1152,38 @@ void MacroAssembler::Abort(const char* msg) {
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
+CodePatcher::CodePatcher(byte* address, int instructions)
+ : address_(address),
+ instructions_(instructions),
+ size_(instructions * Assembler::kInstrSize),
+ masm_(address, size_ + Assembler::kGap) {
+ // Create a new macro assembler pointing to the address of the code to patch.
+ // The size is adjusted with kGap in order for the assembler to generate size
+ // bytes of instructions without failing with buffer size constraints.
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+ // Indicate that code has changed.
+ CPU::FlushICache(address_, size_);
+
+ // Check that the code was patched as expected.
+ ASSERT(masm_.pc_ == address_ + size_);
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+void CodePatcher::Emit(Instr x) {
+ masm()->emit(x);
+}
+
+
+void CodePatcher::Emit(Address addr) {
+ masm()->emit(reinterpret_cast<Instr>(addr));
+}
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+
} } // namespace v8::internal
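The renamed allocator keeps both overloads; below is a condensed sketch of the two call shapes used elsewhere in this change, with arguments taken from the call sites in codegen-arm.cc and builtins-arm.cc:

  // Size known at compile time (AllocateHeapNumber):
  __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
                        result, scratch1, scratch2, need_gc, TAG_OBJECT);
  // Size computed at run time, held in a register (AllocateJSArray):
  __ AllocateInNewSpace(elements_array_end,
                        result, scratch1, scratch2, gc_required, TAG_OBJECT);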
diff --git a/V8Binding/v8/src/arm/macro-assembler-arm.h b/V8Binding/v8/src/arm/macro-assembler-arm.h
index ad4b174..ee9d70d 100644
--- a/V8Binding/v8/src/arm/macro-assembler-arm.h
+++ b/V8Binding/v8/src/arm/macro-assembler-arm.h
@@ -96,6 +96,8 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in r0.
void LeaveExitFrame(StackFrame::Type type);
+ // Align the stack by optionally pushing a Smi zero.
+ void AlignStack(int offset);
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -171,18 +173,18 @@ class MacroAssembler: public Assembler {
// bytes). If the new space is exhausted control continues at the gc_required
// label. The allocated object is returned in result. If the flag
// tag_allocated_object is true the result is tagged as a heap object.
- void AllocateObjectInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
- void AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ void AllocateInNewSpace(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+ void AllocateInNewSpace(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. The caller must make sure that no pointers
@@ -257,12 +259,14 @@ class MacroAssembler: public Assembler {
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
// Tail call of a runtime routine (jump).
- // Like JumpToBuiltin, but also takes care of passing the number
+ // Like JumpToRuntime, but also takes care of passing the number
// of parameters.
- void TailCallRuntime(const ExternalReference& ext, int num_arguments);
+ void TailCallRuntime(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
- // Jump to the builtin routine.
- void JumpToBuiltin(const ExternalReference& builtin);
+ // Jump to a runtime routine.
+ void JumpToRuntime(const ExternalReference& builtin);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
@@ -327,8 +331,16 @@ class MacroAssembler: public Assembler {
Label* done,
InvokeFlag flag);
- // Get the code for the given builtin. Returns if able to resolve
- // the function in the 'resolved' flag.
+ // Prepares for a call or jump to a builtin by doing two things:
+ // 1. Emits code that fetches the builtin's function object from the context
+ // at runtime, and puts it in a register.
+ // 2. Fetches the builtin's code object, and returns it in a handle, at
+ // compile time, so that later code can emit instructions to jump or call
+ // the builtin directly. If the code object has not yet been created, it
+ // returns the builtin code object for IllegalFunction, and sets the
+ // output parameter "resolved" to false. Code that uses the return value
+ // should then add the address and the builtin name to the list of fixups
+ // called unresolved_, which is fixed up by the bootstrapper.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
// Activation support.
@@ -337,6 +349,35 @@ class MacroAssembler: public Assembler {
};
+#ifdef ENABLE_DEBUGGER_SUPPORT
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to emit
+// relocation information. If any of these constraints are violated it causes
+// an assertion to fail.
+class CodePatcher {
+ public:
+ CodePatcher(byte* address, int instructions);
+ virtual ~CodePatcher();
+
+ // Macro assembler to emit code.
+ MacroAssembler* masm() { return &masm_; }
+
+ // Emit an instruction directly.
+ void Emit(Instr x);
+
+ // Emit an address directly.
+ void Emit(Address addr);
+
+ private:
+ byte* address_; // The address of the code being patched.
+ int instructions_; // Number of instructions of the expected patch size.
+ int size_; // Number of bytes of the expected patch size.
+ MacroAssembler masm_; // Macro assembler used to generate the code.
+};
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+
// -----------------------------------------------------------------------------
// Static helper functions.
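A minimal CodePatcher usage sketch, modelled on SetDebugBreakAtReturn in debug-arm.cc above; return_pc and target are hypothetical values introduced only for the example:

  {
    CodePatcher patcher(return_pc, 4);   // exactly four instruction slots
    patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
    patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
    patcher.Emit(target);                // literal address consumed by the ldr
    patcher.masm()->bkpt(0);
  }  // ~CodePatcher flushes the instruction cache and checks the emitted size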
diff --git a/V8Binding/v8/src/arm/simulator-arm.cc b/V8Binding/v8/src/arm/simulator-arm.cc
index 7d0ee24..22bec82 100644
--- a/V8Binding/v8/src/arm/simulator-arm.cc
+++ b/V8Binding/v8/src/arm/simulator-arm.cc
@@ -70,6 +70,7 @@ class Debugger {
Simulator* sim_;
+ int32_t GetRegisterValue(int regnum);
bool GetValue(const char* desc, int32_t* value);
// Set or delete a breakpoint. Returns true if successful.
@@ -132,43 +133,19 @@ void Debugger::Stop(Instr* instr) {
#endif
-// The order of these are important, see the handling of the 'print all'
-// debugger command.
-static const char* reg_names[] = { "r0", "r1", "r2", "r3",
- "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "r11",
- "r12", "r13", "r14", "r15",
- "pc", "lr", "sp", "ip",
- "fp", "sl", ""};
-
-static int reg_nums[] = { 0, 1, 2, 3,
- 4, 5, 6, 7,
- 8, 9, 10, 11,
- 12, 13, 14, 15,
- 15, 14, 13, 12,
- 11, 10};
-
-
-static int RegNameToRegNum(const char* name) {
- int reg = 0;
- while (*reg_names[reg] != 0) {
- if (strcmp(reg_names[reg], name) == 0) {
- return reg_nums[reg];
- }
- reg++;
+int32_t Debugger::GetRegisterValue(int regnum) {
+ if (regnum == kPCRegister) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_register(regnum);
}
- return -1;
}
bool Debugger::GetValue(const char* desc, int32_t* value) {
- int regnum = RegNameToRegNum(desc);
- if (regnum >= 0) {
- if (regnum == 15) {
- *value = sim_->get_pc();
- } else {
- *value = sim_->get_register(regnum);
- }
+ int regnum = Registers::Number(desc);
+ if (regnum != kNoRegister) {
+ *value = GetRegisterValue(regnum);
return true;
} else {
return SScanF(desc, "%i", value) == 1;
@@ -273,17 +250,9 @@ void Debugger::Debug() {
if (args == 2) {
int32_t value;
if (strcmp(arg1, "all") == 0) {
- for (int i = 0; i <= 15; i++) {
- if (GetValue(reg_names[i], &value)) {
- if (i <= 10) {
- PrintF("%3s: 0x%08x %d\n", reg_names[i], value, value);
- } else {
- PrintF("%3s: 0x%08x %d\n",
- reg_names[15 + 16 - i],
- value,
- value);
- }
- }
+ for (int i = 0; i < kNumRegisters; i++) {
+ value = GetRegisterValue(i);
+ PrintF("%3s: 0x%08x %10d\n", Registers::Name(i), value, value);
}
} else {
if (GetValue(arg1, &value)) {
@@ -301,7 +270,6 @@ void Debugger::Debug() {
int32_t value;
if (GetValue(arg1, &value)) {
Object* obj = reinterpret_cast<Object*>(value);
- USE(obj);
PrintF("%s: \n", arg1);
#ifdef DEBUG
obj->PrintLn();
@@ -441,7 +409,7 @@ void Simulator::Initialize() {
Simulator::Simulator() {
- ASSERT(initialized_);
+ Initialize();
// Setup simulator support first. Some of this information is needed to
// setup the architecture state.
size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
@@ -533,6 +501,7 @@ void* Simulator::RedirectExternalReference(void* external_function,
// Get the active Simulator for the current thread.
Simulator* Simulator::current() {
+ Initialize();
Simulator* sim = reinterpret_cast<Simulator*>(
v8::internal::Thread::GetThreadLocal(simulator_key));
if (sim == NULL) {
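
The table-driven register-name lookup is replaced by Registers::Number plus the new GetRegisterValue helper. A standalone sketch of that path, using only names that appear in the hunks above (the helper name is illustrative; the real GetValue also falls back to SScanF for plain integers):

    static bool ReadNamedRegister(Simulator* sim, const char* name, int32_t* out) {
      int regnum = Registers::Number(name);     // maps names/aliases to an index
      if (regnum == kNoRegister) return false;  // not a known register name
      // pc is not held in the general register array, so it is special-cased.
      *out = (regnum == kPCRegister) ? sim->get_pc() : sim->get_register(regnum);
      return true;
    }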
diff --git a/V8Binding/v8/src/arm/simulator-arm.h b/V8Binding/v8/src/arm/simulator-arm.h
index 3917d6a..ff6bbf4 100644
--- a/V8Binding/v8/src/arm/simulator-arm.h
+++ b/V8Binding/v8/src/arm/simulator-arm.h
@@ -36,18 +36,23 @@
#ifndef V8_ARM_SIMULATOR_ARM_H_
#define V8_ARM_SIMULATOR_ARM_H_
+#include "allocation.h"
+
#if defined(__arm__)
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) - limit)
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on arm uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ return c_limit;
+ }
+};
// Call the generated regexp code directly. The entry function pointer should
@@ -64,12 +69,6 @@
assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
p0, p1, p2, p3, p4))
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (assembler::arm::Simulator::current()->StackLimit())
-
-
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
assembler::arm::Simulator::current()->Call( \
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
@@ -219,6 +218,20 @@ class Simulator {
} } // namespace assembler::arm
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code. Setting the c_limit to indicate a very small
+// stack will not cause stack overflow errors, since the simulator ignores
+// the value passed in. This is unlikely to be an issue in practice, though
+// it might cause trouble for tests down the line.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ return assembler::arm::Simulator::current()->StackLimit();
+ }
+};
+
+
#endif // defined(__arm__)
#endif // V8_ARM_SIMULATOR_ARM_H_
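
Both branches of the #if now export the same SimulatorStack::JsLimitFromCLimit entry point, so callers can stay platform-agnostic. A minimal sketch of the intended call pattern (hypothetical caller name):

    static uintptr_t ComputeJsStackLimit(uintptr_t c_stack_limit) {
      // On hardware this returns c_stack_limit unchanged; under the simulator
      // it returns the simulator's own stack limit instead.
      return SimulatorStack::JsLimitFromCLimit(c_stack_limit);
    }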
diff --git a/V8Binding/v8/src/arm/stub-cache-arm.cc b/V8Binding/v8/src/arm/stub-cache-arm.cc
index 88d3303..8282655 100644
--- a/V8Binding/v8/src/arm/stub-cache-arm.cc
+++ b/V8Binding/v8/src/arm/stub-cache-arm.cc
@@ -478,7 +478,7 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallRuntime(load_callback_property, 5);
+ __ TailCallRuntime(load_callback_property, 5, 1);
}
@@ -514,7 +514,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference load_ic_property =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
- __ TailCallRuntime(load_ic_property, 5);
+ __ TailCallRuntime(load_ic_property, 5, 1);
}
@@ -884,7 +884,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
- __ TailCallRuntime(store_callback_property, 4);
+ __ TailCallRuntime(store_callback_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
@@ -936,7 +936,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
- __ TailCallRuntime(store_ic_property, 3);
+ __ TailCallRuntime(store_ic_property, 3, 1);
// Handle store cache miss.
__ bind(&miss);
@@ -1390,12 +1390,12 @@ Object* ConstructStubCompiler::CompileConstructStub(
// r2: initial map
// r7: undefined
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateObjectInNewSpace(r3,
- r4,
- r5,
- r6,
- &generic_stub_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(r3,
+ r4,
+ r5,
+ r6,
+ &generic_stub_call,
+ NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
// map and properties and elements are set to empty fixed array.
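
TailCallRuntime now takes a third argument at every call site in this file. A hedged reading, inferred from the call sites alone (the declaration is not part of this diff): the extra value appears to describe the size of the runtime call's result in words, e.g.

    __ TailCallRuntime(load_callback_property,
                       5,   // arguments pushed for the runtime call
                       1);  // result size in words (assumed meaning)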
diff --git a/V8Binding/v8/src/arm/virtual-frame-arm.cc b/V8Binding/v8/src/arm/virtual-frame-arm.cc
index 5b5c870..2d5b140 100644
--- a/V8Binding/v8/src/arm/virtual-frame-arm.cc
+++ b/V8Binding/v8/src/arm/virtual-frame-arm.cc
@@ -127,6 +127,10 @@ void VirtualFrame::Enter() {
void VirtualFrame::Exit() {
Comment cmnt(masm(), "[ Exit JS frame");
+ // Record the location of the JS exit code for patching when setting a
+ // break point.
+ __ RecordJSReturn();
+
// Drop the execution stack down to the frame pointer and restore the caller
// frame pointer and return address.
__ mov(sp, fp);
@@ -149,10 +153,10 @@ void VirtualFrame::AllocateStackSlots() {
__ push(ip);
}
if (FLAG_check_stack) {
- // Put the lr setup instruction in the delay slot. The 'sizeof(Instr)' is
- // added to the implicit 8 byte offset that always applies to operations
- // with pc and gives a return address 12 bytes down.
- masm()->add(lr, pc, Operand(sizeof(Instr)));
+ // Put the lr setup instruction in the delay slot. The kInstrSize is added
+ // to the implicit 8 byte offset that always applies to operations with pc
+ // and gives a return address 12 bytes down.
+ masm()->add(lr, pc, Operand(Assembler::kInstrSize));
masm()->cmp(sp, Operand(r2));
StackCheckStub stub;
// Call the stub if lower.
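
The 12-byte figure in the rewritten comment follows from ARM's pc-read semantics. A worked illustration of the emitted sequence (offsets assume Assembler::kInstrSize == 4, as the comment implies):

    masm()->add(lr, pc, Operand(Assembler::kInstrSize));  // pc reads as add_addr + 8,
                                                          // so lr = add_addr + 12
    masm()->cmp(sp, Operand(r2));                         // at add_addr + 4
    // StackCheckStub call emitted next                   // at add_addr + 8
    // execution resumes here after the stub returns      // at add_addr + 12 == lr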
diff --git a/V8Binding/v8/src/array.js b/V8Binding/v8/src/array.js
index eb69f97..f8e63d0 100644
--- a/V8Binding/v8/src/array.js
+++ b/V8Binding/v8/src/array.js
@@ -709,6 +709,8 @@ function ArraySort(comparefn) {
QuickSort(a, high_start, to);
}
+ var length;
+
// Copies elements in the range 0..length from obj's prototype chain
// to obj itself, if obj has holes. Returns one more than the maximal index
// of a prototype property.
@@ -826,7 +828,7 @@ function ArraySort(comparefn) {
return first_undefined;
}
- var length = ToUint32(this.length);
+ length = ToUint32(this.length);
if (length < 2) return this;
var is_array = IS_ARRAY(this);
diff --git a/V8Binding/v8/src/assembler.cc b/V8Binding/v8/src/assembler.cc
index 3563ebd..d81b4b0 100644
--- a/V8Binding/v8/src/assembler.cc
+++ b/V8Binding/v8/src/assembler.cc
@@ -494,7 +494,7 @@ void RelocInfo::Verify() {
Address addr = target_address();
ASSERT(addr != NULL);
// Check that we can find the right code object.
- HeapObject* code = HeapObject::FromAddress(addr - Code::kHeaderSize);
+ Code* code = Code::GetCodeFromTargetAddress(addr);
Object* found = Heap::FindCodeObject(addr);
ASSERT(found->IsCode());
ASSERT(code->address() == HeapObject::cast(found)->address());
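
The verification now goes through a typed accessor rather than raw pointer arithmetic. A sketch of what Code::GetCodeFromTargetAddress is assumed to reduce to, based only on the expression it replaces here:

    static Code* GetCodeFromTargetAddressSketch(Address addr) {
      // Back up over the code header to the start of the Code object, then
      // return it with the right static type so call sites need no casts.
      HeapObject* code = HeapObject::FromAddress(addr - Code::kHeaderSize);
      return Code::cast(code);
    }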
diff --git a/V8Binding/v8/src/ast.cc b/V8Binding/v8/src/ast.cc
index 2b60742..692bec0 100644
--- a/V8Binding/v8/src/ast.cc
+++ b/V8Binding/v8/src/ast.cc
@@ -40,7 +40,6 @@ VariableProxySentinel VariableProxySentinel::identifier_proxy_(false);
ValidLeftHandSideSentinel ValidLeftHandSideSentinel::instance_;
Property Property::this_property_(VariableProxySentinel::this_proxy(), NULL, 0);
Call Call::sentinel_(NULL, NULL, 0);
-CallEval CallEval::sentinel_(NULL, NULL, 0);
// ----------------------------------------------------------------------------
diff --git a/V8Binding/v8/src/ast.h b/V8Binding/v8/src/ast.h
index ea83712..6a1cdf5 100644
--- a/V8Binding/v8/src/ast.h
+++ b/V8Binding/v8/src/ast.h
@@ -85,7 +85,6 @@ namespace internal {
V(Throw) \
V(Property) \
V(Call) \
- V(CallEval) \
V(CallNew) \
V(CallRuntime) \
V(UnaryOperation) \
@@ -116,7 +115,6 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
class AstNode: public ZoneObject {
public:
- AstNode(): statement_pos_(RelocInfo::kNoPosition) { }
virtual ~AstNode() { }
virtual void Accept(AstVisitor* v) = 0;
@@ -140,21 +138,23 @@ class AstNode: public ZoneObject {
virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
-
- void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
- int statement_pos() const { return statement_pos_; }
-
- private:
- int statement_pos_;
};
class Statement: public AstNode {
public:
+ Statement() : statement_pos_(RelocInfo::kNoPosition) {}
+
virtual Statement* AsStatement() { return this; }
virtual ReturnStatement* AsReturnStatement() { return NULL; }
bool IsEmpty() { return AsEmptyStatement() != NULL; }
+
+ void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
+ int statement_pos() const { return statement_pos_; }
+
+ private:
+ int statement_pos_;
};
@@ -954,12 +954,8 @@ class Property: public Expression {
class Call: public Expression {
public:
- Call(Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos)
- : expression_(expression),
- arguments_(arguments),
- pos_(pos) { }
+ Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
+ : expression_(expression), arguments_(arguments), pos_(pos) { }
virtual void Accept(AstVisitor* v);
@@ -981,30 +977,21 @@ class Call: public Expression {
};
-class CallNew: public Call {
+class CallNew: public Expression {
public:
CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
- : Call(expression, arguments, pos) { }
-
- virtual void Accept(AstVisitor* v);
-};
-
-
-// The CallEval class represents a call of the form 'eval(...)' where eval
-// cannot be seen to be overwritten at compile time. It is potentially a
-// direct (i.e. not aliased) eval call. The real nature of the call is
-// determined at runtime.
-class CallEval: public Call {
- public:
- CallEval(Expression* expression, ZoneList<Expression*>* arguments, int pos)
- : Call(expression, arguments, pos) { }
+ : expression_(expression), arguments_(arguments), pos_(pos) { }
virtual void Accept(AstVisitor* v);
- static CallEval* sentinel() { return &sentinel_; }
+ Expression* expression() const { return expression_; }
+ ZoneList<Expression*>* arguments() const { return arguments_; }
+ int position() { return pos_; }
private:
- static CallEval sentinel_;
+ Expression* expression_;
+ ZoneList<Expression*>* arguments_;
+ int pos_;
};
diff --git a/V8Binding/v8/src/bootstrapper.cc b/V8Binding/v8/src/bootstrapper.cc
index a2c4562..e2d23ef 100644
--- a/V8Binding/v8/src/bootstrapper.cc
+++ b/V8Binding/v8/src/bootstrapper.cc
@@ -474,7 +474,7 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
// Please note that the prototype property for function instances must be
// writable.
Handle<DescriptorArray> function_map_descriptors =
- ComputeFunctionInstanceDescriptor(false, true);
+ ComputeFunctionInstanceDescriptor(false, false);
fm->set_instance_descriptors(*function_map_descriptors);
// Allocate the function map first and then patch the prototype later
@@ -654,6 +654,8 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize,
Top::initial_object_prototype(), Builtins::ArrayCode,
true);
+ array_function->shared()->set_construct_stub(
+ Builtins::builtin(Builtins::ArrayConstructCode));
array_function->shared()->DontAdaptArguments();
// This seems a bit hackish, but we need to make sure Array.length
@@ -1471,7 +1473,7 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
HandleScope scope;
Handle<DescriptorArray> function_map_descriptors =
- ComputeFunctionInstanceDescriptor(false, true);
+ ComputeFunctionInstanceDescriptor(false);
Handle<Map> fm = Factory::CopyMapDropDescriptors(Top::function_map());
fm->set_instance_descriptors(*function_map_descriptors);
Top::context()->global_context()->set_function_map(*fm);
@@ -1584,6 +1586,12 @@ char* Bootstrapper::RestoreState(char* from) {
}
+// Called when the top-level V8 mutex is destroyed.
+void Bootstrapper::FreeThreadResources() {
+ ASSERT(Genesis::current() == NULL);
+}
+
+
// Reserve space for statics needing saving and restoring.
int Genesis::ArchiveSpacePerThread() {
return sizeof(current_);
diff --git a/V8Binding/v8/src/bootstrapper.h b/V8Binding/v8/src/bootstrapper.h
index 0d743e3..809cd41 100644
--- a/V8Binding/v8/src/bootstrapper.h
+++ b/V8Binding/v8/src/bootstrapper.h
@@ -74,6 +74,7 @@ class Bootstrapper : public AllStatic {
static int ArchiveSpacePerThread();
static char* ArchiveState(char* to);
static char* RestoreState(char* from);
+ static void FreeThreadResources();
};
}} // namespace v8::internal
diff --git a/V8Binding/v8/src/builtins.cc b/V8Binding/v8/src/builtins.cc
index 4262dd2..afb5427 100644
--- a/V8Binding/v8/src/builtins.cc
+++ b/V8Binding/v8/src/builtins.cc
@@ -135,7 +135,9 @@ BUILTIN(EmptyFunction) {
BUILTIN_END
-BUILTIN(ArrayCode) {
+BUILTIN(ArrayCodeGeneric) {
+ Counters::array_function_runtime.Increment();
+
JSArray* array;
if (CalledAsConstructor()) {
array = JSArray::cast(*receiver);
@@ -166,11 +168,13 @@ BUILTIN(ArrayCode) {
// Take the argument as the length.
obj = array->Initialize(0);
if (obj->IsFailure()) return obj;
- if (args.length() == 2) return array->SetElementsLength(args[1]);
+ return array->SetElementsLength(args[1]);
}
// Optimize the case where there are no parameters passed.
- if (args.length() == 1) return array->Initialize(4);
+ if (args.length() == 1) {
+ return array->Initialize(JSArray::kPreallocatedArrayElements);
+ }
// Take the arguments as elements.
int number_of_elements = args.length() - 1;
@@ -609,11 +613,6 @@ static void Generate_Return_DebugBreak(MacroAssembler* masm) {
}
-static void Generate_Return_DebugBreakEntry(MacroAssembler* masm) {
- Debug::GenerateReturnDebugBreakEntry(masm);
-}
-
-
static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) {
Debug::GenerateStubNoRegistersDebugBreak(masm);
}
diff --git a/V8Binding/v8/src/builtins.h b/V8Binding/v8/src/builtins.h
index 0f4a610..141d5b7 100644
--- a/V8Binding/v8/src/builtins.h
+++ b/V8Binding/v8/src/builtins.h
@@ -37,7 +37,7 @@ namespace internal {
\
V(EmptyFunction) \
\
- V(ArrayCode) \
+ V(ArrayCodeGeneric) \
\
V(ArrayPush) \
V(ArrayPop) \
@@ -83,14 +83,15 @@ namespace internal {
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED) \
- V(FunctionApply, BUILTIN, UNINITIALIZED)
-
+ V(FunctionApply, BUILTIN, UNINITIALIZED) \
+ \
+ V(ArrayCode, BUILTIN, UNINITIALIZED) \
+ V(ArrayConstructCode, BUILTIN, UNINITIALIZED)
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
#define BUILTIN_LIST_DEBUG_A(V) \
V(Return_DebugBreak, BUILTIN, DEBUG_BREAK) \
- V(Return_DebugBreakEntry, BUILTIN, DEBUG_BREAK) \
V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK) \
V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK) \
V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK) \
@@ -218,6 +219,9 @@ class Builtins : public AllStatic {
static void Generate_FunctionCall(MacroAssembler* masm);
static void Generate_FunctionApply(MacroAssembler* masm);
+
+ static void Generate_ArrayCode(MacroAssembler* masm);
+ static void Generate_ArrayConstructCode(MacroAssembler* masm);
};
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/cfg.cc b/V8Binding/v8/src/cfg.cc
deleted file mode 100644
index d2dff52..0000000
--- a/V8Binding/v8/src/cfg.cc
+++ /dev/null
@@ -1,763 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "cfg.h"
-#include "scopeinfo.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-
-CfgGlobals* CfgGlobals::top_ = NULL;
-
-
-CfgGlobals::CfgGlobals(FunctionLiteral* fun)
- : global_fun_(fun),
- global_exit_(new ExitNode()),
- nowhere_(new Nowhere()),
-#ifdef DEBUG
- node_counter_(0),
- temp_counter_(0),
-#endif
- previous_(top_) {
- top_ = this;
-}
-
-
-#define BAILOUT(reason) \
- do { return NULL; } while (false)
-
-Cfg* Cfg::Build() {
- FunctionLiteral* fun = CfgGlobals::current()->fun();
- if (fun->scope()->num_heap_slots() > 0) {
- BAILOUT("function has context slots");
- }
- if (fun->scope()->num_stack_slots() > kBitsPerPointer) {
- BAILOUT("function has too many locals");
- }
- if (fun->scope()->num_parameters() > kBitsPerPointer - 1) {
- BAILOUT("function has too many parameters");
- }
- if (fun->scope()->arguments() != NULL) {
- BAILOUT("function uses .arguments");
- }
-
- ZoneList<Statement*>* body = fun->body();
- if (body->is_empty()) {
- BAILOUT("empty function body");
- }
-
- StatementCfgBuilder builder;
- builder.VisitStatements(body);
- Cfg* graph = builder.graph();
- if (graph == NULL) {
- BAILOUT("unsupported statement type");
- }
- if (graph->is_empty()) {
- BAILOUT("function body produces empty cfg");
- }
- if (graph->has_exit()) {
- BAILOUT("control path without explicit return");
- }
- graph->PrependEntryNode();
- return graph;
-}
-
-#undef BAILOUT
-
-
-void Cfg::PrependEntryNode() {
- ASSERT(!is_empty());
- entry_ = new EntryNode(InstructionBlock::cast(entry()));
-}
-
-
-void Cfg::Append(Instruction* instr) {
- ASSERT(is_empty() || has_exit());
- if (is_empty()) {
- entry_ = exit_ = new InstructionBlock();
- }
- InstructionBlock::cast(exit_)->Append(instr);
-}
-
-
-void Cfg::AppendReturnInstruction(Value* value) {
- Append(new ReturnInstr(value));
- ExitNode* global_exit = CfgGlobals::current()->exit();
- InstructionBlock::cast(exit_)->set_successor(global_exit);
- exit_ = NULL;
-}
-
-
-void Cfg::Concatenate(Cfg* other) {
- ASSERT(is_empty() || has_exit());
- if (other->is_empty()) return;
-
- if (is_empty()) {
- entry_ = other->entry();
- exit_ = other->exit();
- } else {
- // We have a pair of nonempty fragments and this has an available exit.
- // Destructively glue the fragments together.
- InstructionBlock* first = InstructionBlock::cast(exit_);
- InstructionBlock* second = InstructionBlock::cast(other->entry());
- first->instructions()->AddAll(*second->instructions());
- if (second->successor() != NULL) {
- first->set_successor(second->successor());
- exit_ = other->exit();
- }
- }
-}
-
-
-void InstructionBlock::Unmark() {
- if (is_marked_) {
- is_marked_ = false;
- successor_->Unmark();
- }
-}
-
-
-void EntryNode::Unmark() {
- if (is_marked_) {
- is_marked_ = false;
- successor_->Unmark();
- }
-}
-
-
-void ExitNode::Unmark() {
- is_marked_ = false;
-}
-
-
-Handle<Code> Cfg::Compile(Handle<Script> script) {
- const int kInitialBufferSize = 4 * KB;
- MacroAssembler* masm = new MacroAssembler(NULL, kInitialBufferSize);
- entry()->Compile(masm);
- entry()->Unmark();
- CodeDesc desc;
- masm->GetCode(&desc);
- FunctionLiteral* fun = CfgGlobals::current()->fun();
- ZoneScopeInfo info(fun->scope());
- InLoopFlag in_loop = fun->loop_nesting() ? IN_LOOP : NOT_IN_LOOP;
- Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
- Handle<Code> code = Factory::NewCode(desc, &info, flags, masm->CodeObject());
-
- // Add unresolved entries in the code to the fixup list.
- Bootstrapper::AddFixup(*code, masm);
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code) {
- // Print the source code if available.
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- PrintF("--- Raw source ---\n");
- StringInputBuffer stream(String::cast(script->source()));
- stream.Seek(fun->start_position());
- // fun->end_position() points to the last character in the
- // stream. We need to compensate by adding one to calculate the
- // length.
- int source_len = fun->end_position() - fun->start_position() + 1;
- for (int i = 0; i < source_len; i++) {
- if (stream.has_more()) PrintF("%c", stream.GetNext());
- }
- PrintF("\n\n");
- }
- PrintF("--- Code ---\n");
- code->Disassemble(*fun->name()->ToCString());
- }
-#endif
-
- return code;
-}
-
-
-void ZeroOperandInstruction::FastAllocate(TempLocation* temp) {
- temp->set_where(TempLocation::STACK);
-}
-
-
-void OneOperandInstruction::FastAllocate(TempLocation* temp) {
- temp->set_where((temp == value_)
- ? TempLocation::ACCUMULATOR
- : TempLocation::STACK);
-}
-
-
-void TwoOperandInstruction::FastAllocate(TempLocation* temp) {
- temp->set_where((temp == value0_ || temp == value1_)
- ? TempLocation::ACCUMULATOR
- : TempLocation::STACK);
-}
-
-
-void PositionInstr::Compile(MacroAssembler* masm) {
- if (FLAG_debug_info && pos_ != RelocInfo::kNoPosition) {
- masm->RecordStatementPosition(pos_);
- masm->RecordPosition(pos_);
- }
-}
-
-
-void MoveInstr::Compile(MacroAssembler* masm) {
- location()->Move(masm, value());
-}
-
-
-// The expression builder should not be used for declarations or statements.
-void ExpressionCfgBuilder::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-#define DEFINE_VISIT(type) \
- void ExpressionCfgBuilder::Visit##type(type* stmt) { UNREACHABLE(); }
-STATEMENT_NODE_LIST(DEFINE_VISIT)
-#undef DEFINE_VISIT
-
-
-// Macros (temporarily) handling unsupported expression types.
-#define BAILOUT(reason) \
- do { \
- graph_ = NULL; \
- return; \
- } while (false)
-
-void ExpressionCfgBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
- BAILOUT("FunctionLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitFunctionBoilerplateLiteral(
- FunctionBoilerplateLiteral* expr) {
- BAILOUT("FunctionBoilerplateLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitConditional(Conditional* expr) {
- BAILOUT("Conditional");
-}
-
-
-void ExpressionCfgBuilder::VisitSlot(Slot* expr) {
- BAILOUT("Slot");
-}
-
-
-void ExpressionCfgBuilder::VisitVariableProxy(VariableProxy* expr) {
- Expression* rewrite = expr->var()->rewrite();
- if (rewrite == NULL || rewrite->AsSlot() == NULL) {
- BAILOUT("unsupported variable (not a slot)");
- }
- Slot* slot = rewrite->AsSlot();
- if (slot->type() != Slot::PARAMETER && slot->type() != Slot::LOCAL) {
- BAILOUT("unsupported slot type (not a parameter or local)");
- }
- // Ignore the passed destination.
- value_ = new SlotLocation(slot->type(), slot->index());
-}
-
-
-void ExpressionCfgBuilder::VisitLiteral(Literal* expr) {
- // Ignore the passed destination.
- value_ = new Constant(expr->handle());
-}
-
-
-void ExpressionCfgBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
- BAILOUT("RegExpLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
- BAILOUT("ObjectLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
- BAILOUT("ArrayLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- BAILOUT("CatchExtensionObject");
-}
-
-
-void ExpressionCfgBuilder::VisitAssignment(Assignment* expr) {
- if (expr->op() != Token::ASSIGN && expr->op() != Token::INIT_VAR) {
- BAILOUT("unsupported compound assignment");
- }
- Expression* lhs = expr->target();
- if (lhs->AsProperty() != NULL) {
- BAILOUT("unsupported property assignment");
- }
-
- Variable* var = lhs->AsVariableProxy()->AsVariable();
- if (var == NULL) {
- BAILOUT("unsupported invalid left-hand side");
- }
- if (var->is_global()) {
- BAILOUT("unsupported global variable");
- }
- Slot* slot = var->slot();
- ASSERT(slot != NULL);
- if (slot->type() != Slot::PARAMETER && slot->type() != Slot::LOCAL) {
- BAILOUT("unsupported slot lhs (not a parameter or local)");
- }
-
- // Parameter and local slot assignments.
- ExpressionCfgBuilder builder;
- SlotLocation* loc = new SlotLocation(slot->type(), slot->index());
- builder.Build(expr->value(), loc);
- if (builder.graph() == NULL) {
- BAILOUT("unsupported expression in assignment");
- }
- // If the expression did not come back in the slot location, append
- // a move to the CFG.
- graph_ = builder.graph();
- if (builder.value() != loc) {
- graph()->Append(new MoveInstr(loc, builder.value()));
- }
- // Record the assignment.
- assigned_vars_.AddElement(loc);
- // Ignore the destination passed to us.
- value_ = loc;
-}
-
-
-void ExpressionCfgBuilder::VisitThrow(Throw* expr) {
- BAILOUT("Throw");
-}
-
-
-void ExpressionCfgBuilder::VisitProperty(Property* expr) {
- ExpressionCfgBuilder object, key;
- object.Build(expr->obj(), NULL);
- if (object.graph() == NULL) {
- BAILOUT("unsupported object subexpression in propload");
- }
- key.Build(expr->key(), NULL);
- if (key.graph() == NULL) {
- BAILOUT("unsupported key subexpression in propload");
- }
-
- if (destination_ == NULL) destination_ = new TempLocation();
-
- graph_ = object.graph();
- // Insert a move to a fresh temporary if the object value is in a slot
- // that's assigned in the key.
- Location* temp = NULL;
- if (object.value()->is_slot() &&
- key.assigned_vars()->Contains(SlotLocation::cast(object.value()))) {
- temp = new TempLocation();
- graph()->Append(new MoveInstr(temp, object.value()));
- }
- graph()->Concatenate(key.graph());
- graph()->Append(new PropLoadInstr(destination_,
- temp == NULL ? object.value() : temp,
- key.value()));
-
- assigned_vars_ = *object.assigned_vars();
- assigned_vars()->Union(key.assigned_vars());
-
- value_ = destination_;
-}
-
-
-void ExpressionCfgBuilder::VisitCall(Call* expr) {
- BAILOUT("Call");
-}
-
-
-void ExpressionCfgBuilder::VisitCallEval(CallEval* expr) {
- BAILOUT("CallEval");
-}
-
-
-void ExpressionCfgBuilder::VisitCallNew(CallNew* expr) {
- BAILOUT("CallNew");
-}
-
-
-void ExpressionCfgBuilder::VisitCallRuntime(CallRuntime* expr) {
- BAILOUT("CallRuntime");
-}
-
-
-void ExpressionCfgBuilder::VisitUnaryOperation(UnaryOperation* expr) {
- BAILOUT("UnaryOperation");
-}
-
-
-void ExpressionCfgBuilder::VisitCountOperation(CountOperation* expr) {
- BAILOUT("CountOperation");
-}
-
-
-void ExpressionCfgBuilder::VisitBinaryOperation(BinaryOperation* expr) {
- Token::Value op = expr->op();
- switch (op) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- BAILOUT("unsupported binary operation");
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- ExpressionCfgBuilder left, right;
- left.Build(expr->left(), NULL);
- if (left.graph() == NULL) {
- BAILOUT("unsupported left subexpression in binop");
- }
- right.Build(expr->right(), NULL);
- if (right.graph() == NULL) {
- BAILOUT("unsupported right subexpression in binop");
- }
-
- if (destination_ == NULL) destination_ = new TempLocation();
-
- graph_ = left.graph();
- // Insert a move to a fresh temporary if the left value is in a
- // slot that's assigned on the right.
- Location* temp = NULL;
- if (left.value()->is_slot() &&
- right.assigned_vars()->Contains(SlotLocation::cast(left.value()))) {
- temp = new TempLocation();
- graph()->Append(new MoveInstr(temp, left.value()));
- }
- graph()->Concatenate(right.graph());
- graph()->Append(new BinaryOpInstr(destination_, op,
- temp == NULL ? left.value() : temp,
- right.value()));
-
- assigned_vars_ = *left.assigned_vars();
- assigned_vars()->Union(right.assigned_vars());
-
- value_ = destination_;
- return;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void ExpressionCfgBuilder::VisitCompareOperation(CompareOperation* expr) {
- BAILOUT("CompareOperation");
-}
-
-
-void ExpressionCfgBuilder::VisitThisFunction(ThisFunction* expr) {
- BAILOUT("ThisFunction");
-}
-
-#undef BAILOUT
-
-
-// Macros (temporarily) handling unsupported statement types.
-#define BAILOUT(reason) \
- do { \
- graph_ = NULL; \
- return; \
- } while (false)
-
-#define CHECK_BAILOUT() \
- if (graph() == NULL) { return; } else {}
-
-void StatementCfgBuilder::VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0, len = stmts->length(); i < len; i++) {
- Visit(stmts->at(i));
- CHECK_BAILOUT();
- if (!graph()->has_exit()) return;
- }
-}
-
-
-// The statement builder should not be used for declarations or expressions.
-void StatementCfgBuilder::VisitDeclaration(Declaration* decl) { UNREACHABLE(); }
-
-#define DEFINE_VISIT(type) \
- void StatementCfgBuilder::Visit##type(type* expr) { UNREACHABLE(); }
-EXPRESSION_NODE_LIST(DEFINE_VISIT)
-#undef DEFINE_VISIT
-
-
-void StatementCfgBuilder::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void StatementCfgBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
- ExpressionCfgBuilder builder;
- builder.Build(stmt->expression(), CfgGlobals::current()->nowhere());
- if (builder.graph() == NULL) {
- BAILOUT("unsupported expression in expression statement");
- }
- graph()->Append(new PositionInstr(stmt->statement_pos()));
- graph()->Concatenate(builder.graph());
-}
-
-
-void StatementCfgBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
- // Nothing to do.
-}
-
-
-void StatementCfgBuilder::VisitIfStatement(IfStatement* stmt) {
- BAILOUT("IfStatement");
-}
-
-
-void StatementCfgBuilder::VisitContinueStatement(ContinueStatement* stmt) {
- BAILOUT("ContinueStatement");
-}
-
-
-void StatementCfgBuilder::VisitBreakStatement(BreakStatement* stmt) {
- BAILOUT("BreakStatement");
-}
-
-
-void StatementCfgBuilder::VisitReturnStatement(ReturnStatement* stmt) {
- ExpressionCfgBuilder builder;
- builder.Build(stmt->expression(), NULL);
- if (builder.graph() == NULL) {
- BAILOUT("unsupported expression in return statement");
- }
-
- graph()->Append(new PositionInstr(stmt->statement_pos()));
- graph()->Concatenate(builder.graph());
- graph()->AppendReturnInstruction(builder.value());
-}
-
-
-void StatementCfgBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
- BAILOUT("WithEnterStatement");
-}
-
-
-void StatementCfgBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
- BAILOUT("WithExitStatement");
-}
-
-
-void StatementCfgBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
- BAILOUT("SwitchStatement");
-}
-
-
-void StatementCfgBuilder::VisitLoopStatement(LoopStatement* stmt) {
- BAILOUT("LoopStatement");
-}
-
-
-void StatementCfgBuilder::VisitForInStatement(ForInStatement* stmt) {
- BAILOUT("ForInStatement");
-}
-
-
-void StatementCfgBuilder::VisitTryCatch(TryCatch* stmt) {
- BAILOUT("TryCatch");
-}
-
-
-void StatementCfgBuilder::VisitTryFinally(TryFinally* stmt) {
- BAILOUT("TryFinally");
-}
-
-
-void StatementCfgBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
- BAILOUT("DebuggerStatement");
-}
-
-
-#ifdef DEBUG
-// CFG printing support (via depth-first, preorder block traversal).
-
-void Cfg::Print() {
- entry_->Print();
- entry_->Unmark();
-}
-
-
-void Constant::Print() {
- PrintF("Constant ");
- handle_->Print();
-}
-
-
-void Nowhere::Print() {
- PrintF("Nowhere");
-}
-
-
-void SlotLocation::Print() {
- PrintF("Slot ");
- switch (type_) {
- case Slot::PARAMETER:
- PrintF("(PARAMETER, %d)", index_);
- break;
- case Slot::LOCAL:
- PrintF("(LOCAL, %d)", index_);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Print() {
- PrintF("Temp %d", number());
-}
-
-
-void OneOperandInstruction::Print() {
- PrintF("(");
- location()->Print();
- PrintF(", ");
- value_->Print();
- PrintF(")");
-}
-
-
-void TwoOperandInstruction::Print() {
- PrintF("(");
- location()->Print();
- PrintF(", ");
- value0_->Print();
- PrintF(", ");
- value1_->Print();
- PrintF(")");
-}
-
-
-void MoveInstr::Print() {
- PrintF("Move ");
- OneOperandInstruction::Print();
- PrintF("\n");
-}
-
-
-void PropLoadInstr::Print() {
- PrintF("PropLoad ");
- TwoOperandInstruction::Print();
- PrintF("\n");
-}
-
-
-void BinaryOpInstr::Print() {
- switch (op()) {
- case Token::OR:
- // Two character operand.
- PrintF("BinaryOp[OR] ");
- break;
- case Token::AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- // Three character operands.
- PrintF("BinaryOp[%s] ", Token::Name(op()));
- break;
- case Token::COMMA:
- // Five character operand.
- PrintF("BinaryOp[COMMA] ");
- break;
- case Token::BIT_OR:
- // Six character operand.
- PrintF("BinaryOp[BIT_OR] ");
- break;
- case Token::BIT_XOR:
- case Token::BIT_AND:
- // Seven character operands.
- PrintF("BinaryOp[%s] ", Token::Name(op()));
- break;
- default:
- UNREACHABLE();
- }
- TwoOperandInstruction::Print();
- PrintF("\n");
-}
-
-
-void ReturnInstr::Print() {
- PrintF("Return ");
- OneOperandInstruction::Print();
- PrintF("\n");
-}
-
-
-void InstructionBlock::Print() {
- if (!is_marked_) {
- is_marked_ = true;
- PrintF("L%d:\n", number());
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- instructions_[i]->Print();
- }
- PrintF("Goto L%d\n\n", successor_->number());
- successor_->Print();
- }
-}
-
-
-void EntryNode::Print() {
- if (!is_marked_) {
- is_marked_ = true;
- successor_->Print();
- }
-}
-
-
-void ExitNode::Print() {
- if (!is_marked_) {
- is_marked_ = true;
- PrintF("L%d:\nExit\n\n", number());
- }
-}
-
-#endif // DEBUG
-
-} } // namespace v8::internal
diff --git a/V8Binding/v8/src/cfg.h b/V8Binding/v8/src/cfg.h
deleted file mode 100644
index 0eb0f92..0000000
--- a/V8Binding/v8/src/cfg.h
+++ /dev/null
@@ -1,871 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CFG_H_
-#define V8_CFG_H_
-
-#include "ast.h"
-
-namespace v8 {
-namespace internal {
-
-class ExitNode;
-class Location;
-
-// Translate a source AST into a control-flow graph (CFG). The CFG contains
-// single-entry, single-exit blocks of straight-line instructions and
-// administrative nodes.
-//
-// Instructions are described by the following grammar.
-//
-// <Instruction> ::=
-// Move <Location> <Value>
-// | PropLoad <Location> <Value> <Value>
-// | BinaryOp <Location> Token::Value <Value> <Value>
-// | Return Nowhere <Value>
-// | Position <Int>
-//
-// Values are trivial expressions:
-//
-// <Value> ::= Constant | <Location>
-//
-// Locations are storable values ('lvalues'). They can be slots,
-// compiler-generated temporaries, or the special location 'Nowhere'
-// indicating that no value is needed.
-//
-// <Location> ::=
-// SlotLocation Slot::Type <Index>
-// | TempLocation
-// | Nowhere
-
-
-// Administrative nodes: There are several types of 'administrative' nodes
-// that do not contain instructions and do not necessarily have a single
-// predecessor and a single successor.
-//
-// EntryNode: there is a distinguished entry node that has no predecessors
-// and a single successor.
-//
-// ExitNode: there is a distinguished exit node that has arbitrarily many
-// predecessors and no successor.
-//
-// JoinNode: join nodes have multiple predecessors and a single successor.
-//
-// BranchNode: branch nodes have a single predecessor and multiple
-// successors.
-
-
-// A convenient class to keep 'global' values when building a CFG. Since
-// CFG construction can be invoked recursively, CFG globals are stacked.
-class CfgGlobals BASE_EMBEDDED {
- public:
- explicit CfgGlobals(FunctionLiteral* fun);
-
- ~CfgGlobals() { top_ = previous_; }
-
- static CfgGlobals* current() {
- ASSERT(top_ != NULL);
- return top_;
- }
-
- // The function currently being compiled.
- FunctionLiteral* fun() { return global_fun_; }
-
- // The shared global exit node for all exits from the function.
- ExitNode* exit() { return global_exit_; }
-
- // A singleton.
- Location* nowhere() { return nowhere_; }
-
-#ifdef DEBUG
- int next_node_number() { return node_counter_++; }
- int next_temp_number() { return temp_counter_++; }
-#endif
-
- private:
- static CfgGlobals* top_;
- FunctionLiteral* global_fun_;
- ExitNode* global_exit_;
- Location* nowhere_;
-
-#ifdef DEBUG
- // Used to number nodes and temporaries when printing.
- int node_counter_;
- int temp_counter_;
-#endif
-
- CfgGlobals* previous_;
-};
-
-
-class SlotLocation;
-
-// Values represent trivial source expressions: ones with no side effects
-// and that do not require code to be generated.
-class Value : public ZoneObject {
- public:
- virtual ~Value() {}
-
- // Predicates:
-
- virtual bool is_temporary() { return false; }
- virtual bool is_slot() { return false; }
- virtual bool is_constant() { return false; }
-
- // True if the value is a temporary allocated to the stack in
- // fast-compilation mode.
- virtual bool is_on_stack() { return false; }
-
- // Support for fast-compilation mode:
-
- // Move the value into a register.
- virtual void Get(MacroAssembler* masm, Register reg) = 0;
-
- // Push the value on the stack.
- virtual void Push(MacroAssembler* masm) = 0;
-
- // Move the value into a slot location.
- virtual void MoveToSlot(MacroAssembler* masm, SlotLocation* loc) = 0;
-
-#ifdef DEBUG
- virtual void Print() = 0;
-#endif
-};
-
-
-// A compile-time constant that appeared as a literal in the source AST.
-class Constant : public Value {
- public:
- explicit Constant(Handle<Object> handle) : handle_(handle) {}
-
- // Cast accessor.
- static Constant* cast(Value* value) {
- ASSERT(value->is_constant());
- return reinterpret_cast<Constant*>(value);
- }
-
- // Accessors.
- Handle<Object> handle() { return handle_; }
-
- // Predicates.
- bool is_constant() { return true; }
-
- // Support for fast-compilation mode.
- void Get(MacroAssembler* masm, Register reg);
- void Push(MacroAssembler* masm);
- void MoveToSlot(MacroAssembler* masm, SlotLocation* loc);
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- Handle<Object> handle_;
-};
-
-
-// Locations are values that can be stored into ('lvalues').
-class Location : public Value {
- public:
- virtual ~Location() {}
-
- // Static factory function returning the singleton nowhere location.
- static Location* Nowhere() {
- return CfgGlobals::current()->nowhere();
- }
-
- // Support for fast-compilation mode:
-
- // Assumes temporaries have been allocated.
- virtual void Get(MacroAssembler* masm, Register reg) = 0;
-
- // Store the value in a register to the location. Assumes temporaries
- // have been allocated.
- virtual void Set(MacroAssembler* masm, Register reg) = 0;
-
- // Assumes temporaries have been allocated, and if the value is a
- // temporary it was not allocated to the stack.
- virtual void Push(MacroAssembler* masm) = 0;
-
- // Emit code to move a value into this location.
- virtual void Move(MacroAssembler* masm, Value* value) = 0;
-
-#ifdef DEBUG
- virtual void Print() = 0;
-#endif
-};
-
-
-// Nowhere is a special (singleton) location that indicates the value of a
-// computation is not needed (though its side effects are).
-class Nowhere : public Location {
- public:
- // We should not try to emit code to read Nowhere.
- void Get(MacroAssembler* masm, Register reg) { UNREACHABLE(); }
- void Push(MacroAssembler* masm) { UNREACHABLE(); }
- void MoveToSlot(MacroAssembler* masm, SlotLocation* loc) { UNREACHABLE(); }
-
- // Setting Nowhere is ignored.
- void Set(MacroAssembler* masm, Register reg) {}
- void Move(MacroAssembler* masm, Value* value) {}
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- Nowhere() {}
-
- friend class CfgGlobals;
-};
-
-
-// SlotLocations represent parameters and stack-allocated (i.e.,
-// non-context) local variables.
-class SlotLocation : public Location {
- public:
- SlotLocation(Slot::Type type, int index) : type_(type), index_(index) {}
-
- // Cast accessor.
- static SlotLocation* cast(Value* value) {
- ASSERT(value->is_slot());
- return reinterpret_cast<SlotLocation*>(value);
- }
-
- // Accessors.
- Slot::Type type() { return type_; }
- int index() { return index_; }
-
- // Predicates.
- bool is_slot() { return true; }
-
- // Support for fast-compilation mode.
- void Get(MacroAssembler* masm, Register reg);
- void Set(MacroAssembler* masm, Register reg);
- void Push(MacroAssembler* masm);
- void Move(MacroAssembler* masm, Value* value);
- void MoveToSlot(MacroAssembler* masm, SlotLocation* loc);
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- Slot::Type type_;
- int index_;
-};
-
-
-// TempLocations represent compiler generated temporaries. They are
-// allocated to registers or memory either before code generation (in the
-// optimized-for-speed compiler) or on the fly during code generation (in
-// the optimized-for-space compiler).
-class TempLocation : public Location {
- public:
- // Fast-compilation mode allocation decisions.
- enum Where {
- NOT_ALLOCATED, // Not yet allocated.
- ACCUMULATOR, // Allocated to the dedicated accumulator register.
- STACK // " " " " stack.
- };
-
- TempLocation() : where_(NOT_ALLOCATED) {
-#ifdef DEBUG
- number_ = -1;
-#endif
- }
-
- // Cast accessor.
- static TempLocation* cast(Value* value) {
- ASSERT(value->is_temporary());
- return reinterpret_cast<TempLocation*>(value);
- }
-
- // Accessors.
- Where where() { return where_; }
- void set_where(Where where) {
- ASSERT(where_ == TempLocation::NOT_ALLOCATED);
- where_ = where;
- }
-
- // Predicates.
- bool is_on_stack() { return where_ == STACK; }
- bool is_temporary() { return true; }
-
- // Support for fast-compilation mode. Assume the temp has been allocated.
- void Get(MacroAssembler* masm, Register reg);
- void Set(MacroAssembler* masm, Register reg);
- void Push(MacroAssembler* masm);
- void Move(MacroAssembler* masm, Value* value);
- void MoveToSlot(MacroAssembler* masm, SlotLocation* loc);
-
-#ifdef DEBUG
- int number() {
- if (number_ == -1) number_ = CfgGlobals::current()->next_temp_number();
- return number_;
- }
-
- void Print();
-#endif
-
- private:
- Where where_;
-
-#ifdef DEBUG
- int number_;
-#endif
-};
-
-
-// Instructions are computations. The represent non-trivial source
-// expressions: typically ones that have side effects and require code to
-// be generated.
-class Instruction : public ZoneObject {
- public:
- // Accessors.
- Location* location() { return location_; }
- void set_location(Location* location) { location_ = location; }
-
- // Support for fast-compilation mode:
-
- // Emit code to perform the instruction.
- virtual void Compile(MacroAssembler* masm) = 0;
-
- // Allocate a temporary which is the result of the immediate predecessor
- // instruction. It is allocated to the accumulator register if it is used
- // as an operand to this instruction, otherwise to the stack.
- virtual void FastAllocate(TempLocation* temp) = 0;
-
-#ifdef DEBUG
- virtual void Print() = 0;
-#endif
-
- protected:
- // Every instruction has a location where its result is stored (which may
- // be Nowhere).
- explicit Instruction(Location* location) : location_(location) {}
-
- virtual ~Instruction() {}
-
- Location* location_;
-};
-
-
-// Base class of instructions that have no input operands.
-class ZeroOperandInstruction : public Instruction {
- public:
- // Support for fast-compilation mode:
- virtual void Compile(MacroAssembler* masm) = 0;
- void FastAllocate(TempLocation* temp);
-
-#ifdef DEBUG
- // Printing support: print the operands (nothing).
- virtual void Print() {}
-#endif
-
- protected:
- explicit ZeroOperandInstruction(Location* loc) : Instruction(loc) {}
-};
-
-
-// Base class of instructions that have a single input operand.
-class OneOperandInstruction : public Instruction {
- public:
- // Support for fast-compilation mode:
- virtual void Compile(MacroAssembler* masm) = 0;
- void FastAllocate(TempLocation* temp);
-
-#ifdef DEBUG
- // Printing support: print the operands.
- virtual void Print();
-#endif
-
- protected:
- OneOperandInstruction(Location* loc, Value* value)
- : Instruction(loc), value_(value) {
- }
-
- Value* value_;
-};
-
-
-// Base class of instructions that have two input operands.
-class TwoOperandInstruction : public Instruction {
- public:
- // Support for fast-compilation mode:
- virtual void Compile(MacroAssembler* masm) = 0;
- void FastAllocate(TempLocation* temp);
-
-#ifdef DEBUG
- // Printing support: print the operands.
- virtual void Print();
-#endif
-
- protected:
- TwoOperandInstruction(Location* loc, Value* value0, Value* value1)
- : Instruction(loc), value0_(value0), value1_(value1) {
- }
-
- Value* value0_;
- Value* value1_;
-};
-
-
-// A phantom instruction that indicates the start of a statement. It
-// causes the statement position to be recorded in the relocation
-// information but generates no code.
-class PositionInstr : public ZeroOperandInstruction {
- public:
- explicit PositionInstr(int pos)
- : ZeroOperandInstruction(CfgGlobals::current()->nowhere()), pos_(pos) {
- }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
- // This should not be called. The last instruction of the previous
- // statement should not have a temporary as its location.
- void FastAllocate(TempLocation* temp) { UNREACHABLE(); }
-
-#ifdef DEBUG
- // Printing support. Print nothing.
- void Print() {}
-#endif
-
- private:
- int pos_;
-};
-
-
-// Move a value to a location.
-class MoveInstr : public OneOperandInstruction {
- public:
- MoveInstr(Location* loc, Value* value)
- : OneOperandInstruction(loc, value) {
- }
-
- // Accessors.
- Value* value() { return value_; }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- // Printing support.
- void Print();
-#endif
-};
-
-
-// Load a property from a receiver, leaving the result in a location.
-class PropLoadInstr : public TwoOperandInstruction {
- public:
- PropLoadInstr(Location* loc, Value* object, Value* key)
- : TwoOperandInstruction(loc, object, key) {
- }
-
- // Accessors.
- Value* object() { return value0_; }
- Value* key() { return value1_; }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- void Print();
-#endif
-};
-
-
-// Perform a (non-short-circuited) binary operation on a pair of values,
-// leaving the result in a location.
-class BinaryOpInstr : public TwoOperandInstruction {
- public:
- BinaryOpInstr(Location* loc, Token::Value op, Value* left, Value* right)
- : TwoOperandInstruction(loc, left, right), op_(op) {
- }
-
- // Accessors.
- Value* left() { return value0_; }
- Value* right() { return value1_; }
- Token::Value op() { return op_; }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- Token::Value op_;
-};
-
-
-// Return a value. Has the side effect of moving its value into the return
-// value register. Can only occur as the last instruction in an instruction
-// block, and implies that the block is closed (cannot have instructions
-// appended or graph fragments concatenated to the end) and that the block's
-// successor is the global exit node for the current function.
-class ReturnInstr : public OneOperandInstruction {
- public:
- explicit ReturnInstr(Value* value)
- : OneOperandInstruction(CfgGlobals::current()->nowhere(), value) {
- }
-
- virtual ~ReturnInstr() {}
-
- // Accessors.
- Value* value() { return value_; }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- void Print();
-#endif
-};
-
-
-// Nodes make up control-flow graphs.
-class CfgNode : public ZoneObject {
- public:
- CfgNode() : is_marked_(false) {
-#ifdef DEBUG
- number_ = -1;
-#endif
- }
-
- virtual ~CfgNode() {}
-
- // Because CFGs contain cycles, nodes support marking during traversal
- // (e.g., for printing or compilation). The traversal functions will mark
- // unmarked nodes and backtrack if they encounter a marked one. After a
- // traversal, the graph should be explicitly unmarked by calling Unmark on
- // the entry node.
- bool is_marked() { return is_marked_; }
- virtual void Unmark() = 0;
-
- // Predicates:
-
- // True if the node is an instruction block.
- virtual bool is_block() { return false; }
-
- // Support for fast-compilation mode. Emit the instructions or control
- // flow represented by the node.
- virtual void Compile(MacroAssembler* masm) = 0;
-
-#ifdef DEBUG
- int number() {
- if (number_ == -1) number_ = CfgGlobals::current()->next_node_number();
- return number_;
- }
-
- virtual void Print() = 0;
-#endif
-
- protected:
- bool is_marked_;
-
-#ifdef DEBUG
- int number_;
-#endif
-};
-
-
-// A block is a single-entry, single-exit block of instructions.
-class InstructionBlock : public CfgNode {
- public:
- InstructionBlock() : successor_(NULL), instructions_(4) {}
-
- virtual ~InstructionBlock() {}
-
- void Unmark();
-
- // Cast accessor.
- static InstructionBlock* cast(CfgNode* node) {
- ASSERT(node->is_block());
- return reinterpret_cast<InstructionBlock*>(node);
- }
-
- bool is_block() { return true; }
-
- // Accessors.
- CfgNode* successor() { return successor_; }
-
- void set_successor(CfgNode* succ) {
- ASSERT(successor_ == NULL);
- successor_ = succ;
- }
-
- ZoneList<Instruction*>* instructions() { return &instructions_; }
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
- // Add an instruction to the end of the block.
- void Append(Instruction* instr) { instructions_.Add(instr); }
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- CfgNode* successor_;
- ZoneList<Instruction*> instructions_;
-};
-
-
-// An entry node (one per function).
-class EntryNode : public CfgNode {
- public:
- explicit EntryNode(InstructionBlock* succ) : successor_(succ) {}
-
- virtual ~EntryNode() {}
-
- void Unmark();
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- void Print();
-#endif
-
- private:
- InstructionBlock* successor_;
-};
-
-
-// An exit node (one per function).
-class ExitNode : public CfgNode {
- public:
- ExitNode() {}
-
- virtual ~ExitNode() {}
-
- void Unmark();
-
- // Support for fast-compilation mode.
- void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
- void Print();
-#endif
-};
-
-
-// A CFG consists of a linked structure of nodes. Nodes are linked by
-// pointing to their successors, always beginning with a (single) entry node
-// (not necessarily of type EntryNode). If it is still possible to add
-// nodes to the end of the graph (i.e., there is a (single) path that does
-// not end with the global exit node), then the CFG has an exit node as
-// well.
-//
-// The empty CFG is represented by a NULL entry and a NULL exit.
-//
-// We use the term 'open fragment' to mean a CFG whose entry and exits are
-// both instruction blocks. It is always possible to add instructions and
-// nodes to the beginning or end of an open fragment.
-//
-// We use the term 'closed fragment' to mean a CFG whose entry is an
-// instruction block and whose exit is NULL (all paths go to the global
-// exit).
-//
-// We use the term 'fragment' to refer to a CFG that is known to be an open
-// or closed fragment.
-class Cfg : public ZoneObject {
- public:
- // Create an empty CFG fragment.
- Cfg() : entry_(NULL), exit_(NULL) {}
-
- // Build the CFG for a function. The returned CFG begins with an
- // EntryNode and all paths end with the ExitNode.
- static Cfg* Build();
-
- // The entry and exit nodes of the CFG (not necessarily EntryNode and
- // ExitNode).
- CfgNode* entry() { return entry_; }
- CfgNode* exit() { return exit_; }
-
- // True if the CFG has no nodes.
- bool is_empty() { return entry_ == NULL; }
-
- // True if the CFG has an available exit node (i.e., it can be appended or
- // concatenated to).
- bool has_exit() { return exit_ != NULL; }
-
- // Add an EntryNode to a CFG fragment. It is no longer a fragment
- // (instructions can no longer be prepended).
- void PrependEntryNode();
-
- // Append an instruction to the end of an open fragment.
- void Append(Instruction* instr);
-
- // Appends a return instruction to the end of an open fragment and make
- // it a closed fragment (the exit's successor becomes global exit node).
- void AppendReturnInstruction(Value* value);
-
- // Glue an other CFG fragment to the end of this (open) fragment.
- void Concatenate(Cfg* other);
-
- // Support for compilation. Compile the entire CFG.
- Handle<Code> Compile(Handle<Script> script);
-
-#ifdef DEBUG
- // Support for printing.
- void Print();
-#endif
-
- private:
- // Entry and exit nodes.
- CfgNode* entry_;
- CfgNode* exit_;
-};
-
-
-// An implementation of a set of locations (currently slot locations), most
-// of the operations are destructive.
-class LocationSet BASE_EMBEDDED {
- public:
- // Construct an empty location set.
- LocationSet() : parameters_(0), locals_(0) {}
-
- // Raw accessors.
- uintptr_t parameters() { return parameters_; }
- uintptr_t locals() { return locals_; }
-
- // Make this the empty set.
- void Empty() {
- parameters_ = locals_ = 0;
- }
-
- // Insert an element.
- void AddElement(SlotLocation* location) {
- if (location->type() == Slot::PARAMETER) {
- // Parameter indexes begin with -1 ('this').
- ASSERT(location->index() < kBitsPerPointer - 1);
- parameters_ |= (1 << (location->index() + 1));
- } else {
- ASSERT(location->type() == Slot::LOCAL);
- ASSERT(location->index() < kBitsPerPointer);
- locals_ |= (1 << location->index());
- }
- }
-
- // (Destructively) compute the union with another set.
- void Union(LocationSet* other) {
- parameters_ |= other->parameters();
- locals_ |= other->locals();
- }
-
- bool Contains(SlotLocation* location) {
- if (location->type() == Slot::PARAMETER) {
- ASSERT(location->index() < kBitsPerPointer - 1);
- return (parameters_ & (1 << (location->index() + 1)));
- } else {
- ASSERT(location->type() == Slot::LOCAL);
- ASSERT(location->index() < kBitsPerPointer);
- return (locals_ & (1 << location->index()));
- }
- }
-
- private:
- uintptr_t parameters_;
- uintptr_t locals_;
-};
-
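
For readers following the removed multipass/CFG code, a small self-contained sketch of the bit encoding LocationSet relied on: parameter indices start at -1 (the receiver 'this'), so they are shifted by one before being stored in the parameters_ bitmap, while local indices map directly onto locals_. All names below (BitLocationSet, Add, Contains) are illustrative, not part of the V8 sources.

    #include <cassert>
    #include <cstdint>

    // Illustrative stand-in for the removed LocationSet: two word-sized
    // bitmaps, one for parameters (index -1 == 'this', stored shifted by one)
    // and one for stack locals.
    class BitLocationSet {
     public:
      enum Kind { PARAMETER, LOCAL };

      void Add(Kind kind, int index) {
        if (kind == PARAMETER) {
          assert(index >= -1 && index < kBits - 1);
          parameters_ |= uintptr_t{1} << (index + 1);  // 'this' -> bit 0
        } else {
          assert(index >= 0 && index < kBits);
          locals_ |= uintptr_t{1} << index;
        }
      }

      bool Contains(Kind kind, int index) const {
        return kind == PARAMETER
            ? ((parameters_ >> (index + 1)) & 1) != 0
            : ((locals_ >> index) & 1) != 0;
      }

     private:
      static const int kBits = 8 * sizeof(uintptr_t);
      uintptr_t parameters_ = 0;
      uintptr_t locals_ = 0;
    };
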
-
-// An ExpressionCfgBuilder traverses an expression and returns an open CFG
-// fragment (currently a possibly empty list of instructions represented by
-// a singleton instruction block) and the expression's value.
-//
-// Failure to build the CFG is indicated by a NULL CFG.
-class ExpressionCfgBuilder : public AstVisitor {
- public:
- ExpressionCfgBuilder() : destination_(NULL), value_(NULL), graph_(NULL) {}
-
- // Result accessors.
- Value* value() { return value_; }
- Cfg* graph() { return graph_; }
- LocationSet* assigned_vars() { return &assigned_vars_; }
-
- // Build the cfg for an expression and remember its value. The
- // destination is a 'hint' where the value should go which may be ignored.
- // NULL is used to indicate no preference.
- //
- // Concretely, if the expression needs to generate a temporary for its
- // value, it should use the passed destination or generate one if NULL.
- void Build(Expression* expr, Location* destination) {
- value_ = NULL;
- graph_ = new Cfg();
- assigned_vars_.Empty();
- destination_ = destination;
- Visit(expr);
- }
-
- // AST node visitors.
-#define DECLARE_VISIT(type) void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
- // State for the visitor. Input parameter:
- Location* destination_;
-
- // Output parameters:
- Value* value_;
- Cfg* graph_;
- LocationSet assigned_vars_;
-};
-
-
-// A StatementCfgBuilder maintains a CFG fragment accumulator. When it
-// visits a statement, it concatenates the CFG for the statement to the end
-// of the accumulator.
-class StatementCfgBuilder : public AstVisitor {
- public:
- StatementCfgBuilder() : graph_(new Cfg()) {}
-
- Cfg* graph() { return graph_; }
-
- void VisitStatements(ZoneList<Statement*>* stmts);
-
- // AST node visitors.
-#define DECLARE_VISIT(type) void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
- // State for the visitor. Input/output parameter:
- Cfg* graph_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_CFG_H_
diff --git a/V8Binding/v8/src/codegen.cc b/V8Binding/v8/src/codegen.cc
index 8e516c0..a18fa0f 100644
--- a/V8Binding/v8/src/codegen.cc
+++ b/V8Binding/v8/src/codegen.cc
@@ -469,44 +469,32 @@ bool CodeGenerator::PatchInlineRuntimeEntry(Handle<String> name,
}
-void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) {
- int pos = fun->start_position();
- if (pos != RelocInfo::kNoPosition) {
- masm()->RecordStatementPosition(pos);
- masm()->RecordPosition(pos);
- }
+static inline void RecordPositions(CodeGenerator* cgen, int pos) {
+ if (pos != RelocInfo::kNoPosition) {
+ cgen->masm()->RecordStatementPosition(pos);
+ cgen->masm()->RecordPosition(pos);
}
}
+void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
+ if (FLAG_debug_info) RecordPositions(this, fun->start_position());
+}
+
+
void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) {
- int pos = fun->end_position();
- if (pos != RelocInfo::kNoPosition) {
- masm()->RecordStatementPosition(pos);
- masm()->RecordPosition(pos);
- }
- }
+ if (FLAG_debug_info) RecordPositions(this, fun->end_position());
}
-void CodeGenerator::CodeForStatementPosition(AstNode* node) {
- if (FLAG_debug_info) {
- int pos = node->statement_pos();
- if (pos != RelocInfo::kNoPosition) {
- masm()->RecordStatementPosition(pos);
- masm()->RecordPosition(pos);
- }
- }
+void CodeGenerator::CodeForStatementPosition(Statement* stmt) {
+ if (FLAG_debug_info) RecordPositions(this, stmt->statement_pos());
}
void CodeGenerator::CodeForSourcePosition(int pos) {
- if (FLAG_debug_info) {
- if (pos != RelocInfo::kNoPosition) {
- masm()->RecordPosition(pos);
- }
+ if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
+ masm()->RecordPosition(pos);
}
}
@@ -517,7 +505,10 @@ const char* RuntimeStub::GetName() {
void RuntimeStub::Generate(MacroAssembler* masm) {
- masm->TailCallRuntime(ExternalReference(id_), num_arguments_);
+ Runtime::Function* f = Runtime::FunctionForId(id_);
+ masm->TailCallRuntime(ExternalReference(f),
+ num_arguments_,
+ f->result_size);
}
diff --git a/V8Binding/v8/src/codegen.h b/V8Binding/v8/src/codegen.h
index d6967b7..d03f4b6 100644
--- a/V8Binding/v8/src/codegen.h
+++ b/V8Binding/v8/src/codegen.h
@@ -286,7 +286,7 @@ class CompareStub: public CodeStub {
class CEntryStub : public CodeStub {
public:
- CEntryStub() { }
+ explicit CEntryStub(int result_size) : result_size_(result_size) { }
void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
@@ -302,10 +302,14 @@ class CEntryStub : public CodeStub {
void GenerateThrowTOS(MacroAssembler* masm);
void GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type);
-
private:
+ // Number of pointers/values returned.
+ int result_size_;
+
Major MajorKey() { return CEntry; }
- int MinorKey() { return 0; }
+ // The minor key must differ whenever different result_size_ values cause
+ // different code to be generated.
+ int MinorKey();
const char* GetName() { return "CEntryStub"; }
};
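
The comment above requires MinorKey() to distinguish stubs whose result_size_ leads to different generated code. The definition itself is not part of this hunk; a plausible shape, shown here purely as a hypothetical sketch, folds the result size into the minor key so the stub cache never hands back code compiled for a different result size:

    // Hypothetical sketch only; the real definition lives outside this hunk.
    int CEntryStub::MinorKey() {
      // Distinct minor keys for distinct result sizes keep the code cache from
      // sharing a stub between one-word and two-word results.
      ASSERT(result_size_ >= 1 && result_size_ <= 2);
      return result_size_ == 1 ? 0 : 1;
    }
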
@@ -313,7 +317,7 @@ class CEntryStub : public CodeStub {
class CEntryDebugBreakStub : public CEntryStub {
public:
- CEntryDebugBreakStub() { }
+ CEntryDebugBreakStub() : CEntryStub(1) { }
void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
diff --git a/V8Binding/v8/src/compiler.cc b/V8Binding/v8/src/compiler.cc
index 15f6479..6ba7a9a 100644
--- a/V8Binding/v8/src/compiler.cc
+++ b/V8Binding/v8/src/compiler.cc
@@ -28,7 +28,6 @@
#include "v8.h"
#include "bootstrapper.h"
-#include "cfg.h"
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "compiler.h"
@@ -79,22 +78,6 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
return Handle<Code>::null();
}
- if (FLAG_multipass) {
- CfgGlobals scope(literal);
- Cfg* cfg = Cfg::Build();
-#ifdef DEBUG
- if (FLAG_print_cfg && cfg != NULL) {
- SmartPointer<char> name = literal->name()->ToCString();
- PrintF("Function \"%s\":\n", *name);
- cfg->Print();
- PrintF("\n");
- }
-#endif
- if (cfg != NULL) {
- return cfg->Compile(script);
- }
- }
-
// Generate code and return it.
Handle<Code> result = CodeGenerator::MakeCode(literal, script, is_eval);
return result;
@@ -121,8 +104,6 @@ static Handle<JSFunction> MakeFunction(bool is_global,
ScriptDataImpl* pre_data) {
CompilationZoneScope zone_scope(DELETE_ON_EXIT);
- // Make sure we have an initial stack limit.
- StackGuard guard;
PostponeInterruptsScope postpone;
ASSERT(!i::Top::global_context().is_null());
@@ -351,8 +332,6 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
// The VM is in the COMPILER state until exiting this function.
VMState state(COMPILER);
- // Make sure we have an initial stack limit.
- StackGuard guard;
PostponeInterruptsScope postpone;
// Compute name, source code and script data.
diff --git a/V8Binding/v8/src/d8.js b/V8Binding/v8/src/d8.js
index 7249eca..be4a051 100644
--- a/V8Binding/v8/src/d8.js
+++ b/V8Binding/v8/src/d8.js
@@ -102,7 +102,8 @@ Debug.ScriptCompilationType = { Host: 0,
Debug.ScopeType = { Global: 0,
Local: 1,
With: 2,
- Closure: 3 };
+ Closure: 3,
+ Catch: 4 };
// Current debug state.
@@ -900,6 +901,10 @@ function formatScope_(scope) {
result += 'With, ';
result += '#' + scope.object.ref + '#';
break;
+ case Debug.ScopeType.Catch:
+ result += 'Catch, ';
+ result += '#' + scope.object.ref + '#';
+ break;
case Debug.ScopeType.Closure:
result += 'Closure';
break;
diff --git a/V8Binding/v8/src/debug-agent.cc b/V8Binding/v8/src/debug-agent.cc
index 3dba53a..9d5cace 100644
--- a/V8Binding/v8/src/debug-agent.cc
+++ b/V8Binding/v8/src/debug-agent.cc
@@ -65,6 +65,7 @@ void DebuggerAgent::Run() {
// Accept connections on the bound port.
while (!terminate_) {
bool ok = server_->Listen(1);
+ listening_->Signal();
if (ok) {
// Accept the new connection.
Socket* client = server_->Accept();
@@ -93,6 +94,10 @@ void DebuggerAgent::Shutdown() {
}
+void DebuggerAgent::WaitUntilListening() {
+ listening_->Wait();
+}
+
void DebuggerAgent::CreateSession(Socket* client) {
ScopedLock with(session_access_);
diff --git a/V8Binding/v8/src/debug-agent.h b/V8Binding/v8/src/debug-agent.h
index 04f883f..3647994 100644
--- a/V8Binding/v8/src/debug-agent.h
+++ b/V8Binding/v8/src/debug-agent.h
@@ -47,7 +47,8 @@ class DebuggerAgent: public Thread {
: name_(StrDup(name)), port_(port),
server_(OS::CreateSocket()), terminate_(false),
session_access_(OS::CreateMutex()), session_(NULL),
- terminate_now_(OS::CreateSemaphore(0)) {
+ terminate_now_(OS::CreateSemaphore(0)),
+ listening_(OS::CreateSemaphore(0)) {
ASSERT(instance_ == NULL);
instance_ = this;
}
@@ -57,6 +58,7 @@ class DebuggerAgent: public Thread {
}
void Shutdown();
+ void WaitUntilListening();
private:
void Run();
@@ -72,6 +74,7 @@ class DebuggerAgent: public Thread {
 Mutex* session_access_; // Mutex guarding access to session_.
DebuggerAgentSession* session_; // Current active session if any.
Semaphore* terminate_now_; // Semaphore to signal termination.
+ Semaphore* listening_;
static DebuggerAgent* instance_;
diff --git a/V8Binding/v8/src/debug-delay.js b/V8Binding/v8/src/debug-delay.js
index ce70c75..cb789be 100644
--- a/V8Binding/v8/src/debug-delay.js
+++ b/V8Binding/v8/src/debug-delay.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// jsminify this file, js2c: jsmin
-
// Default number of frames to include in the response to backtrace request.
const kDefaultBacktraceLength = 10;
@@ -35,7 +33,7 @@ const Debug = {};
// Regular expression to skip "crud" at the beginning of a source line which is
// not really code. Currently the regular expression matches whitespace and
// comments.
-const sourceLineBeginningSkip = /^(?:[ \v\h]*(?:\/\*.*?\*\/)*)*/;
+const sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
// Debug events which can occur in the V8 JavaScript engine. These originate
// from the API include file debug.h.
@@ -350,7 +348,7 @@ ScriptBreakPoint.prototype.set = function (script) {
if (!script.sourceColumnStart_) {
script.sourceColumnStart_ = new Array(script.lineCount());
}
-
+
// Fill cache if needed and get column where the actual source starts.
if (IS_UNDEFINED(script.sourceColumnStart_[line])) {
script.sourceColumnStart_[line] =
@@ -361,11 +359,11 @@ ScriptBreakPoint.prototype.set = function (script) {
// Convert the line and column into an absolute position within the script.
var pos = Debug.findScriptSourcePosition(script, this.line(), column);
-
+
// If the position is not found in the script (the script might be shorter
// than it used to be) just ignore it.
if (pos === null) return;
-
+
// Create a break point object and set the break point.
break_point = MakeBreakPoint(pos, this.line(), this.column(), this);
break_point.setIgnoreCount(this.ignoreCount());
@@ -492,7 +490,7 @@ Debug.findFunctionSourceLocation = function(func, opt_line, opt_column) {
// Returns the character position in a script based on a line number and an
// optional position within that line.
Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
- var location = script.locationFromLine(opt_line, opt_column);
+ var location = script.locationFromLine(opt_line, opt_column);
return location ? location.position : null;
}
@@ -944,7 +942,7 @@ ExceptionEvent.prototype.toJSONProtocol = function() {
o.body = { uncaught: this.uncaught_,
exception: MakeMirror(this.exception_)
};
-
+
 // Exceptions might happen without any JavaScript frames.
if (this.exec_state_.frameCount() > 0) {
o.body.sourceLine = this.sourceLine();
@@ -1097,7 +1095,7 @@ DebugCommandProcessor.prototype.processDebugRequest = function (request) {
function ProtocolMessage(request) {
// Update sequence number.
this.seq = next_response_seq++;
-
+
if (request) {
// If message is based on a request this is a response. Fill the initial
// response from the request.
@@ -1487,7 +1485,7 @@ DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(request,
response.failed('Missing argument "groupId"');
return;
}
-
+
var cleared_break_points = [];
var new_script_break_points = [];
for (var i = 0; i < script_break_points.length; i++) {
@@ -1603,7 +1601,7 @@ DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
if (index < 0 || this.exec_state_.frameCount() <= index) {
return response.failed('Invalid frame number');
}
-
+
this.exec_state_.setSelectedFrame(request.arguments.number);
}
response.body = this.exec_state_.frame();
@@ -1633,7 +1631,7 @@ DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
// Get the frame for which the scopes are requested.
var frame = this.frameForScopeRequest_(request);
-
+
// Fill all scopes for this frame.
var total_scopes = frame.scopeCount();
var scopes = [];
@@ -1750,7 +1748,7 @@ DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
includeSource = %ToBoolean(request.arguments.includeSource);
response.setOption('includeSource', includeSource);
}
-
+
// Lookup handles.
var mirrors = {};
for (var i = 0; i < handles.length; i++) {
diff --git a/V8Binding/v8/src/debug.cc b/V8Binding/v8/src/debug.cc
index cfbadf3..ec658d6 100644
--- a/V8Binding/v8/src/debug.cc
+++ b/V8Binding/v8/src/debug.cc
@@ -518,6 +518,7 @@ void Debug::ThreadInit() {
thread_local_.step_count_ = 0;
thread_local_.last_fp_ = 0;
thread_local_.step_into_fp_ = 0;
+ thread_local_.step_out_fp_ = 0;
thread_local_.after_break_target_ = 0;
thread_local_.debugger_entry_ = NULL;
thread_local_.pending_interrupts_ = 0;
@@ -562,7 +563,6 @@ bool Debug::break_on_exception_ = false;
bool Debug::break_on_uncaught_exception_ = true;
Handle<Context> Debug::debug_context_ = Handle<Context>();
-Code* Debug::debug_break_return_entry_ = NULL;
Code* Debug::debug_break_return_ = NULL;
@@ -643,11 +643,6 @@ void ScriptCache::HandleWeakScript(v8::Persistent<v8::Value> obj, void* data) {
void Debug::Setup(bool create_heap_objects) {
ThreadInit();
if (create_heap_objects) {
- // Get code to handle entry to debug break on return.
- debug_break_return_entry_ =
- Builtins::builtin(Builtins::Return_DebugBreakEntry);
- ASSERT(debug_break_return_entry_->IsCode());
-
// Get code to handle debug break on return.
debug_break_return_ =
Builtins::builtin(Builtins::Return_DebugBreak);
@@ -809,7 +804,6 @@ void Debug::PreemptionWhileInDebugger() {
void Debug::Iterate(ObjectVisitor* v) {
- v->VisitPointer(bit_cast<Object**, Code**>(&(debug_break_return_entry_)));
v->VisitPointer(bit_cast<Object**, Code**>(&(debug_break_return_)));
}
@@ -864,11 +858,18 @@ Object* Debug::Break(Arguments args) {
break_points_hit = CheckBreakPoints(break_point_objects);
}
- // Notify debugger if a real break point is triggered or if performing single
- // stepping with no more steps to perform. Otherwise do another step.
- if (!break_points_hit->IsUndefined() ||
- (thread_local_.last_step_action_ != StepNone &&
- thread_local_.step_count_ == 0)) {
+ // If step out is active, skip everything until the frame we need to step
+ // out to is reached, unless a real break point is hit.
+ if (Debug::StepOutActive() && frame->fp() != Debug::step_out_fp() &&
+ break_points_hit->IsUndefined() ) {
+ // Step count should always be 0 for StepOut.
+ ASSERT(thread_local_.step_count_ == 0);
+ } else if (!break_points_hit->IsUndefined() ||
+ (thread_local_.last_step_action_ != StepNone &&
+ thread_local_.step_count_ == 0)) {
+ // Notify debugger if a real break point is triggered or if performing
+ // single stepping with no more steps to perform. Otherwise do another step.
+
// Clear all current stepping setup.
ClearStepping();
@@ -1104,7 +1105,13 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// Remember this step action and count.
thread_local_.last_step_action_ = step_action;
- thread_local_.step_count_ = step_count;
+ if (step_action == StepOut) {
+ // For step out the target frame will be found on the stack, so there is no
+ // need to set a step counter for it. It is expected to always be 0 for StepOut.
+ thread_local_.step_count_ = 0;
+ } else {
+ thread_local_.step_count_ = step_count;
+ }
// Get the frame where the execution has stopped and skip the debug frame if
// any. The debug frame will only be present if execution was stopped due to
@@ -1183,13 +1190,28 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// If this is the last break code target step out is the only possibility.
if (it.IsExit() || step_action == StepOut) {
+ if (step_action == StepOut) {
+ // Skip step_count frames starting with the current one.
+ while (step_count-- > 0 && !frames_it.done()) {
+ frames_it.Advance();
+ }
+ } else {
+ ASSERT(it.IsExit());
+ frames_it.Advance();
+ }
+ // Skip builtin functions on the stack.
+ while (!frames_it.done() &&
+ JSFunction::cast(frames_it.frame()->function())->IsBuiltin()) {
+ frames_it.Advance();
+ }
// Step out: If there is a JavaScript caller frame, we need to
// flood it with breakpoints.
- frames_it.Advance();
if (!frames_it.done()) {
// Fill the function to return to with one-shot break points.
JSFunction* function = JSFunction::cast(frames_it.frame()->function());
FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
+ // Set target frame pointer.
+ ActivateStepOut(frames_it.frame());
}
} else if (!(is_inline_cache_stub || RelocInfo::IsConstructCall(it.rmode()) ||
!call_function_stub.is_null())
@@ -1445,6 +1467,7 @@ void Debug::ClearStepping() {
// Clear the various stepping setup.
ClearOneShot();
ClearStepIn();
+ ClearStepOut();
ClearStepNext();
// Clear multiple step counter.
@@ -1472,6 +1495,7 @@ void Debug::ClearOneShot() {
void Debug::ActivateStepIn(StackFrame* frame) {
+ ASSERT(!StepOutActive());
thread_local_.step_into_fp_ = frame->fp();
}
@@ -1481,6 +1505,17 @@ void Debug::ClearStepIn() {
}
+void Debug::ActivateStepOut(StackFrame* frame) {
+ ASSERT(!StepInActive());
+ thread_local_.step_out_fp_ = frame->fp();
+}
+
+
+void Debug::ClearStepOut() {
+ thread_local_.step_out_fp_ = 0;
+}
+
+
void Debug::ClearStepNext() {
thread_local_.last_step_action_ = StepNone;
thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
@@ -1569,29 +1604,28 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
// Find the call address in the running code. This address holds the call to
// either a DebugBreakXXX or to the debug break return entry code if the
// break point is still active after processing the break point.
- Address addr = frame->pc() - Assembler::kPatchReturnSequenceLength;
+ Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;
// Check if the location is at JS exit.
- bool at_js_exit = false;
+ bool at_js_return = false;
+ bool break_at_js_return_active = false;
RelocIterator it(debug_info->code());
while (!it.done()) {
if (RelocInfo::IsJSReturn(it.rinfo()->rmode())) {
- at_js_exit = (it.rinfo()->pc() ==
- addr - Assembler::kPatchReturnSequenceAddressOffset);
+ at_js_return = (it.rinfo()->pc() ==
+ addr - Assembler::kPatchReturnSequenceAddressOffset);
+ break_at_js_return_active = it.rinfo()->IsCallInstruction();
}
it.next();
}
// Handle the jump to continue execution after break point depending on the
// break location.
- if (at_js_exit) {
- // First check if the call in the code is still the debug break return
- // entry code. If it is the break point is still active. If not the break
- // point was removed during break point processing.
- if (Assembler::target_address_at(addr) ==
- debug_break_return_entry()->entry()) {
- // Break point still active. Jump to the corresponding place in the
- // original code.
+ if (at_js_return) {
+ // If the break point at the return is still active, jump to the corresponding
+ // place in the original code. If not, the break point was removed during
+ // break point processing.
+ if (break_at_js_return_active) {
addr += original_code->instruction_start() - code->instruction_start();
}
@@ -2464,6 +2498,11 @@ void Debugger::StopAgent() {
}
+void Debugger::WaitForAgent() {
+ if (agent_ != NULL)
+ agent_->WaitUntilListening();
+}
+
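
The new listening_ semaphore exists so an embedder or test can start the agent and block until the server socket is actually accepting connections before a debugger client tries to attach. A hedged sketch of the intended call order; the StartAgent signature is assumed here and is not part of this patch:

    // Sketch of the intended usage; signatures assumed, not shown in this diff.
    void StartDebuggerAndWait() {
      v8::internal::Debugger::StartAgent("d8", 5858);  // spawn the agent thread
      v8::internal::Debugger::WaitForAgent();          // returns once Listen() ran
      // It is now safe to connect a debugger client to port 5858.
    }
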
MessageImpl MessageImpl::NewEvent(DebugEvent event,
bool running,
Handle<JSObject> exec_state,
diff --git a/V8Binding/v8/src/debug.h b/V8Binding/v8/src/debug.h
index 38789e1..29c2bc2 100644
--- a/V8Binding/v8/src/debug.h
+++ b/V8Binding/v8/src/debug.h
@@ -282,6 +282,9 @@ class Debug {
static Address step_in_fp() { return thread_local_.step_into_fp_; }
static Address* step_in_fp_addr() { return &thread_local_.step_into_fp_; }
+ static bool StepOutActive() { return thread_local_.step_out_fp_ != 0; }
+ static Address step_out_fp() { return thread_local_.step_out_fp_; }
+
static EnterDebugger* debugger_entry() {
return thread_local_.debugger_entry_;
}
@@ -332,10 +335,8 @@ class Debug {
return &registers_[r];
}
- // Address of the debug break return entry code.
- static Code* debug_break_return_entry() { return debug_break_return_entry_; }
-
- // Support for getting the address of the debug break on return code.
+ // Access to the debug break on return code.
+ static Code* debug_break_return() { return debug_break_return_; }
static Code** debug_break_return_address() {
return &debug_break_return_;
}
@@ -353,6 +354,7 @@ class Debug {
static char* ArchiveDebug(char* to);
static char* RestoreDebug(char* from);
static int ArchiveSpacePerThread();
+ static void FreeThreadResources() { }
// Mirror cache handling.
static void ClearMirrorCache();
@@ -382,7 +384,6 @@ class Debug {
static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
static void GenerateConstructCallDebugBreak(MacroAssembler* masm);
static void GenerateReturnDebugBreak(MacroAssembler* masm);
- static void GenerateReturnDebugBreakEntry(MacroAssembler* masm);
static void GenerateStubNoRegistersDebugBreak(MacroAssembler* masm);
// Called from stub-cache.cc.
@@ -393,6 +394,8 @@ class Debug {
static void ClearOneShot();
static void ActivateStepIn(StackFrame* frame);
static void ClearStepIn();
+ static void ActivateStepOut(StackFrame* frame);
+ static void ClearStepOut();
static void ClearStepNext();
// Returns whether the compile succeeded.
static bool EnsureCompiled(Handle<SharedFunctionInfo> shared);
@@ -445,6 +448,10 @@ class Debug {
// Frame pointer for frame from which step in was performed.
Address step_into_fp_;
+ // Frame pointer for the frame where the debugger should be called when the
+ // current step out action is completed.
+ Address step_out_fp_;
+
// Storage location for jump when exiting debug break calls.
Address after_break_target_;
@@ -460,9 +467,6 @@ class Debug {
static ThreadLocal thread_local_;
static void ThreadInit();
- // Code object for debug break return entry code.
- static Code* debug_break_return_entry_;
-
// Code to call for handling debug break on return.
static Code* debug_break_return_;
@@ -642,6 +646,9 @@ class Debugger {
// Stop the debugger agent.
static void StopAgent();
+ // Blocks until the agent has started listening for connections.
+ static void WaitForAgent();
+
// Unload the debugger if possible. Only called when no debugger is currently
// active.
static void UnloadDebugger();
diff --git a/V8Binding/v8/src/execution.cc b/V8Binding/v8/src/execution.cc
index 04ec905..8bc6b74 100644
--- a/V8Binding/v8/src/execution.cc
+++ b/V8Binding/v8/src/execution.cc
@@ -61,9 +61,6 @@ static Handle<Object> Invoke(bool construct,
// Entering JavaScript.
VMState state(JS);
- // Guard the stack against too much recursion.
- StackGuard guard;
-
// Placeholder for return value.
Object* value = reinterpret_cast<Object*>(kZapValue);
@@ -217,55 +214,6 @@ Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
StackGuard::ThreadLocal StackGuard::thread_local_;
-StackGuard::StackGuard() {
- // NOTE: Overall the StackGuard code assumes that the stack grows towards
- // lower addresses.
- ExecutionAccess access;
- if (thread_local_.nesting_++ == 0) {
- // Initial StackGuard is being set. We will set the stack limits based on
- // the current stack pointer allowing the stack to grow kLimitSize from
- // here.
-
- // Ensure that either the stack limits are unset (kIllegalLimit) or that
- // they indicate a pending interruption. The interrupt limit will be
- // temporarily reset through the code below and reestablished if the
- // interrupt flags indicate that an interrupt is pending.
- ASSERT(thread_local_.jslimit_ == kIllegalLimit ||
- (thread_local_.jslimit_ == kInterruptLimit &&
- thread_local_.interrupt_flags_ != 0));
- ASSERT(thread_local_.climit_ == kIllegalLimit ||
- (thread_local_.climit_ == kInterruptLimit &&
- thread_local_.interrupt_flags_ != 0));
-
- uintptr_t limit = GENERATED_CODE_STACK_LIMIT(kLimitSize);
- thread_local_.initial_jslimit_ = thread_local_.jslimit_ = limit;
- Heap::SetStackLimit(limit);
- // NOTE: The check for overflow is not safe as there is no guarantee that
- // the running thread has its stack in all memory up to address 0x00000000.
- thread_local_.initial_climit_ = thread_local_.climit_ =
- reinterpret_cast<uintptr_t>(this) >= kLimitSize ?
- reinterpret_cast<uintptr_t>(this) - kLimitSize : 0;
-
- if (thread_local_.interrupt_flags_ != 0) {
- set_limits(kInterruptLimit, access);
- }
- }
- // Ensure that proper limits have been set.
- ASSERT(thread_local_.jslimit_ != kIllegalLimit &&
- thread_local_.climit_ != kIllegalLimit);
- ASSERT(thread_local_.initial_jslimit_ != kIllegalLimit &&
- thread_local_.initial_climit_ != kIllegalLimit);
-}
-
-
-StackGuard::~StackGuard() {
- ExecutionAccess access;
- if (--thread_local_.nesting_ == 0) {
- set_limits(kIllegalLimit, access);
- }
-}
-
-
bool StackGuard::IsStackOverflow() {
ExecutionAccess access;
return (thread_local_.jslimit_ != kInterruptLimit &&
@@ -285,15 +233,16 @@ void StackGuard::SetStackLimit(uintptr_t limit) {
ExecutionAccess access;
// If the current limits are special (eg due to a pending interrupt) then
// leave them alone.
+ uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(limit);
if (thread_local_.jslimit_ == thread_local_.initial_jslimit_) {
- thread_local_.jslimit_ = limit;
- Heap::SetStackLimit(limit);
+ thread_local_.jslimit_ = jslimit;
+ Heap::SetStackLimit(jslimit);
}
if (thread_local_.climit_ == thread_local_.initial_climit_) {
thread_local_.climit_ = limit;
}
thread_local_.initial_climit_ = limit;
- thread_local_.initial_jslimit_ = limit;
+ thread_local_.initial_jslimit_ = jslimit;
}
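
SetStackLimit now routes the incoming C-stack limit through SimulatorStack::JsLimitFromCLimit so that, when JavaScript runs inside the ARM simulator, the JS limit tracks the simulator's own stack rather than the host stack. On native targets the two limits coincide; a plausible non-simulator definition, assumed here only for illustration (the real ones live in the simulator-*.h changes listed in this patch), is the identity:

    // Illustrative only; the actual definitions are in src/*/simulator-*.h.
    class SimulatorStack : public v8::internal::AllStatic {
     public:
      // Native build: JavaScript executes on the regular C stack, so the JS
      // limit is simply the C limit.
      static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
        return c_limit;
      }
    };
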
@@ -407,6 +356,61 @@ char* StackGuard::RestoreStackGuard(char* from) {
}
+static internal::Thread::LocalStorageKey stack_limit_key =
+ internal::Thread::CreateThreadLocalKey();
+
+
+void StackGuard::FreeThreadResources() {
+ Thread::SetThreadLocal(
+ stack_limit_key,
+ reinterpret_cast<void*>(thread_local_.initial_climit_));
+}
+
+
+void StackGuard::ThreadLocal::Clear() {
+ initial_jslimit_ = kIllegalLimit;
+ jslimit_ = kIllegalLimit;
+ initial_climit_ = kIllegalLimit;
+ climit_ = kIllegalLimit;
+ nesting_ = 0;
+ postpone_interrupts_nesting_ = 0;
+ interrupt_flags_ = 0;
+ Heap::SetStackLimit(kIllegalLimit);
+}
+
+
+void StackGuard::ThreadLocal::Initialize() {
+ if (initial_climit_ == kIllegalLimit) {
+ // Takes the address of the limit variable in order to find out where
+ // the top of stack is right now.
+ intptr_t limit = reinterpret_cast<intptr_t>(&limit) - kLimitSize;
+ initial_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
+ jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
+ initial_climit_ = limit;
+ climit_ = limit;
+ Heap::SetStackLimit(SimulatorStack::JsLimitFromCLimit(limit));
+ }
+ nesting_ = 0;
+ postpone_interrupts_nesting_ = 0;
+ interrupt_flags_ = 0;
+}
+
+
+void StackGuard::ClearThread(const ExecutionAccess& lock) {
+ thread_local_.Clear();
+}
+
+
+void StackGuard::InitThread(const ExecutionAccess& lock) {
+ thread_local_.Initialize();
+ void* stored_limit = Thread::GetThreadLocal(stack_limit_key);
+ // You should hold the ExecutionAccess lock when you call this.
+ if (stored_limit != NULL) {
+ StackGuard::SetStackLimit(reinterpret_cast<intptr_t>(stored_limit));
+ }
+}
+
+
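
ThreadLocal::Initialize derives the thread's stack limits lazily from the address of a local variable: taking &limit yields an address near the current top of the downward-growing stack, and subtracting kLimitSize gives the lowest address JavaScript may reach. A standalone sketch of the idea, with a hypothetical function name:

    #include <cstdint>

    // Compute a "do not grow below this" address for the calling thread's
    // stack by sampling the address of a local variable, which is close to the
    // current top of the (downward-growing) stack.
    uintptr_t ComputeStackLimit(uintptr_t headroom_bytes) {
      uintptr_t approximate_stack_top =
          reinterpret_cast<uintptr_t>(&headroom_bytes);
      // Guard against wrap-around, mirroring the overflow check in the removed
      // StackGuard constructor above.
      return approximate_stack_top > headroom_bytes
          ? approximate_stack_top - headroom_bytes
          : 0;
    }
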
// --- C a l l s t o n a t i v e s ---
#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception) \
diff --git a/V8Binding/v8/src/execution.h b/V8Binding/v8/src/execution.h
index 4cdfd2b..55307f7 100644
--- a/V8Binding/v8/src/execution.h
+++ b/V8Binding/v8/src/execution.h
@@ -141,14 +141,13 @@ class Execution : public AllStatic {
class ExecutionAccess;
-// Stack guards are used to limit the number of nested invocations of
-// JavaScript and the stack size used in each invocation.
-class StackGuard BASE_EMBEDDED {
+// StackGuard contains the handling of the limits that are used to limit the
+// number of nested invocations of JavaScript and the stack size used in each
+// invocation.
+class StackGuard : public AllStatic {
public:
- StackGuard();
-
- ~StackGuard();
-
+ // Pass the address beyond which the stack should not grow. The stack
+ // is assumed to grow downwards.
static void SetStackLimit(uintptr_t limit);
static Address address_of_jslimit() {
@@ -159,6 +158,13 @@ class StackGuard BASE_EMBEDDED {
static char* ArchiveStackGuard(char* to);
static char* RestoreStackGuard(char* from);
static int ArchiveSpacePerThread();
+ static void FreeThreadResources();
+ // Sets up the default stack guard for this thread if it has not
+ // already been set up.
+ static void InitThread(const ExecutionAccess& lock);
+ // Clears the stack guard for this thread so it does not look as if
+ // it has been set up.
+ static void ClearThread(const ExecutionAccess& lock);
static bool IsStackOverflow();
static bool IsPreempted();
@@ -175,6 +181,13 @@ class StackGuard BASE_EMBEDDED {
#endif
static void Continue(InterruptFlag after_what);
+ // This provides an asynchronous read of the stack limit for the current
+ // thread. There are no locks protecting this, but it is assumed that you
+ // have the global V8 lock if you are using multiple V8 threads.
+ static uintptr_t climit() {
+ return thread_local_.climit_;
+ }
+
static uintptr_t jslimit() {
return thread_local_.jslimit_;
}
@@ -183,13 +196,6 @@ class StackGuard BASE_EMBEDDED {
// You should hold the ExecutionAccess lock when calling this method.
static bool IsSet(const ExecutionAccess& lock);
- // This provides an asynchronous read of the stack limit for the current
- // thread. There are no locks protecting this, but it is assumed that you
- // have the global V8 lock if you are using multiple V8 threads.
- static uintptr_t climit() {
- return thread_local_.climit_;
- }
-
// You should hold the ExecutionAccess lock when calling this method.
static void set_limits(uintptr_t value, const ExecutionAccess& lock) {
Heap::SetStackLimit(value);
@@ -200,14 +206,9 @@ class StackGuard BASE_EMBEDDED {
// Reset limits to initial values. For example after handling interrupt.
// You should hold the ExecutionAccess lock when calling this method.
static void reset_limits(const ExecutionAccess& lock) {
- if (thread_local_.nesting_ == 0) {
- // No limits have been set yet.
- set_limits(kIllegalLimit, lock);
- } else {
- thread_local_.jslimit_ = thread_local_.initial_jslimit_;
- Heap::SetStackLimit(thread_local_.jslimit_);
- thread_local_.climit_ = thread_local_.initial_climit_;
- }
+ thread_local_.jslimit_ = thread_local_.initial_jslimit_;
+ Heap::SetStackLimit(thread_local_.jslimit_);
+ thread_local_.climit_ = thread_local_.initial_climit_;
}
// Enable or disable interrupts.
@@ -217,24 +218,19 @@ class StackGuard BASE_EMBEDDED {
static const uintptr_t kLimitSize = kPointerSize * 128 * KB;
#ifdef V8_TARGET_ARCH_X64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
- static const uintptr_t kIllegalLimit = V8_UINT64_C(0xffffffffffffffff);
+ static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
#else
static const uintptr_t kInterruptLimit = 0xfffffffe;
- static const uintptr_t kIllegalLimit = 0xffffffff;
+ static const uintptr_t kIllegalLimit = 0xfffffff8;
#endif
class ThreadLocal {
public:
- ThreadLocal()
- : initial_jslimit_(kIllegalLimit),
- jslimit_(kIllegalLimit),
- initial_climit_(kIllegalLimit),
- climit_(kIllegalLimit),
- nesting_(0),
- postpone_interrupts_nesting_(0),
- interrupt_flags_(0) {
- Heap::SetStackLimit(kIllegalLimit);
- }
+ ThreadLocal() { Clear(); }
+ // You should hold the ExecutionAccess lock when you call Initialize or
+ // Clear.
+ void Initialize();
+ void Clear();
uintptr_t initial_jslimit_;
uintptr_t jslimit_;
uintptr_t initial_climit_;
diff --git a/V8Binding/v8/src/factory.cc b/V8Binding/v8/src/factory.cc
index bb6987b..622055c 100644
--- a/V8Binding/v8/src/factory.cc
+++ b/V8Binding/v8/src/factory.cc
@@ -401,10 +401,12 @@ Handle<Object> Factory::NewError(const char* maker,
const char* type,
Handle<JSArray> args) {
Handle<String> make_str = Factory::LookupAsciiSymbol(maker);
- Handle<JSFunction> fun =
- Handle<JSFunction>(
- JSFunction::cast(
- Top::builtins()->GetProperty(*make_str)));
+ Handle<Object> fun_obj(Top::builtins()->GetProperty(*make_str));
+ // If the builtins haven't been properly configured yet this error
+ // constructor may not have been defined. Bail out.
+ if (!fun_obj->IsJSFunction())
+ return Factory::undefined_value();
+ Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
Handle<Object> type_obj = Factory::LookupAsciiSymbol(type);
Object** argv[2] = { type_obj.location(),
Handle<Object>::cast(args).location() };
@@ -671,6 +673,11 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
}
+Handle<String> Factory::NumberToString(Handle<Object> number) {
+ CALL_HEAP_FUNCTION(Heap::NumberToString(*number), String);
+}
+
+
Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
Handle<NumberDictionary> dictionary,
uint32_t key,
diff --git a/V8Binding/v8/src/factory.h b/V8Binding/v8/src/factory.h
index ddf71de..0596fbf 100644
--- a/V8Binding/v8/src/factory.h
+++ b/V8Binding/v8/src/factory.h
@@ -286,6 +286,8 @@ class Factory : public AllStatic {
Handle<Object> value,
PropertyAttributes attributes);
+ static Handle<String> NumberToString(Handle<Object> number);
+
enum ApiInstanceType {
JavaScriptObject,
InnerGlobalObject,
diff --git a/V8Binding/v8/src/flag-definitions.h b/V8Binding/v8/src/flag-definitions.h
index c05feb4..91c5bca 100644
--- a/V8Binding/v8/src/flag-definitions.h
+++ b/V8Binding/v8/src/flag-definitions.h
@@ -133,7 +133,6 @@ DEFINE_bool(debug_info, true, "add debug information to compiled functions")
DEFINE_bool(strict, false, "strict error checking")
DEFINE_int(min_preparse_length, 1024,
"Minimum length for automatic enable preparsing")
-DEFINE_bool(multipass, false, "use the multipass code generator")
// compilation-cache.cc
DEFINE_bool(compilation_cache, true, "enable compilation cache")
@@ -271,7 +270,6 @@ DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
// compiler.cc
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
DEFINE_bool(print_scopes, false, "print scopes")
-DEFINE_bool(print_cfg, false, "print control-flow graph")
// contexts.cc
DEFINE_bool(trace_contexts, false, "trace contexts operations")
diff --git a/V8Binding/v8/src/handles.cc b/V8Binding/v8/src/handles.cc
index fae006a..b43ec53 100644
--- a/V8Binding/v8/src/handles.cc
+++ b/V8Binding/v8/src/handles.cc
@@ -29,6 +29,7 @@
#include "accessors.h"
#include "api.h"
+#include "arguments.h"
#include "bootstrapper.h"
#include "compiler.h"
#include "debug.h"
@@ -46,10 +47,10 @@ v8::ImplementationUtilities::HandleScopeData HandleScope::current_ =
int HandleScope::NumberOfHandles() {
- int n = HandleScopeImplementer::instance()->Blocks()->length();
+ int n = HandleScopeImplementer::instance()->blocks()->length();
if (n == 0) return 0;
return ((n - 1) * kHandleBlockSize) +
- (current_.next - HandleScopeImplementer::instance()->Blocks()->last());
+ (current_.next - HandleScopeImplementer::instance()->blocks()->last());
}
@@ -67,8 +68,8 @@ Object** HandleScope::Extend() {
HandleScopeImplementer* impl = HandleScopeImplementer::instance();
// If there's more room in the last block, we use that. This is used
// for fast creation of scopes after scope barriers.
- if (!impl->Blocks()->is_empty()) {
- Object** limit = &impl->Blocks()->last()[kHandleBlockSize];
+ if (!impl->blocks()->is_empty()) {
+ Object** limit = &impl->blocks()->last()[kHandleBlockSize];
if (current_.limit != limit) {
current_.limit = limit;
}
@@ -81,7 +82,7 @@ Object** HandleScope::Extend() {
result = impl->GetSpareOrNewBlock();
// Add the extension to the global list of blocks, but count the
// extension as part of the current scope.
- impl->Blocks()->Add(result);
+ impl->blocks()->Add(result);
current_.extensions++;
current_.limit = &result[kHandleBlockSize];
}
@@ -479,15 +480,17 @@ int GetScriptLineNumber(Handle<Script> script, int code_pos) {
}
+void CustomArguments::IterateInstance(ObjectVisitor* v) {
+ v->VisitPointers(values_, values_ + 4);
+}
+
+
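
The refactoring below replaces three separately created handles with a CustomArguments object that packs the interceptor data, the receiver and the holder into one contiguous block of object pointers; AccessorInfo is then constructed from args.end() and reads those slots at fixed negative offsets. CustomArguments itself is declared in arguments.h (changed elsewhere in this patch); the sketch only illustrates the packing idea, and its names and layout are assumptions:

    // Conceptual sketch only; member names and slot layout are assumptions,
    // not the actual arguments.h definition.
    class PackedArgs {
     public:
      PackedArgs(void* data, void* self, void* holder) {
        values_[0] = data;
        values_[1] = self;
        values_[2] = holder;
      }
      // Consumers that know the layout index backwards from this pointer:
      // end()[-1] == holder, end()[-2] == self, end()[-3] == data.
      void** end() { return values_ + kLength; }

     private:
      static const int kLength = 3;
      void* values_[kLength];
    };
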
// Compute the property keys from the interceptor.
v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object) {
Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
- Handle<Object> data(interceptor->data());
- v8::AccessorInfo info(
- v8::Utils::ToLocal(receiver),
- v8::Utils::ToLocal(data),
- v8::Utils::ToLocal(object));
+ CustomArguments args(interceptor->data(), *receiver, *object);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
v8::NamedPropertyEnumerator enum_fun =
@@ -507,11 +510,8 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object) {
Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
- Handle<Object> data(interceptor->data());
- v8::AccessorInfo info(
- v8::Utils::ToLocal(receiver),
- v8::Utils::ToLocal(data),
- v8::Utils::ToLocal(object));
+ CustomArguments args(interceptor->data(), *receiver, *object);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
v8::IndexedPropertyEnumerator enum_fun =
@@ -527,55 +527,53 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
}
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object) {
+Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
+ KeyCollectionType type) {
Handle<FixedArray> content = Factory::empty_fixed_array();
- JSObject* arguments_boilerplate =
- Top::context()->global_context()->arguments_boilerplate();
- JSFunction* arguments_function =
- JSFunction::cast(arguments_boilerplate->map()->constructor());
- bool allow_enumeration = (object->map()->constructor() != arguments_function);
-
// Only collect keys if access is permitted.
- if (allow_enumeration) {
- for (Handle<Object> p = object;
- *p != Heap::null_value();
- p = Handle<Object>(p->GetPrototype())) {
- Handle<JSObject> current(JSObject::cast(*p));
-
- // Check access rights if required.
- if (current->IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(*current, Heap::undefined_value(),
- v8::ACCESS_KEYS)) {
- Top::ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
- break;
- }
+ for (Handle<Object> p = object;
+ *p != Heap::null_value();
+ p = Handle<Object>(p->GetPrototype())) {
+ Handle<JSObject> current(JSObject::cast(*p));
+
+ // Check access rights if required.
+ if (current->IsAccessCheckNeeded() &&
+ !Top::MayNamedAccess(*current, Heap::undefined_value(),
+ v8::ACCESS_KEYS)) {
+ Top::ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
+ break;
+ }
- // Compute the element keys.
- Handle<FixedArray> element_keys =
- Factory::NewFixedArray(current->NumberOfEnumElements());
- current->GetEnumElementKeys(*element_keys);
- content = UnionOfKeys(content, element_keys);
-
- // Add the element keys from the interceptor.
- if (current->HasIndexedInterceptor()) {
- v8::Handle<v8::Array> result =
- GetKeysForIndexedInterceptor(object, current);
- if (!result.IsEmpty())
- content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
- }
+ // Compute the element keys.
+ Handle<FixedArray> element_keys =
+ Factory::NewFixedArray(current->NumberOfEnumElements());
+ current->GetEnumElementKeys(*element_keys);
+ content = UnionOfKeys(content, element_keys);
+
+ // Add the element keys from the interceptor.
+ if (current->HasIndexedInterceptor()) {
+ v8::Handle<v8::Array> result =
+ GetKeysForIndexedInterceptor(object, current);
+ if (!result.IsEmpty())
+ content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
+ }
- // Compute the property keys.
- content = UnionOfKeys(content, GetEnumPropertyKeys(current));
+ // Compute the property keys.
+ content = UnionOfKeys(content, GetEnumPropertyKeys(current));
- // Add the property keys from the interceptor.
- if (current->HasNamedInterceptor()) {
- v8::Handle<v8::Array> result =
- GetKeysForNamedInterceptor(object, current);
- if (!result.IsEmpty())
- content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
- }
+ // Add the property keys from the interceptor.
+ if (current->HasNamedInterceptor()) {
+ v8::Handle<v8::Array> result =
+ GetKeysForNamedInterceptor(object, current);
+ if (!result.IsEmpty())
+ content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
}
+
+ // If we only want local properties we bail out after the first
+ // iteration.
+ if (type == LOCAL_ONLY)
+ break;
}
return content;
}
@@ -583,7 +581,8 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object) {
Handle<JSArray> GetKeysFor(Handle<JSObject> object) {
Counters::for_in.Increment();
- Handle<FixedArray> elements = GetKeysInFixedArrayFor(object);
+ Handle<FixedArray> elements = GetKeysInFixedArrayFor(object,
+ INCLUDE_PROTOS);
return Factory::NewJSArrayWithElements(elements);
}
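
Callers of GetKeysInFixedArrayFor now choose whether prototype-chain keys are included: GetKeysFor keeps the old for-in behaviour by passing INCLUDE_PROTOS, while a caller that wants only an object's own enumerable keys would pass LOCAL_ONLY. A brief sketch of the latter, with the surrounding handle-scope plumbing assumed:

    // Sketch: collect only the object's own enumerable keys (no prototype walk).
    Handle<FixedArray> GetOwnEnumerableKeys(Handle<JSObject> object) {
      return GetKeysInFixedArrayFor(object, LOCAL_ONLY);
    }
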
diff --git a/V8Binding/v8/src/handles.h b/V8Binding/v8/src/handles.h
index 847aebb..5d57465 100644
--- a/V8Binding/v8/src/handles.h
+++ b/V8Binding/v8/src/handles.h
@@ -265,9 +265,13 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object);
v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object);
+
+enum KeyCollectionType { LOCAL_ONLY, INCLUDE_PROTOS };
+
// Computes the enumerable keys for a JSObject. Used for implementing
// "for (n in object) { }".
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object);
+Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
+ KeyCollectionType type);
Handle<JSArray> GetKeysFor(Handle<JSObject> object);
Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object);
diff --git a/V8Binding/v8/src/heap-profiler.cc b/V8Binding/v8/src/heap-profiler.cc
new file mode 100644
index 0000000..ecb6919
--- /dev/null
+++ b/V8Binding/v8/src/heap-profiler.cc
@@ -0,0 +1,626 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "heap-profiler.h"
+#include "string-stream.h"
+
+namespace v8 {
+namespace internal {
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+namespace {
+
+// Clusterizer is a set of helper functions for converting
+// object references into clusters.
+class Clusterizer : public AllStatic {
+ public:
+ static JSObjectsCluster Clusterize(HeapObject* obj) {
+ return Clusterize(obj, true);
+ }
+ static void InsertIntoTree(JSObjectsClusterTree* tree,
+ HeapObject* obj, bool fine_grain);
+ static void InsertReferenceIntoTree(JSObjectsClusterTree* tree,
+ const JSObjectsCluster& cluster) {
+ InsertIntoTree(tree, cluster, 0);
+ }
+
+ private:
+ static JSObjectsCluster Clusterize(HeapObject* obj, bool fine_grain);
+ static int CalculateNetworkSize(JSObject* obj);
+ static int GetObjectSize(HeapObject* obj) {
+ return obj->IsJSObject() ?
+ CalculateNetworkSize(JSObject::cast(obj)) : obj->Size();
+ }
+ static void InsertIntoTree(JSObjectsClusterTree* tree,
+ const JSObjectsCluster& cluster, int size);
+};
+
+
+JSObjectsCluster Clusterizer::Clusterize(HeapObject* obj, bool fine_grain) {
+ if (obj->IsJSObject()) {
+ JSObject* js_obj = JSObject::cast(obj);
+ String* constructor = JSObject::cast(js_obj)->constructor_name();
+ // Differentiate Object and Array instances.
+ if (fine_grain && (constructor == Heap::Object_symbol() ||
+ constructor == Heap::Array_symbol())) {
+ return JSObjectsCluster(constructor, obj);
+ } else {
+ return JSObjectsCluster(constructor);
+ }
+ } else if (obj->IsString()) {
+ return JSObjectsCluster(Heap::String_symbol());
+ }
+ return JSObjectsCluster();
+}
+
+
+void Clusterizer::InsertIntoTree(JSObjectsClusterTree* tree,
+ HeapObject* obj, bool fine_grain) {
+ JSObjectsCluster cluster = Clusterize(obj, fine_grain);
+ if (cluster.is_null()) return;
+ InsertIntoTree(tree, cluster, GetObjectSize(obj));
+}
+
+
+void Clusterizer::InsertIntoTree(JSObjectsClusterTree* tree,
+ const JSObjectsCluster& cluster, int size) {
+ JSObjectsClusterTree::Locator loc;
+ tree->Insert(cluster, &loc);
+ NumberAndSizeInfo number_and_size = loc.value();
+ number_and_size.increment_number(1);
+ number_and_size.increment_bytes(size);
+ loc.set_value(number_and_size);
+}
+
+
+int Clusterizer::CalculateNetworkSize(JSObject* obj) {
+ int size = obj->Size();
+ // If 'properties' and 'elements' are non-empty (thus, non-shared),
+ // take their size into account.
+ if (FixedArray::cast(obj->properties())->length() != 0) {
+ size += obj->properties()->Size();
+ }
+ if (FixedArray::cast(obj->elements())->length() != 0) {
+ size += obj->elements()->Size();
+ }
+ return size;
+}
+
+
+// A helper class for recording back references.
+class ReferencesExtractor : public ObjectVisitor {
+ public:
+ ReferencesExtractor(const JSObjectsCluster& cluster,
+ RetainerHeapProfile* profile)
+ : cluster_(cluster),
+ profile_(profile),
+ inside_array_(false) {
+ }
+
+ void VisitPointer(Object** o) {
+ if ((*o)->IsJSObject() || (*o)->IsString()) {
+ profile_->StoreReference(cluster_, HeapObject::cast(*o));
+ } else if ((*o)->IsFixedArray() && !inside_array_) {
+ // Traverse one level deep for data members that are fixed arrays.
+ // This covers the case of 'elements' and 'properties' of JSObject,
+ // and function contexts.
+ inside_array_ = true;
+ FixedArray::cast(*o)->Iterate(this);
+ inside_array_ = false;
+ }
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) VisitPointer(p);
+ }
+
+ private:
+ const JSObjectsCluster& cluster_;
+ RetainerHeapProfile* profile_;
+ bool inside_array_;
+};
+
+
+// A printer interface implementation for the Retainers profile.
+class RetainersPrinter : public RetainerHeapProfile::Printer {
+ public:
+ void PrintRetainers(const JSObjectsCluster& cluster,
+ const StringStream& retainers) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ cluster.Print(&stream);
+ LOG(HeapSampleJSRetainersEvent(
+ *(stream.ToCString()), *(retainers.ToCString())));
+ }
+};
+
+
+// Visitor for printing a cluster tree.
+class ClusterTreePrinter BASE_EMBEDDED {
+ public:
+ explicit ClusterTreePrinter(StringStream* stream) : stream_(stream) {}
+ void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ Print(stream_, cluster, number_and_size);
+ }
+ static void Print(StringStream* stream,
+ const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size);
+
+ private:
+ StringStream* stream_;
+};
+
+
+void ClusterTreePrinter::Print(StringStream* stream,
+ const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ stream->Put(',');
+ cluster.Print(stream);
+ stream->Add(";%d", number_and_size.number());
+}
+
+
+// Visitor for printing a retainer tree.
+class SimpleRetainerTreePrinter BASE_EMBEDDED {
+ public:
+ explicit SimpleRetainerTreePrinter(RetainerHeapProfile::Printer* printer)
+ : printer_(printer) {}
+ void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
+
+ private:
+ RetainerHeapProfile::Printer* printer_;
+};
+
+
+void SimpleRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
+ JSObjectsClusterTree* tree) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ ClusterTreePrinter retainers_printer(&stream);
+ tree->ForEach(&retainers_printer);
+ printer_->PrintRetainers(cluster, stream);
+}
+
+
+// Visitor for aggregating references count of equivalent clusters.
+class RetainersAggregator BASE_EMBEDDED {
+ public:
+ RetainersAggregator(ClustersCoarser* coarser, JSObjectsClusterTree* dest_tree)
+ : coarser_(coarser), dest_tree_(dest_tree) {}
+ void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size);
+
+ private:
+ ClustersCoarser* coarser_;
+ JSObjectsClusterTree* dest_tree_;
+};
+
+
+void RetainersAggregator::Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ JSObjectsCluster eq = coarser_->GetCoarseEquivalent(cluster);
+ if (eq.is_null()) eq = cluster;
+ JSObjectsClusterTree::Locator loc;
+ dest_tree_->Insert(eq, &loc);
+ NumberAndSizeInfo aggregated_number = loc.value();
+ aggregated_number.increment_number(number_and_size.number());
+ loc.set_value(aggregated_number);
+}
+
+
+// Visitor for printing retainers tree. Aggregates equivalent retainer clusters.
+class AggregatingRetainerTreePrinter BASE_EMBEDDED {
+ public:
+ AggregatingRetainerTreePrinter(ClustersCoarser* coarser,
+ RetainerHeapProfile::Printer* printer)
+ : coarser_(coarser), printer_(printer) {}
+ void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
+
+ private:
+ ClustersCoarser* coarser_;
+ RetainerHeapProfile::Printer* printer_;
+};
+
+
+void AggregatingRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
+ JSObjectsClusterTree* tree) {
+ if (!coarser_->GetCoarseEquivalent(cluster).is_null()) return;
+ JSObjectsClusterTree dest_tree_;
+ RetainersAggregator retainers_aggregator(coarser_, &dest_tree_);
+ tree->ForEach(&retainers_aggregator);
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ ClusterTreePrinter retainers_printer(&stream);
+ dest_tree_.ForEach(&retainers_printer);
+ printer_->PrintRetainers(cluster, stream);
+}
+
+
+// A helper class for building a retainers tree, that aggregates
+// all equivalent clusters.
+class RetainerTreeAggregator BASE_EMBEDDED {
+ public:
+ explicit RetainerTreeAggregator(ClustersCoarser* coarser)
+ : coarser_(coarser) {}
+ void Process(JSObjectsRetainerTree* input_tree) {
+ input_tree->ForEach(this);
+ }
+ void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
+ JSObjectsRetainerTree& output_tree() { return output_tree_; }
+
+ private:
+ ClustersCoarser* coarser_;
+ JSObjectsRetainerTree output_tree_;
+};
+
+
+void RetainerTreeAggregator::Call(const JSObjectsCluster& cluster,
+ JSObjectsClusterTree* tree) {
+ JSObjectsCluster eq = coarser_->GetCoarseEquivalent(cluster);
+ if (eq.is_null()) return;
+ JSObjectsRetainerTree::Locator loc;
+ if (output_tree_.Insert(eq, &loc)) {
+ loc.set_value(new JSObjectsClusterTree());
+ }
+ RetainersAggregator retainers_aggregator(coarser_, loc.value());
+ tree->ForEach(&retainers_aggregator);
+}
+
+} // namespace
+
+
+const JSObjectsClusterTreeConfig::Key JSObjectsClusterTreeConfig::kNoKey;
+const JSObjectsClusterTreeConfig::Value JSObjectsClusterTreeConfig::kNoValue;
+
+
+ConstructorHeapProfile::ConstructorHeapProfile()
+ : zscope_(DELETE_ON_EXIT) {
+}
+
+
+void ConstructorHeapProfile::Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ cluster.Print(&stream);
+ LOG(HeapSampleJSConstructorEvent(*(stream.ToCString()),
+ number_and_size.number(),
+ number_and_size.bytes()));
+}
+
+
+void ConstructorHeapProfile::CollectStats(HeapObject* obj) {
+ Clusterizer::InsertIntoTree(&js_objects_info_tree_, obj, false);
+}
+
+
+void ConstructorHeapProfile::PrintStats() {
+ js_objects_info_tree_.ForEach(this);
+}
+
+
+void JSObjectsCluster::Print(StringStream* accumulator) const {
+ ASSERT(!is_null());
+ if (constructor_ == FromSpecialCase(ROOTS)) {
+ accumulator->Add("(roots)");
+ } else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
+ accumulator->Add("(global property)");
+ } else if (constructor_ == FromSpecialCase(SELF)) {
+ accumulator->Add("(self)");
+ } else {
+ SmartPointer<char> s_name(
+ constructor_->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
+ accumulator->Add("%s", (*s_name)[0] != '\0' ? *s_name : "(anonymous)");
+ if (instance_ != NULL) {
+ accumulator->Add(":%p", static_cast<void*>(instance_));
+ }
+ }
+}
+
+
+void JSObjectsCluster::DebugPrint(StringStream* accumulator) const {
+ if (!is_null()) {
+ Print(accumulator);
+ } else {
+ accumulator->Add("(null cluster)");
+ }
+}
+
+
+inline ClustersCoarser::ClusterBackRefs::ClusterBackRefs(
+ const JSObjectsCluster& cluster_)
+ : cluster(cluster_), refs(kInitialBackrefsListCapacity) {
+}
+
+
+inline ClustersCoarser::ClusterBackRefs::ClusterBackRefs(
+ const ClustersCoarser::ClusterBackRefs& src)
+ : cluster(src.cluster), refs(src.refs.capacity()) {
+ refs.AddAll(src.refs);
+}
+
+
+inline ClustersCoarser::ClusterBackRefs&
+ ClustersCoarser::ClusterBackRefs::operator=(
+ const ClustersCoarser::ClusterBackRefs& src) {
+ if (this == &src) return *this;
+ cluster = src.cluster;
+ refs.Clear();
+ refs.AddAll(src.refs);
+ return *this;
+}
+
+
+inline int ClustersCoarser::ClusterBackRefs::Compare(
+ const ClustersCoarser::ClusterBackRefs& a,
+ const ClustersCoarser::ClusterBackRefs& b) {
+ int cmp = JSObjectsCluster::CompareConstructors(a.cluster, b.cluster);
+ if (cmp != 0) return cmp;
+ if (a.refs.length() < b.refs.length()) return -1;
+ if (a.refs.length() > b.refs.length()) return 1;
+ for (int i = 0; i < a.refs.length(); ++i) {
+ int cmp = JSObjectsCluster::Compare(a.refs[i], b.refs[i]);
+ if (cmp != 0) return cmp;
+ }
+ return 0;
+}
+
+
+ClustersCoarser::ClustersCoarser()
+ : zscope_(DELETE_ON_EXIT),
+ sim_list_(ClustersCoarser::kInitialSimilarityListCapacity),
+ current_pair_(NULL),
+ current_set_(NULL),
+ self_(NULL) {
+}
+
+
+void ClustersCoarser::Call(const JSObjectsCluster& cluster,
+ JSObjectsClusterTree* tree) {
+ if (!cluster.can_be_coarsed()) return;
+ ClusterBackRefs pair(cluster);
+ ASSERT(current_pair_ == NULL);
+ current_pair_ = &pair;
+ current_set_ = new JSObjectsRetainerTree();
+ self_ = &cluster;
+ tree->ForEach(this);
+ sim_list_.Add(pair);
+ current_pair_ = NULL;
+ current_set_ = NULL;
+ self_ = NULL;
+}
+
+
+void ClustersCoarser::Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ ASSERT(current_pair_ != NULL);
+ ASSERT(current_set_ != NULL);
+ ASSERT(self_ != NULL);
+ JSObjectsRetainerTree::Locator loc;
+ if (JSObjectsCluster::Compare(*self_, cluster) == 0) {
+ current_pair_->refs.Add(JSObjectsCluster(JSObjectsCluster::SELF));
+ return;
+ }
+ JSObjectsCluster eq = GetCoarseEquivalent(cluster);
+ if (!eq.is_null()) {
+ if (current_set_->Find(eq, &loc)) return;
+ current_pair_->refs.Add(eq);
+ current_set_->Insert(eq, &loc);
+ } else {
+ current_pair_->refs.Add(cluster);
+ }
+}
+
+
+void ClustersCoarser::Process(JSObjectsRetainerTree* tree) {
+ int last_eq_clusters = -1;
+ for (int i = 0; i < kMaxPassesCount; ++i) {
+ sim_list_.Clear();
+ const int curr_eq_clusters = DoProcess(tree);
+ // If no new cluster equivalents were discovered, stop processing.
+ if (last_eq_clusters == curr_eq_clusters) break;
+ last_eq_clusters = curr_eq_clusters;
+ }
+}
+
+
+int ClustersCoarser::DoProcess(JSObjectsRetainerTree* tree) {
+ tree->ForEach(this);
+ sim_list_.Iterate(ClusterBackRefs::SortRefsIterator);
+ sim_list_.Sort(ClusterBackRefsCmp);
+ return FillEqualityTree();
+}
+
+
+JSObjectsCluster ClustersCoarser::GetCoarseEquivalent(
+ const JSObjectsCluster& cluster) {
+ if (!cluster.can_be_coarsed()) return JSObjectsCluster();
+ EqualityTree::Locator loc;
+ return eq_tree_.Find(cluster, &loc) ? loc.value() : JSObjectsCluster();
+}
+
+
+bool ClustersCoarser::HasAnEquivalent(const JSObjectsCluster& cluster) {
+ // Return true for coarsible clusters that have a non-identical equivalent.
+ if (!cluster.can_be_coarsed()) return false;
+ JSObjectsCluster eq = GetCoarseEquivalent(cluster);
+ return !eq.is_null() && JSObjectsCluster::Compare(cluster, eq) != 0;
+}
+
+
+int ClustersCoarser::FillEqualityTree() {
+ int eq_clusters_count = 0;
+ int eq_to = 0;
+ bool first_added = false;
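+ // sim_list_ is sorted, so clusters with identical back reference lists form
+ // contiguous runs; every member of a run (including, once, its first
+ // element) is mapped to that first element in eq_tree_.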
+ for (int i = 1; i < sim_list_.length(); ++i) {
+ if (ClusterBackRefs::Compare(sim_list_[i], sim_list_[eq_to]) == 0) {
+ EqualityTree::Locator loc;
+ if (!first_added) {
+ // Add self-equivalence, if we have more than one item in this
+ // equivalence class.
+ eq_tree_.Insert(sim_list_[eq_to].cluster, &loc);
+ loc.set_value(sim_list_[eq_to].cluster);
+ first_added = true;
+ }
+ eq_tree_.Insert(sim_list_[i].cluster, &loc);
+ loc.set_value(sim_list_[eq_to].cluster);
+ ++eq_clusters_count;
+ } else {
+ eq_to = i;
+ first_added = false;
+ }
+ }
+ return eq_clusters_count;
+}
+
+
+const JSObjectsCluster ClustersCoarser::ClusterEqualityConfig::kNoKey;
+const JSObjectsCluster ClustersCoarser::ClusterEqualityConfig::kNoValue;
+const JSObjectsRetainerTreeConfig::Key JSObjectsRetainerTreeConfig::kNoKey;
+const JSObjectsRetainerTreeConfig::Value JSObjectsRetainerTreeConfig::kNoValue =
+ NULL;
+
+
+RetainerHeapProfile::RetainerHeapProfile()
+ : zscope_(DELETE_ON_EXIT) {
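+ // Record references coming from the GC roots first, so that objects
+ // referenced directly from roots get the special "(roots)" cluster
+ // as a retainer.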
+ JSObjectsCluster roots(JSObjectsCluster::ROOTS);
+ ReferencesExtractor extractor(roots, this);
+ Heap::IterateRoots(&extractor);
+}
+
+
+void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster,
+ HeapObject* ref) {
+ JSObjectsCluster ref_cluster = Clusterizer::Clusterize(ref);
+ JSObjectsRetainerTree::Locator ref_loc;
+ if (retainers_tree_.Insert(ref_cluster, &ref_loc)) {
+ ref_loc.set_value(new JSObjectsClusterTree());
+ }
+ JSObjectsClusterTree* referenced_by = ref_loc.value();
+ Clusterizer::InsertReferenceIntoTree(referenced_by, cluster);
+}
+
+
+void RetainerHeapProfile::CollectStats(HeapObject* obj) {
+ if (obj->IsJSObject()) {
+ const JSObjectsCluster cluster = Clusterizer::Clusterize(obj);
+ ReferencesExtractor extractor(cluster, this);
+ obj->Iterate(&extractor);
+ } else if (obj->IsJSGlobalPropertyCell()) {
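+ // References through global property cells are attributed to the special
+ // "(global property)" cluster instead of the cell object itself.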
+ JSObjectsCluster global_prop(JSObjectsCluster::GLOBAL_PROPERTY);
+ ReferencesExtractor extractor(global_prop, this);
+ obj->Iterate(&extractor);
+ }
+}
+
+
+void RetainerHeapProfile::DebugPrintStats(
+ RetainerHeapProfile::Printer* printer) {
+ coarser_.Process(&retainers_tree_);
+ // Print clusters that have no equivalents, aggregating their retainers.
+ AggregatingRetainerTreePrinter agg_printer(&coarser_, printer);
+ retainers_tree_.ForEach(&agg_printer);
+ // Now aggregate clusters that have equivalents...
+ RetainerTreeAggregator aggregator(&coarser_);
+ aggregator.Process(&retainers_tree_);
+ // ...and print them.
+ SimpleRetainerTreePrinter s_printer(printer);
+ aggregator.output_tree().ForEach(&s_printer);
+}
+
+
+void RetainerHeapProfile::PrintStats() {
+ RetainersPrinter printer;
+ DebugPrintStats(&printer);
+}
+
+
+//
+// HeapProfiler class implementation.
+//
+void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
+ InstanceType type = obj->map()->instance_type();
+ ASSERT(0 <= type && type <= LAST_TYPE);
+ info[type].increment_number(1);
+ info[type].increment_bytes(obj->Size());
+}
+
+
+void HeapProfiler::WriteSample() {
+ LOG(HeapSampleBeginEvent("Heap", "allocated"));
+ LOG(HeapSampleStats(
+ "Heap", "allocated", Heap::Capacity(), Heap::SizeOfObjects()));
+
+ HistogramInfo info[LAST_TYPE+1];
+#define DEF_TYPE_NAME(name) info[name].set_name(#name);
+ INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
+#undef DEF_TYPE_NAME
+
+ ConstructorHeapProfile js_cons_profile;
+ RetainerHeapProfile js_retainer_profile;
+ HeapIterator iterator;
+ while (iterator.has_next()) {
+ HeapObject* obj = iterator.next();
+ CollectStats(obj, info);
+ js_cons_profile.CollectStats(obj);
+ js_retainer_profile.CollectStats(obj);
+ }
+
+ // Lump all the string types together.
+ int string_number = 0;
+ int string_bytes = 0;
+#define INCREMENT_SIZE(type, size, name, camel_name) \
+ string_number += info[type].number(); \
+ string_bytes += info[type].bytes();
+ STRING_TYPE_LIST(INCREMENT_SIZE)
+#undef INCREMENT_SIZE
+ if (string_bytes > 0) {
+ LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
+ }
+
+ for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
+ if (info[i].bytes() > 0) {
+ LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
+ info[i].bytes()));
+ }
+ }
+
+ js_cons_profile.PrintStats();
+ js_retainer_profile.PrintStats();
+
+ LOG(HeapSampleEndEvent("Heap", "allocated"));
+}
+
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/heap-profiler.h b/V8Binding/v8/src/heap-profiler.h
new file mode 100644
index 0000000..7fda883
--- /dev/null
+++ b/V8Binding/v8/src/heap-profiler.h
@@ -0,0 +1,263 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HEAP_PROFILER_H_
+#define V8_HEAP_PROFILER_H_
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+// The HeapProfiler writes data to the log files, which can be postprocessed
+// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
+class HeapProfiler {
+ public:
+ // Write a single heap sample to the log file.
+ static void WriteSample();
+
+ private:
+ // Update the array info with stats from obj.
+ static void CollectStats(HeapObject* obj, HistogramInfo* info);
+};
+
+
+// JSObjectsCluster describes a group of JS objects that are
+// considered equivalent in terms of a particular profile.
+class JSObjectsCluster BASE_EMBEDDED {
+ public:
+ // These special cases are used in retainer profile.
+ enum SpecialCase {
+ ROOTS = 1,
+ GLOBAL_PROPERTY = 2,
+ SELF = 3 // This case is used in ClustersCoarser only.
+ };
+
+ JSObjectsCluster() : constructor_(NULL), instance_(NULL) {}
+ explicit JSObjectsCluster(String* constructor)
+ : constructor_(constructor), instance_(NULL) {}
+ explicit JSObjectsCluster(SpecialCase special)
+ : constructor_(FromSpecialCase(special)), instance_(NULL) {}
+ JSObjectsCluster(String* constructor, Object* instance)
+ : constructor_(constructor), instance_(instance) {}
+
+ static int CompareConstructors(const JSObjectsCluster& a,
+ const JSObjectsCluster& b) {
+ // Strings are unique, so it is sufficient to compare their pointers.
+ return a.constructor_ == b.constructor_ ? 0
+ : (a.constructor_ < b.constructor_ ? -1 : 1);
+ }
+ static int Compare(const JSObjectsCluster& a, const JSObjectsCluster& b) {
+ // Strings are unique, so it is sufficient to compare their pointers.
+ const int cons_cmp = CompareConstructors(a, b);
+ return cons_cmp == 0 ?
+ (a.instance_ == b.instance_ ? 0 : (a.instance_ < b.instance_ ? -1 : 1))
+ : cons_cmp;
+ }
+ static int Compare(const JSObjectsCluster* a, const JSObjectsCluster* b) {
+ return Compare(*a, *b);
+ }
+
+ bool is_null() const { return constructor_ == NULL; }
+ bool can_be_coarsed() const { return instance_ != NULL; }
+ String* constructor() const { return constructor_; }
+
+ void Print(StringStream* accumulator) const;
+ // Allows null clusters to be printed.
+ void DebugPrint(StringStream* accumulator) const;
+
+ private:
+ static String* FromSpecialCase(SpecialCase special) {
+ // We use symbols that are illegal JS identifiers to identify special cases.
+ // Their actual value is irrelevant for us.
+ switch (special) {
+ case ROOTS: return Heap::result_symbol();
+ case GLOBAL_PROPERTY: return Heap::code_symbol();
+ case SELF: return Heap::catch_var_symbol();
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+ }
+
+ String* constructor_;
+ Object* instance_;
+};
+
+
+struct JSObjectsClusterTreeConfig {
+ typedef JSObjectsCluster Key;
+ typedef NumberAndSizeInfo Value;
+ static const Key kNoKey;
+ static const Value kNoValue;
+ static int Compare(const Key& a, const Key& b) {
+ return Key::Compare(a, b);
+ }
+};
+typedef ZoneSplayTree<JSObjectsClusterTreeConfig> JSObjectsClusterTree;
+
+
+// ConstructorHeapProfile is responsible for gathering and logging
+// the "constructor profile" of JS objects allocated on the heap.
+// It is run during the garbage collection cycle, so it doesn't need
+// to use handles.
+class ConstructorHeapProfile BASE_EMBEDDED {
+ public:
+ ConstructorHeapProfile();
+ virtual ~ConstructorHeapProfile() {}
+ void CollectStats(HeapObject* obj);
+ void PrintStats();
+ // Used by ZoneSplayTree::ForEach. Made virtual to allow overriding in tests.
+ virtual void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size);
+
+ private:
+ ZoneScope zscope_;
+ JSObjectsClusterTree js_objects_info_tree_;
+};
+
+
+// JSObjectsRetainerTree is used to represent retainer graphs using
+// adjacency list form:
+//
+// Cluster -> (Cluster -> NumberAndSizeInfo)
+//
+// Subordinate splay trees are stored by pointer. They are zone-allocated,
+// so there is no need to manage their lifetime.
+//
+struct JSObjectsRetainerTreeConfig {
+ typedef JSObjectsCluster Key;
+ typedef JSObjectsClusterTree* Value;
+ static const Key kNoKey;
+ static const Value kNoValue;
+ static int Compare(const Key& a, const Key& b) {
+ return Key::Compare(a, b);
+ }
+};
+typedef ZoneSplayTree<JSObjectsRetainerTreeConfig> JSObjectsRetainerTree;
+
+
+class ClustersCoarser BASE_EMBEDDED {
+ public:
+ ClustersCoarser();
+
+ // Processes a given retainer graph.
+ void Process(JSObjectsRetainerTree* tree);
+
+ // Returns an equivalent cluster (can be the cluster itself).
+ // If the given cluster doesn't have an equivalent, returns null cluster.
+ JSObjectsCluster GetCoarseEquivalent(const JSObjectsCluster& cluster);
+ // Returns whether a cluster can be substituted with an equivalent and thus
+ // skipped in some cases.
+ bool HasAnEquivalent(const JSObjectsCluster& cluster);
+
+ // Used by JSObjectsRetainerTree::ForEach.
+ void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
+ void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size);
+
+ private:
+ // Stores a list of back references for a cluster.
+ struct ClusterBackRefs {
+ explicit ClusterBackRefs(const JSObjectsCluster& cluster_);
+ ClusterBackRefs(const ClusterBackRefs& src);
+ ClusterBackRefs& operator=(const ClusterBackRefs& src);
+
+ static int Compare(const ClusterBackRefs& a, const ClusterBackRefs& b);
+ void SortRefs() { refs.Sort(JSObjectsCluster::Compare); }
+ static void SortRefsIterator(ClusterBackRefs* ref) { ref->SortRefs(); }
+
+ JSObjectsCluster cluster;
+ ZoneList<JSObjectsCluster> refs;
+ };
+ typedef ZoneList<ClusterBackRefs> SimilarityList;
+
+ // A tree for storing a list of equivalents for a cluster.
+ struct ClusterEqualityConfig {
+ typedef JSObjectsCluster Key;
+ typedef JSObjectsCluster Value;
+ static const Key kNoKey;
+ static const Value kNoValue;
+ static int Compare(const Key& a, const Key& b) {
+ return Key::Compare(a, b);
+ }
+ };
+ typedef ZoneSplayTree<ClusterEqualityConfig> EqualityTree;
+
+ static int ClusterBackRefsCmp(const ClusterBackRefs* a,
+ const ClusterBackRefs* b) {
+ return ClusterBackRefs::Compare(*a, *b);
+ }
+ int DoProcess(JSObjectsRetainerTree* tree);
+ int FillEqualityTree();
+
+ static const int kInitialBackrefsListCapacity = 2;
+ static const int kInitialSimilarityListCapacity = 2000;
+ // Number of passes for finding equivalents. Limits the length of paths
+ // that can be considered equivalent.
+ static const int kMaxPassesCount = 10;
+
+ ZoneScope zscope_;
+ SimilarityList sim_list_;
+ EqualityTree eq_tree_;
+ ClusterBackRefs* current_pair_;
+ JSObjectsRetainerTree* current_set_;
+ const JSObjectsCluster* self_;
+};
+
+
+// RetainerHeapProfile is responsible for gathering and logging
+// the "retainer profile" of JS objects allocated on the heap.
+// It is run during the garbage collection cycle, so it doesn't need
+// to use handles.
+class RetainerHeapProfile BASE_EMBEDDED {
+ public:
+ class Printer {
+ public:
+ virtual ~Printer() {}
+ virtual void PrintRetainers(const JSObjectsCluster& cluster,
+ const StringStream& retainers) = 0;
+ };
+
+ RetainerHeapProfile();
+ void CollectStats(HeapObject* obj);
+ void PrintStats();
+ void DebugPrintStats(Printer* printer);
+ void StoreReference(const JSObjectsCluster& cluster, HeapObject* ref);
+
+ private:
+ ZoneScope zscope_;
+ JSObjectsRetainerTree retainers_tree_;
+ ClustersCoarser coarser_;
+};
+
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
+
+#endif // V8_HEAP_PROFILER_H_
diff --git a/V8Binding/v8/src/heap.cc b/V8Binding/v8/src/heap.cc
index c29815e..dcc25a3 100644
--- a/V8Binding/v8/src/heap.cc
+++ b/V8Binding/v8/src/heap.cc
@@ -33,6 +33,7 @@
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "debug.h"
+#include "heap-profiler.h"
#include "global-handles.h"
#include "mark-compact.h"
#include "natives.h"
@@ -76,14 +77,17 @@ int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
int Heap::semispace_size_ = 512*KB;
int Heap::old_generation_size_ = 128*MB;
int Heap::initial_semispace_size_ = 128*KB;
+size_t Heap::code_range_size_ = 0;
#elif defined(V8_TARGET_ARCH_X64)
-int Heap::semispace_size_ = 8*MB;
+int Heap::semispace_size_ = 16*MB;
int Heap::old_generation_size_ = 1*GB;
int Heap::initial_semispace_size_ = 1*MB;
+size_t Heap::code_range_size_ = 256*MB;
#else
-int Heap::semispace_size_ = 4*MB;
+int Heap::semispace_size_ = 8*MB;
int Heap::old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 512*KB;
+size_t Heap::code_range_size_ = 0;
#endif
GCCallback Heap::global_gc_prologue_callback_ = NULL;
@@ -496,8 +500,8 @@ void Heap::PostGarbageCollectionProcessing() {
DisableAssertNoAllocation allow_allocation;
GlobalHandles::PostGarbageCollectionProcessing();
}
- // Update flat string readers.
- FlatStringReader::PostGarbageCollectionProcessing();
+ // Update relocatables.
+ Relocatable::PostGarbageCollectionProcessing();
}
@@ -636,15 +640,7 @@ static void VerifyNonPointerSpacePointers() {
HeapObjectIterator code_it(Heap::code_space());
while (code_it.has_next()) {
HeapObject* object = code_it.next();
- if (object->IsCode()) {
- Code::cast(object)->ConvertICTargetsFromAddressToObject();
- object->Iterate(&v);
- Code::cast(object)->ConvertICTargetsFromObjectToAddress();
- } else {
- // If we find non-code objects in code space (e.g., free list
- // nodes) we want to verify them as well.
- object->Iterate(&v);
- }
+ object->Iterate(&v);
}
HeapObjectIterator data_it(Heap::old_data_space());
@@ -1257,6 +1253,10 @@ Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
// spaces.
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+ // New space can't cope with forced allocation.
+ if (always_allocate()) space = OLD_DATA_SPACE;
+
Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
@@ -1268,7 +1268,8 @@ Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
Object* Heap::AllocateHeapNumber(double value) {
// Use general version, if we're forced to always allocate.
- if (always_allocate()) return AllocateHeapNumber(value, NOT_TENURED);
+ if (always_allocate()) return AllocateHeapNumber(value, TENURED);
+
// This version of AllocateHeapNumber is optimized for
// allocation in new space.
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
@@ -1319,7 +1320,7 @@ bool Heap::CreateApiObjects() {
void Heap::CreateCEntryStub() {
- CEntryStub stub;
+ CEntryStub stub(1);
set_c_entry_code(*stub.GetCode());
}
@@ -1589,6 +1590,31 @@ Object* Heap::SmiOrNumberFromDouble(double value,
}
+Object* Heap::NumberToString(Object* number) {
+ Object* cached = GetNumberStringCache(number);
+ if (cached != undefined_value()) {
+ return cached;
+ }
+
+ char arr[100];
+ Vector<char> buffer(arr, ARRAY_SIZE(arr));
+ const char* str;
+ if (number->IsSmi()) {
+ int num = Smi::cast(number)->value();
+ str = IntToCString(num, buffer);
+ } else {
+ double num = HeapNumber::cast(number)->value();
+ str = DoubleToCString(num, buffer);
+ }
+ Object* result = AllocateStringFromAscii(CStrVector(str));
+
+ if (!result->IsFailure()) {
+ SetNumberStringCache(number, String::cast(result));
+ }
+ return result;
+}
+
+
Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
return SmiOrNumberFromDouble(value,
true /* number object must be new */,
@@ -1869,6 +1895,9 @@ Object* Heap::AllocateByteArray(int length) {
AllocationSpace space =
size > MaxObjectSizeInPagedSpace() ? LO_SPACE : NEW_SPACE;
+ // New space can't cope with forced allocation.
+ if (always_allocate()) space = LO_SPACE;
+
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
@@ -1896,6 +1925,9 @@ Object* Heap::AllocatePixelArray(int length,
PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ // New space can't cope with forced allocation.
+ if (always_allocate()) space = OLD_DATA_SPACE;
+
Object* result = AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
@@ -1930,11 +1962,11 @@ Object* Heap::CreateCode(const CodeDesc& desc,
// Initialize the object
HeapObject::cast(result)->set_map(code_map());
Code* code = Code::cast(result);
+ ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
code->set_instruction_size(desc.instr_size);
code->set_relocation_size(desc.reloc_size);
code->set_sinfo_size(sinfo_size);
code->set_flags(flags);
- code->set_ic_flag(Code::IC_TARGET_IS_ADDRESS);
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@@ -1975,6 +2007,7 @@ Object* Heap::CopyCode(Code* code) {
obj_size);
// Relocate the copy.
Code* new_code = Code::cast(result);
+ ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
new_code->Relocate(new_addr - old_addr);
return new_code;
}
@@ -2540,13 +2573,17 @@ Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+ // New space can't cope with forced allocation.
+ if (always_allocate()) space = OLD_DATA_SPACE;
+
int size = SeqAsciiString::SizeFor(length);
Object* result = Failure::OutOfMemoryException();
if (space == NEW_SPACE) {
result = size <= kMaxObjectSizeInNewSpace
? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRawFixedArray(size);
+ : lo_space_->AllocateRaw(size);
} else {
if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
result = AllocateRaw(size, space, OLD_DATA_SPACE);
@@ -2573,13 +2610,17 @@ Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+ // New space can't cope with forced allocation.
+ if (always_allocate()) space = OLD_DATA_SPACE;
+
int size = SeqTwoByteString::SizeFor(length);
Object* result = Failure::OutOfMemoryException();
if (space == NEW_SPACE) {
result = size <= kMaxObjectSizeInNewSpace
? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRawFixedArray(size);
+ : lo_space_->AllocateRaw(size);
} else {
if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
result = AllocateRaw(size, space, OLD_DATA_SPACE);
@@ -2617,7 +2658,7 @@ Object* Heap::AllocateEmptyFixedArray() {
Object* Heap::AllocateRawFixedArray(int length) {
// Use the general function if we're forced to always allocate.
- if (always_allocate()) return AllocateFixedArray(length, NOT_TENURED);
+ if (always_allocate()) return AllocateFixedArray(length, TENURED);
// Allocate the raw data for a fixed array.
int size = FixedArray::SizeFor(length);
return size <= kMaxObjectSizeInNewSpace
@@ -2670,6 +2711,9 @@ Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
ASSERT(empty_fixed_array()->IsFixedArray());
if (length == 0) return empty_fixed_array();
+ // New space can't cope with forced allocation.
+ if (always_allocate()) pretenure = TENURED;
+
int size = FixedArray::SizeFor(length);
Object* result = Failure::OutOfMemoryException();
if (pretenure != TENURED) {
@@ -2795,7 +2839,9 @@ STRUCT_LIST(MAKE_CASE)
bool Heap::IdleNotification() {
- static const int kIdlesBeforeCollection = 7;
+ static const int kIdlesBeforeScavenge = 4;
+ static const int kIdlesBeforeMarkSweep = 7;
+ static const int kIdlesBeforeMarkCompact = 8;
static int number_idle_notifications = 0;
static int last_gc_count = gc_count_;
@@ -2808,19 +2854,22 @@ bool Heap::IdleNotification() {
last_gc_count = gc_count_;
}
- if (number_idle_notifications >= kIdlesBeforeCollection) {
- // The first time through we collect without forcing compaction.
- // The second time through we force compaction and quit.
- bool force_compaction =
- number_idle_notifications > kIdlesBeforeCollection;
- CollectAllGarbage(force_compaction);
+ if (number_idle_notifications == kIdlesBeforeScavenge) {
+ CollectGarbage(0, NEW_SPACE);
+ new_space_.Shrink();
last_gc_count = gc_count_;
- if (force_compaction) {
- // Shrink new space.
- new_space_.Shrink();
- number_idle_notifications = 0;
- finished = true;
- }
+
+ } else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
+ CollectAllGarbage(false);
+ new_space_.Shrink();
+ last_gc_count = gc_count_;
+
+ } else if (number_idle_notifications == kIdlesBeforeMarkCompact) {
+ CollectAllGarbage(true);
+ new_space_.Shrink();
+ last_gc_count = gc_count_;
+ number_idle_notifications = 0;
+ finished = true;
}
// Uncommit unused memory in new space.
@@ -3091,6 +3140,8 @@ void Heap::IterateStrongRoots(ObjectVisitor* v) {
SYNCHRONIZE_TAG("bootstrapper");
Top::Iterate(v);
SYNCHRONIZE_TAG("top");
+ Relocatable::Iterate(v);
+ SYNCHRONIZE_TAG("relocatable");
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug::Iterate(v);
@@ -3185,63 +3236,57 @@ bool Heap::Setup(bool create_heap_objects) {
if (!ConfigureHeapDefault()) return false;
}
- // Setup memory allocator and allocate an initial chunk of memory. The
- // initial chunk is double the size of the new space to ensure that we can
- // find a pair of semispaces that are contiguous and aligned to their size.
+ // Setup memory allocator and reserve a chunk of memory for new
+ // space. The chunk is double the size of the new space to ensure
+ // that we can find a pair of semispaces that are contiguous and
+ // aligned to their size.
if (!MemoryAllocator::Setup(MaxCapacity())) return false;
- void* chunk
- = MemoryAllocator::ReserveInitialChunk(2 * young_generation_size_);
+ void* chunk =
+ MemoryAllocator::ReserveInitialChunk(2 * young_generation_size_);
if (chunk == NULL) return false;
- // Put the initial chunk of the old space at the start of the initial
- // chunk, then the two new space semispaces, then the initial chunk of
- // code space. Align the pair of semispaces to their size, which must be
- // a power of 2.
+ // Align the pair of semispaces to their size, which must be a power
+ // of 2.
ASSERT(IsPowerOf2(young_generation_size_));
- Address code_space_start = reinterpret_cast<Address>(chunk);
- Address new_space_start = RoundUp(code_space_start, young_generation_size_);
- Address old_space_start = new_space_start + young_generation_size_;
- int code_space_size = new_space_start - code_space_start;
- int old_space_size = young_generation_size_ - code_space_size;
-
- // Initialize new space.
+ Address new_space_start =
+ RoundUp(reinterpret_cast<byte*>(chunk), young_generation_size_);
if (!new_space_.Setup(new_space_start, young_generation_size_)) return false;
- // Initialize old space, set the maximum capacity to the old generation
- // size. It will not contain code.
+ // Initialize old pointer space.
old_pointer_space_ =
new OldSpace(old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
if (old_pointer_space_ == NULL) return false;
- if (!old_pointer_space_->Setup(old_space_start, old_space_size >> 1)) {
- return false;
- }
+ if (!old_pointer_space_->Setup(NULL, 0)) return false;
+
+ // Initialize old data space.
old_data_space_ =
new OldSpace(old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
if (old_data_space_ == NULL) return false;
- if (!old_data_space_->Setup(old_space_start + (old_space_size >> 1),
- old_space_size >> 1)) {
- return false;
- }
+ if (!old_data_space_->Setup(NULL, 0)) return false;
// Initialize the code space, set its maximum capacity to the old
// generation size. It needs executable memory.
+ // On 64-bit platform(s), we put all code objects in a 2 GB range of
+ // virtual address space, so that they can call each other with near calls.
+ if (code_range_size_ > 0) {
+ if (!CodeRange::Setup(code_range_size_)) {
+ return false;
+ }
+ }
+
code_space_ =
new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE);
if (code_space_ == NULL) return false;
- if (!code_space_->Setup(code_space_start, code_space_size)) return false;
+ if (!code_space_->Setup(NULL, 0)) return false;
// Initialize map space.
map_space_ = new MapSpace(kMaxMapSpaceSize, MAP_SPACE);
if (map_space_ == NULL) return false;
- // Setting up a paged space without giving it a virtual memory range big
- // enough to hold at least a page will cause it to allocate.
if (!map_space_->Setup(NULL, 0)) return false;
// Initialize global property cell space.
cell_space_ = new CellSpace(old_generation_size_, CELL_SPACE);
if (cell_space_ == NULL) return false;
- // Setting up a paged space without giving it a virtual memory range big
- // enough to hold at least a page will cause it to allocate.
if (!cell_space_->Setup(NULL, 0)) return false;
// The large object code space may contain code or data. We set the memory
@@ -3553,163 +3598,6 @@ void HeapIterator::reset() {
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
-namespace {
-
-// JSConstructorProfile is responsible for gathering and logging
-// "constructor profile" of JS object allocated on heap.
-// It is run during garbage collection cycle, thus it doesn't need
-// to use handles.
-class JSConstructorProfile BASE_EMBEDDED {
- public:
- JSConstructorProfile() : zscope_(DELETE_ON_EXIT) {}
- void CollectStats(JSObject* obj);
- void PrintStats();
- // Used by ZoneSplayTree::ForEach.
- void Call(String* name, const NumberAndSizeInfo& number_and_size);
- private:
- struct TreeConfig {
- typedef String* Key;
- typedef NumberAndSizeInfo Value;
- static const Key kNoKey;
- static const Value kNoValue;
- // Strings are unique, so it is sufficient to compare their pointers.
- static int Compare(const Key& a, const Key& b) {
- return a == b ? 0 : (a < b ? -1 : 1);
- }
- };
-
- typedef ZoneSplayTree<TreeConfig> JSObjectsInfoTree;
- static int CalculateJSObjectNetworkSize(JSObject* obj);
-
- ZoneScope zscope_;
- JSObjectsInfoTree js_objects_info_tree_;
-};
-
-const JSConstructorProfile::TreeConfig::Key
- JSConstructorProfile::TreeConfig::kNoKey = NULL;
-const JSConstructorProfile::TreeConfig::Value
- JSConstructorProfile::TreeConfig::kNoValue;
-
-
-int JSConstructorProfile::CalculateJSObjectNetworkSize(JSObject* obj) {
- int size = obj->Size();
- // If 'properties' and 'elements' are non-empty (thus, non-shared),
- // take their size into account.
- if (FixedArray::cast(obj->properties())->length() != 0) {
- size += obj->properties()->Size();
- }
- if (FixedArray::cast(obj->elements())->length() != 0) {
- size += obj->elements()->Size();
- }
- return size;
-}
-
-
-void JSConstructorProfile::Call(String* name,
- const NumberAndSizeInfo& number_and_size) {
- SmartPointer<char> s_name;
- if (name != NULL) {
- s_name = name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- }
- LOG(HeapSampleJSConstructorEvent(*s_name,
- number_and_size.number(),
- number_and_size.bytes()));
-}
-
-
-void JSConstructorProfile::CollectStats(JSObject* obj) {
- String* constructor_func = NULL;
- if (obj->map()->constructor()->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
- SharedFunctionInfo* sfi = constructor->shared();
- String* name = String::cast(sfi->name());
- constructor_func = name->length() > 0 ? name : sfi->inferred_name();
- } else if (obj->IsJSFunction()) {
- constructor_func = Heap::function_class_symbol();
- }
- JSObjectsInfoTree::Locator loc;
- if (!js_objects_info_tree_.Find(constructor_func, &loc)) {
- js_objects_info_tree_.Insert(constructor_func, &loc);
- }
- NumberAndSizeInfo number_and_size = loc.value();
- number_and_size.increment_number(1);
- number_and_size.increment_bytes(CalculateJSObjectNetworkSize(obj));
- loc.set_value(number_and_size);
-}
-
-
-void JSConstructorProfile::PrintStats() {
- js_objects_info_tree_.ForEach(this);
-}
-
-} // namespace
-#endif
-
-
-//
-// HeapProfiler class implementation.
-//
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
- InstanceType type = obj->map()->instance_type();
- ASSERT(0 <= type && type <= LAST_TYPE);
- info[type].increment_number(1);
- info[type].increment_bytes(obj->Size());
-}
-#endif
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void HeapProfiler::WriteSample() {
- LOG(HeapSampleBeginEvent("Heap", "allocated"));
- LOG(HeapSampleStats(
- "Heap", "allocated", Heap::Capacity(), Heap::SizeOfObjects()));
-
- HistogramInfo info[LAST_TYPE+1];
-#define DEF_TYPE_NAME(name) info[name].set_name(#name);
- INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
-#undef DEF_TYPE_NAME
-
- JSConstructorProfile js_cons_profile;
- HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
- CollectStats(obj, info);
- if (obj->IsJSObject()) {
- js_cons_profile.CollectStats(JSObject::cast(obj));
- }
- }
-
- // Lump all the string types together.
- int string_number = 0;
- int string_bytes = 0;
-#define INCREMENT_SIZE(type, size, name, camel_name) \
- string_number += info[type].number(); \
- string_bytes += info[type].bytes();
- STRING_TYPE_LIST(INCREMENT_SIZE)
-#undef INCREMENT_SIZE
- if (string_bytes > 0) {
- LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
- }
-
- for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
- if (info[i].bytes() > 0) {
- LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
- info[i].bytes()));
- }
- }
-
- js_cons_profile.PrintStats();
-
- LOG(HeapSampleEndEvent("Heap", "allocated"));
-}
-
-
-#endif
-
-
-
#ifdef DEBUG
static bool search_for_any_global;
@@ -3752,10 +3640,6 @@ static void MarkObjectRecursively(Object** p) {
return;
}
- if (obj->IsCode()) {
- Code::cast(obj)->ConvertICTargetsFromAddressToObject();
- }
-
// not visited yet
Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
@@ -3811,10 +3695,6 @@ static void UnmarkObjectRecursively(Object** p) {
obj->IterateBody(Map::cast(map_p)->instance_type(),
obj->SizeFromMap(Map::cast(map_p)),
&unmark_visitor);
-
- if (obj->IsCode()) {
- Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
- }
}
diff --git a/V8Binding/v8/src/heap.h b/V8Binding/v8/src/heap.h
index 028dd11..e878efc 100644
--- a/V8Binding/v8/src/heap.h
+++ b/V8Binding/v8/src/heap.h
@@ -882,11 +882,14 @@ class Heap : public AllStatic {
kRootListLength
};
+ static Object* NumberToString(Object* number);
+
private:
static int semispace_size_;
static int initial_semispace_size_;
static int young_generation_size_;
static int old_generation_size_;
+ static size_t code_range_size_;
// For keeping track of how much data has survived
// scavenge since last new space expansion.
@@ -1443,20 +1446,6 @@ class DisableAssertNoAllocation {
#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
-// The HeapProfiler writes data to the log files, which can be postprocessed
-// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
-class HeapProfiler {
- public:
- // Write a single heap sample to the log file.
- static void WriteSample();
-
- private:
- // Update the array info with stats from obj.
- static void CollectStats(HeapObject* obj, HistogramInfo* info);
-};
-#endif
-
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
diff --git a/V8Binding/v8/src/ia32/assembler-ia32.cc b/V8Binding/v8/src/ia32/assembler-ia32.cc
index 02bde2a..b8dda17 100644
--- a/V8Binding/v8/src/ia32/assembler-ia32.cc
+++ b/V8Binding/v8/src/ia32/assembler-ia32.cc
@@ -157,6 +157,9 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
for (int i = 0; i < instruction_count; i++) {
*(pc_ + i) = *(instructions + i);
}
+
+ // Indicate that code has changed.
+ CPU::FlushICache(pc_, instruction_count);
}
@@ -164,12 +167,25 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Additional guard int3 instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
// Call instruction takes up 5 bytes and int3 takes up one byte.
- int code_size = 5 + guard_bytes;
+ static const int kCallCodeSize = 5;
+ int code_size = kCallCodeSize + guard_bytes;
- // Patch the code.
+ // Create a code patcher.
CodePatcher patcher(pc_, code_size);
+
+ // Add a label for checking the size of the code used for returning.
+#ifdef DEBUG
+ Label check_codesize;
+ patcher.masm()->bind(&check_codesize);
+#endif
+
+ // Patch the code.
patcher.masm()->call(target, RelocInfo::NONE);
+ // Check that the size of the code generated is as expected.
+ ASSERT_EQ(kCallCodeSize,
+ patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
+
// Add the requested number of int3 instructions after the call.
for (int i = 0; i < guard_bytes; i++) {
patcher.masm()->int3();
@@ -721,10 +737,10 @@ void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- UNIMPLEMENTED();
- USE(cc);
- USE(dst);
- USE(src);
+ // Opcode: 0f 40 + cc /r
+ EMIT(0x0F);
+ EMIT(0x40 + cc);
+ emit_operand(dst, src);
}
@@ -866,6 +882,13 @@ void Assembler::cmp(const Operand& op, const Immediate& imm) {
}
+void Assembler::cmp(const Operand& op, Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_arith(7, op, Immediate(handle));
+}
+
+
void Assembler::cmpb_al(const Operand& op) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1947,6 +1970,17 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::comisd(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x2F);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::movdbl(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
diff --git a/V8Binding/v8/src/ia32/assembler-ia32.h b/V8Binding/v8/src/ia32/assembler-ia32.h
index 6a90e07..610017b 100644
--- a/V8Binding/v8/src/ia32/assembler-ia32.h
+++ b/V8Binding/v8/src/ia32/assembler-ia32.h
@@ -437,7 +437,7 @@ class Assembler : public Malloced {
// Distance between the address of the code target in the call instruction
// and the return address
- static const int kPatchReturnSequenceLength = kPointerSize;
+ static const int kCallTargetAddressOffset = kPointerSize;
// Distance between start of patched return sequence and the emitted address
// to jump to.
static const int kPatchReturnSequenceAddressOffset = 1; // JMP imm32.
@@ -539,6 +539,7 @@ class Assembler : public Malloced {
void cmp(Register reg, Handle<Object> handle);
void cmp(Register reg, const Operand& op);
void cmp(const Operand& op, const Immediate& imm);
+ void cmp(const Operand& op, Handle<Object> handle);
void dec_b(Register dst);
@@ -719,6 +720,8 @@ class Assembler : public Malloced {
void mulsd(XMMRegister dst, XMMRegister src);
void divsd(XMMRegister dst, XMMRegister src);
+ void comisd(XMMRegister dst, XMMRegister src);
+
// Use either movsd or movlpd.
void movdbl(XMMRegister dst, const Operand& src);
void movdbl(const Operand& dst, XMMRegister src);
diff --git a/V8Binding/v8/src/ia32/builtins-ia32.cc b/V8Binding/v8/src/ia32/builtins-ia32.cc
index 7793e49..ad44026 100644
--- a/V8Binding/v8/src/ia32/builtins-ia32.cc
+++ b/V8Binding/v8/src/ia32/builtins-ia32.cc
@@ -42,10 +42,10 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
__ mov(Operand::StaticVariable(passed), edi);
// The actual argument count has already been loaded into register
- // eax, but JumpToBuiltin expects eax to contain the number of
+ // eax, but JumpToRuntime expects eax to contain the number of
// arguments including the receiver.
__ inc(eax);
- __ JumpToBuiltin(ExternalReference(id));
+ __ JumpToRuntime(ExternalReference(id));
}
@@ -129,12 +129,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// eax: initial map
__ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
__ shl(edi, kPointerSizeLog2);
- __ AllocateObjectInNewSpace(edi,
- ebx,
- edi,
- no_reg,
- &rt_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields.
// eax: initial map
// ebx: JSObject
@@ -189,14 +184,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ebx: JSObject
// edi: start of next object (will be start of FixedArray)
// edx: number of elements in properties array
- __ AllocateObjectInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- edx,
- edi,
- ecx,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
+ __ AllocateInNewSpace(FixedArray::kHeaderSize,
+ times_pointer_size,
+ edx,
+ edi,
+ ecx,
+ no_reg,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
// Initialize the FixedArray.
// ebx: JSObject
@@ -658,6 +653,467 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the global context.
+ __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(result, FieldOperand(result, GlobalObject::kGlobalContextOffset));
+ // Load the Array function from the global context.
+ __ mov(result,
+ Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Number of empty elements to allocate for an empty array.
+static const int kPreallocatedArrayElements = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. If the parameter initial_capacity is larger than zero, an elements
+// backing store is allocated with this size and filled with the hole values.
+// Otherwise the elements backing store is set to the empty FixedArray.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+ Register array_function,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int initial_capacity,
+ Label* gc_required) {
+ ASSERT(initial_capacity >= 0);
+
+ // Load the initial map from the array function.
+ __ mov(scratch1, FieldOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Allocate the JSArray object together with space for a fixed array with the
+ // requested elements.
+ int size = JSArray::kSize;
+ if (initial_capacity > 0) {
+ size += FixedArray::SizeFor(initial_capacity);
+ }
+ __ AllocateInNewSpace(size,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // scratch1: initial map
+ // scratch2: start of next object
+ __ mov(FieldOperand(result, JSObject::kMapOffset), scratch1);
+ __ mov(FieldOperand(result, JSArray::kPropertiesOffset),
+ Factory::empty_fixed_array());
+ // Field JSArray::kElementsOffset is initialized later.
+ __ mov(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
+
+ // If no storage is requested for the elements array just set the empty
+ // fixed array.
+ if (initial_capacity == 0) {
+ __ mov(FieldOperand(result, JSArray::kElementsOffset),
+ Factory::empty_fixed_array());
+ return;
+ }
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // scratch2: start of next object
+ __ lea(scratch1, Operand(result, JSArray::kSize));
+ __ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);
+
+ // Initialize the FixedArray and fill it with holes. FixedArray length is not
+ // stored as a smi.
+ // result: JSObject
+ // scratch1: elements array
+ // scratch2: start of next object
+ __ mov(FieldOperand(scratch1, JSObject::kMapOffset),
+ Factory::fixed_array_map());
+ __ mov(FieldOperand(scratch1, Array::kLengthOffset),
+ Immediate(initial_capacity));
+
+ // Fill the FixedArray with the hole value. Inline the code if short.
+ // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
+ static const int kLoopUnfoldLimit = 4;
+ ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
+ if (initial_capacity <= kLoopUnfoldLimit) {
+ // Use a scratch register here to have only one reloc info when unfolding
+ // the loop.
+ __ mov(scratch3, Factory::the_hole_value());
+ for (int i = 0; i < initial_capacity; i++) {
+ __ mov(FieldOperand(scratch1,
+ FixedArray::kHeaderSize + i * kPointerSize),
+ scratch3);
+ }
+ } else {
+ Label loop, entry;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(Operand(scratch1, 0), Factory::the_hole_value());
+ __ add(Operand(scratch1), Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmp(scratch1, Operand(scratch2));
+ __ j(below, &loop);
+ }
+}
+
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register and beginning and end of the FixedArray elements
+// storage is put into registers elements_array and elements_array_end (see
+// below for when that is not the case). If the parameter fill_with_hole is
+// true, the allocated elements backing store is filled with the hole values;
+// otherwise it is left uninitialized. When the backing store is filled the
+// register elements_array is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+ Register array_function, // Array function.
+ Register array_size, // As a smi.
+ Register result,
+ Register elements_array,
+ Register elements_array_end,
+ Register scratch,
+ bool fill_with_hole,
+ Label* gc_required) {
+ Label not_empty, allocated;
+
+ // Load the initial map from the array function.
+ __ mov(elements_array,
+ FieldOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check whether an empty sized array is requested.
+ __ test(array_size, Operand(array_size));
+ __ j(not_zero, &not_empty);
+
+ // If an empty array is requested, allocate a small elements array anyway. This
+ // keeps the code below free of special casing for the empty array.
+ int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size,
+ result,
+ elements_array_end,
+ scratch,
+ gc_required,
+ TAG_OBJECT);
+ __ jmp(&allocated);
+
+ // Allocate the JSArray object together with space for a FixedArray with the
+ // requested elements.
+ __ bind(&not_empty);
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
+ times_half_pointer_size, // array_size is a smi.
+ array_size,
+ result,
+ elements_array_end,
+ scratch,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // elements_array: initial map
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ __ bind(&allocated);
+ __ mov(FieldOperand(result, JSObject::kMapOffset), elements_array);
+ __ mov(elements_array, Factory::empty_fixed_array());
+ __ mov(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
+ // Field JSArray::kElementsOffset is initialized later.
+ __ mov(FieldOperand(result, JSArray::kLengthOffset), array_size);
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ __ lea(elements_array, Operand(result, JSArray::kSize));
+ __ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);
+
+ // Initialize the fixed array. FixedArray length is not stored as a smi.
+ // result: JSObject
+ // elements_array: elements array
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ ASSERT(kSmiTag == 0);
+ __ shr(array_size, kSmiTagSize); // Convert from smi to value.
+ __ mov(FieldOperand(elements_array, JSObject::kMapOffset),
+ Factory::fixed_array_map());
+ Label not_empty_2, fill_array;
+ __ test(array_size, Operand(array_size));
+ __ j(not_zero, &not_empty_2);
+ // Length of the FixedArray is the number of pre-allocated elements even
+ // though the actual JSArray has length 0.
+ __ mov(FieldOperand(elements_array, Array::kLengthOffset),
+ Immediate(kPreallocatedArrayElements));
+ __ jmp(&fill_array);
+ __ bind(&not_empty_2);
+ // For non-empty JSArrays the length of the FixedArray and the JSArray is the
+ // same.
+ __ mov(FieldOperand(elements_array, Array::kLengthOffset), array_size);
+
+ // Fill the allocated FixedArray with the hole value if requested.
+ // result: JSObject
+ // elements_array: elements array
+ // elements_array_end: start of next object
+ __ bind(&fill_array);
+ if (fill_with_hole) {
+ Label loop, entry;
+ __ mov(scratch, Factory::the_hole_value());
+ __ lea(elements_array, Operand(elements_array,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(Operand(elements_array, 0), scratch);
+ __ add(Operand(elements_array), Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmp(elements_array, Operand(elements_array_end));
+ __ j(below, &loop);
+ }
+}
+
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code, the runtime is called. This
+// function assumes the following state:
+// edi: constructor (built-in Array function)
+// eax: argc
+// esp[0]: return address
+// esp[4]: last argument
+// This function is used for both construct and normal calls of Array. Whether
+// it is a construct call or not is indicated by the construct_call parameter.
+// The only difference between handling a construct call and a normal call is
+// that for a construct call the constructor function in edi needs to be
+// preserved for entering the generic code. In both cases argc in eax needs to
+// be preserved.
+static void ArrayNativeCode(MacroAssembler* masm,
+ bool construct_call,
+ Label* call_generic_code) {
+ Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call;
+
+ // Push the constructor and argc. No need to tag argc as a smi, as there will
+ // be no garbage collection with this on the stack.
+ int push_count = 0;
+ if (construct_call) {
+ push_count++;
+ __ push(edi);
+ }
+ push_count++;
+ __ push(eax);
+
+ // Check for array construction with zero arguments.
+ __ test(eax, Operand(eax));
+ __ j(not_zero, &argc_one_or_more);
+
+ // Handle construction of an empty array.
+ AllocateEmptyJSArray(masm,
+ edi,
+ eax,
+ ebx,
+ ecx,
+ edi,
+ kPreallocatedArrayElements,
+ &prepare_generic_code_call);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+ __ pop(ebx);
+ if (construct_call) {
+ __ pop(edi);
+ }
+ __ ret(kPointerSize);
+
+ // Check for one argument. Bail out if the argument is not a smi or if it is
+ // negative.
+ __ bind(&argc_one_or_more);
+ __ cmp(eax, 1);
+ __ j(not_equal, &argc_two_or_more);
+ ASSERT(kSmiTag == 0);
+ __ test(Operand(esp, (push_count + 1) * kPointerSize),
+ Immediate(kIntptrSignBit | kSmiTagMask));
+ __ j(not_zero, &prepare_generic_code_call);
+
+ // Handle construction of an empty array of a certain size. Get the size from
+ // the stack and bail out if the size is too large to actually allocate an elements
+ // array.
+ __ mov(edx, Operand(esp, (push_count + 1) * kPointerSize));
+ ASSERT(kSmiTag == 0);
+ __ cmp(edx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
+ __ j(greater_equal, &prepare_generic_code_call);
+
+ // edx: array_size (smi)
+ // edi: constructor
+ // esp[0]: argc
+ // esp[4]: constructor (only if construct_call)
+ // esp[8]: return address
+ // esp[C]: argument
+ AllocateJSArray(masm,
+ edi,
+ edx,
+ eax,
+ ebx,
+ ecx,
+ edi,
+ true,
+ &prepare_generic_code_call);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+ __ pop(ebx);
+ if (construct_call) {
+ __ pop(edi);
+ }
+ __ ret(2 * kPointerSize);
+
+ // Handle construction of an array from a list of arguments.
+ __ bind(&argc_two_or_more);
+ ASSERT(kSmiTag == 0);
+ __ shl(eax, kSmiTagSize); // Convert argc to a smi.
+ // eax: array_size (smi)
+ // edi: constructor
+ // esp[0]: argc
+ // esp[4]: constructor (only if construct_call)
+ // esp[8]: return address
+ // esp[C]: last argument
+ AllocateJSArray(masm,
+ edi,
+ eax,
+ ebx,
+ ecx,
+ edx,
+ edi,
+ false,
+ &prepare_generic_code_call);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+ __ mov(eax, ebx);
+ __ pop(ebx);
+ if (construct_call) {
+ __ pop(edi);
+ }
+ __ push(eax);
+ // eax: JSArray
+ // ebx: argc
+ // edx: elements_array_end (untagged)
+ // esp[0]: JSArray
+ // esp[4]: return address
+ // esp[8]: last argument
+
+ // Location of the last argument
+ __ lea(edi, Operand(esp, 2 * kPointerSize));
+
+ // Location of the first array element (the fill_with_hole argument to
+ // AllocateJSArray is false, so the FixedArray is returned in ecx).
+ __ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // ebx: argc
+ // edx: location of the first array element
+ // edi: location of the last argument
+ // esp[0]: JSArray
+ // esp[4]: return address
+ // esp[8]: last argument
+ Label loop, entry;
+ __ mov(ecx, ebx);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
+ __ mov(Operand(edx, 0), eax);
+ __ add(Operand(edx), Immediate(kPointerSize));
+ __ bind(&entry);
+ __ dec(ecx);
+ __ j(greater_equal, &loop);
+
+ // Remove caller arguments from the stack and return.
+ // ebx: argc
+ // esp[0]: JSArray
+ // esp[4]: return address
+ // esp[8]: last argument
+ __ pop(eax);
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
+ __ push(ecx);
+ __ ret(0);
+
+ // Restore argc and constructor before running the generic code.
+ __ bind(&prepare_generic_code_call);
+ __ pop(eax);
+ if (construct_call) {
+ __ pop(edi);
+ }
+ __ jmp(call_generic_code);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, edi);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array function should be a map.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // This check catches both a NULL pointer and a Smi.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, "Unexpected initial map for Array function");
+ __ CmpObjectType(ebx, MAP_TYPE, ecx);
+ __ Assert(equal, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ ArrayNativeCode(masm, false, &generic_array_code);
+
+ // Jump to the generic array code in case the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
+ Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
+ Handle<Code> array_code(code);
+ __ jmp(array_code, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- edi : constructor
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+ Label generic_constructor;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the builtin Array function,
+ // which always has a map.
+ GenerateLoadArrayFunction(masm, ebx);
+ __ cmp(edi, Operand(ebx));
+ __ Assert(equal, "Unexpected Array function");
+ // Initial map for the builtin Array function should be a map.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // This check catches both a NULL pointer and a Smi.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, "Unexpected initial map for Array function");
+ __ CmpObjectType(ebx, MAP_TYPE, ecx);
+ __ Assert(equal, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as constructor.
+ ArrayNativeCode(masm, true, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Handle<Code> generic_construct_stub(code);
+ __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+}
+
+
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ebp);
__ mov(ebp, Operand(esp));
diff --git a/V8Binding/v8/src/ia32/cfg-ia32.cc b/V8Binding/v8/src/ia32/cfg-ia32.cc
deleted file mode 100644
index 58985a5..0000000
--- a/V8Binding/v8/src/ia32/cfg-ia32.cc
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cfg.h"
-#include "codegen-inl.h"
-#include "codegen-ia32.h"
-#include "macro-assembler-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void InstructionBlock::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- {
- Comment cmt(masm, "[ InstructionBlock");
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- // If the location of the current instruction is a temp, then the
- // instruction cannot be in tail position in the block. Allocate the
- // temp based on peeking ahead to the next instruction.
- Instruction* instr = instructions_[i];
- Location* loc = instr->location();
- if (loc->is_temporary()) {
- instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
- }
- instructions_[i]->Compile(masm);
- }
- }
- successor_->Compile(masm);
-}
-
-
-void EntryNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- Label deferred_enter, deferred_exit;
- {
- Comment cmnt(masm, "[ EntryNode");
- __ push(ebp);
- __ mov(ebp, esp);
- __ push(esi);
- __ push(edi);
- int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
- if (count > 0) {
- __ Set(eax, Immediate(Factory::undefined_value()));
- for (int i = 0; i < count; i++) {
- __ push(eax);
- }
- }
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- if (FLAG_check_stack) {
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(below, &deferred_enter);
- __ bind(&deferred_exit);
- }
- }
- successor_->Compile(masm);
- if (FLAG_check_stack) {
- Comment cmnt(masm, "[ Deferred Stack Check");
- __ bind(&deferred_enter);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ jmp(&deferred_exit);
- }
-}
-
-
-void ExitNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- Comment cmnt(masm, "[ ExitNode");
- if (FLAG_trace) {
- __ push(eax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- __ RecordJSReturn();
- __ mov(esp, ebp);
- __ pop(ebp);
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- __ ret((count + 1) * kPointerSize);
-}
-
-
-void PropLoadInstr::Compile(MacroAssembler* masm) {
- // The key should not be on the stack---if it is a compiler-generated
- // temporary it is in the accumulator.
- ASSERT(!key()->is_on_stack());
-
- Comment cmnt(masm, "[ Load from Property");
- // If the key is known at compile-time we may be able to use a load IC.
- bool is_keyed_load = true;
- if (key()->is_constant()) {
- // Still use the keyed load IC if the key can be parsed as an integer so
- // we will get into the case that handles [] on string objects.
- Handle<Object> key_val = Constant::cast(key())->handle();
- uint32_t ignored;
- if (key_val->IsSymbol() &&
- !String::cast(*key_val)->AsArrayIndex(&ignored)) {
- is_keyed_load = false;
- }
- }
-
- if (!object()->is_on_stack()) object()->Push(masm);
- // A test eax instruction after the call indicates to the IC code that it
- // was inlined. Ensure there is not one here.
- if (is_keyed_load) {
- key()->Push(masm);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- __ pop(ebx); // Discard key.
- } else {
- key()->Get(masm, ecx);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- }
- __ pop(ebx); // Discard receiver.
- location()->Set(masm, eax);
-}
-
-
-void BinaryOpInstr::Compile(MacroAssembler* masm) {
- // The right-hand value should not be on the stack---if it is a
- // compiler-generated temporary it is in the accumulator.
- ASSERT(!right()->is_on_stack());
-
- Comment cmnt(masm, "[ BinaryOpInstr");
- // We can overwrite one of the operands if it is a temporary.
- OverwriteMode mode = NO_OVERWRITE;
- if (left()->is_temporary()) {
- mode = OVERWRITE_LEFT;
- } else if (right()->is_temporary()) {
- mode = OVERWRITE_RIGHT;
- }
-
- // Push both operands and call the specialized stub.
- if (!left()->is_on_stack()) left()->Push(masm);
- right()->Push(masm);
- GenericBinaryOpStub stub(op(), mode, SMI_CODE_IN_STUB);
- __ CallStub(&stub);
- location()->Set(masm, eax);
-}
-
-
-void ReturnInstr::Compile(MacroAssembler* masm) {
- // The location should be 'Effect'. As a side effect, move the value to
- // the accumulator.
- Comment cmnt(masm, "[ ReturnInstr");
- value_->Get(masm, eax);
-}
-
-
-void Constant::Get(MacroAssembler* masm, Register reg) {
- __ mov(reg, Immediate(handle_));
-}
-
-
-void Constant::Push(MacroAssembler* masm) {
- __ push(Immediate(handle_));
-}
-
-
-static Operand ToOperand(SlotLocation* loc) {
- switch (loc->type()) {
- case Slot::PARAMETER: {
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- return Operand(ebp, (1 + count - loc->index()) * kPointerSize);
- }
- case Slot::LOCAL: {
- const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
- return Operand(ebp, kOffset - loc->index() * kPointerSize);
- }
- default:
- UNREACHABLE();
- return Operand(eax);
- }
-}
-
-
-void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ mov(ToOperand(loc), Immediate(handle_));
-}
-
-
-void SlotLocation::Get(MacroAssembler* masm, Register reg) {
- __ mov(reg, ToOperand(this));
-}
-
-
-void SlotLocation::Set(MacroAssembler* masm, Register reg) {
- __ mov(ToOperand(this), reg);
-}
-
-
-void SlotLocation::Push(MacroAssembler* masm) {
- __ push(ToOperand(this));
-}
-
-
-void SlotLocation::Move(MacroAssembler* masm, Value* value) {
- // We dispatch to the value because in some cases (temp or constant)
- // we can use a single instruction.
- value->MoveToSlot(masm, this);
-}
-
-
-void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- // The accumulator is not live across a MoveInstr.
- __ mov(eax, ToOperand(this));
- __ mov(ToOperand(loc), eax);
-}
-
-
-void TempLocation::Get(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(eax)) __ mov(reg, eax);
- break;
- case STACK:
- __ pop(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Set(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(eax)) __ mov(eax, reg);
- break;
- case STACK:
- __ push(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Push(MacroAssembler* masm) {
- switch (where_) {
- case ACCUMULATOR:
- __ push(eax);
- break;
- case STACK:
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Move(MacroAssembler* masm, Value* value) {
- switch (where_) {
- case ACCUMULATOR:
- value->Get(masm, eax);
- break;
- case STACK:
- value->Push(masm);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- switch (where_) {
- case ACCUMULATOR:
- __ mov(ToOperand(loc), eax);
- break;
- case STACK:
- __ pop(ToOperand(loc));
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/V8Binding/v8/src/ia32/codegen-ia32.cc b/V8Binding/v8/src/ia32/codegen-ia32.cc
index c2728d7..0e314b9 100644
--- a/V8Binding/v8/src/ia32/codegen-ia32.cc
+++ b/V8Binding/v8/src/ia32/codegen-ia32.cc
@@ -768,6 +768,11 @@ class FloatingPointHelper : public AllStatic {
static void CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch);
+ // Test if operands are numbers (smi or HeapNumber objects), and load
+ // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
+ // either operand is not a number. Operands are in edx and eax.
+ // Leaves operands unchanged.
+ static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
// Allocate a heap number in new space with undefined value.
// Returns tagged pointer in eax, or jumps to need_gc if new space is full.
static void AllocateHeapNumber(MacroAssembler* masm,
@@ -2300,7 +2305,6 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void CodeGenerator::VisitDeclaration(Declaration* node) {
Comment cmnt(masm_, "[ Declaration");
- CodeForStatementPosition(node);
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
Slot* slot = var->slot();
@@ -2539,10 +2543,12 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
DeleteFrame();
+#ifdef ENABLE_DEBUGGER_SUPPORT
// Check that the size of the code used for returning matches what is
// expected by the debugger.
ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+#endif
}
@@ -4328,7 +4334,6 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
void CodeGenerator::VisitAssignment(Assignment* node) {
Comment cmnt(masm_, "[ Assignment");
- CodeForStatementPosition(node);
{ Reference target(this, node->target());
if (target.is_illegal()) {
@@ -4410,8 +4415,6 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
void CodeGenerator::VisitThrow(Throw* node) {
Comment cmnt(masm_, "[ Throw");
- CodeForStatementPosition(node);
-
Load(node->exception());
Result result = frame_->CallRuntime(Runtime::kThrow, 1);
frame_->Push(&result);
@@ -4428,12 +4431,10 @@ void CodeGenerator::VisitProperty(Property* node) {
void CodeGenerator::VisitCall(Call* node) {
Comment cmnt(masm_, "[ Call");
+ Expression* function = node->expression();
ZoneList<Expression*>* args = node->arguments();
- CodeForStatementPosition(node);
-
// Check if the function is a variable or a property.
- Expression* function = node->expression();
Variable* var = function->AsVariableProxy()->AsVariable();
Property* property = function->AsProperty();
@@ -4446,7 +4447,63 @@ void CodeGenerator::VisitCall(Call* node) {
// is resolved in cache misses (this also holds for megamorphic calls).
// ------------------------------------------------------------------------
- if (var != NULL && !var->is_this() && var->is_global()) {
+ if (var != NULL && var->is_possibly_eval()) {
+ // ----------------------------------
+ // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
+ // ----------------------------------
+
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to
+ // resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+
+ // Prepare the stack for the call to the resolved function.
+ Load(function);
+
+ // Allocate a frame slot for the receiver.
+ frame_->Push(Factory::undefined_value());
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Prepare the stack for the call to ResolvePossiblyDirectEval.
+ frame_->PushElementAt(arg_count + 1);
+ if (arg_count > 0) {
+ frame_->PushElementAt(arg_count);
+ } else {
+ frame_->Push(Factory::undefined_value());
+ }
+
+ // Resolve the call.
+ Result result =
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
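+ // (The runtime call returns a FixedArray: element 0 holds the function to
+ //  call and element 1 the receiver; both are copied back into the frame
+ //  below.)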
+
+ // Touch up the stack with the right values for the function and the
+ // receiver. Use a scratch register to avoid destroying the result.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ __ mov(scratch.reg(), FieldOperand(result.reg(), FixedArray::kHeaderSize));
+ frame_->SetElementAt(arg_count + 1, &scratch);
+
+ // We can reuse the result register now.
+ frame_->Spill(result.reg());
+ __ mov(result.reg(),
+ FieldOperand(result.reg(), FixedArray::kHeaderSize + kPointerSize));
+ frame_->SetElementAt(arg_count, &result);
+
+ // Call the function.
+ CodeForSourcePosition(node->position());
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
+ result = frame_->CallStub(&call_function, arg_count + 1);
+
+ // Restore the context and overwrite the function on the stack with
+ // the result.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &result);
+
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is global
// ----------------------------------
@@ -4586,7 +4643,6 @@ void CodeGenerator::VisitCall(Call* node) {
void CodeGenerator::VisitCallNew(CallNew* node) {
Comment cmnt(masm_, "[ CallNew");
- CodeForStatementPosition(node);
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
@@ -4616,66 +4672,6 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
}
-void CodeGenerator::VisitCallEval(CallEval* node) {
- Comment cmnt(masm_, "[ CallEval");
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
- // the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
-
- ZoneList<Expression*>* args = node->arguments();
- Expression* function = node->expression();
-
- CodeForStatementPosition(node);
-
- // Prepare the stack for the call to the resolved function.
- Load(function);
-
- // Allocate a frame slot for the receiver.
- frame_->Push(Factory::undefined_value());
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Prepare the stack for the call to ResolvePossiblyDirectEval.
- frame_->PushElementAt(arg_count + 1);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(Factory::undefined_value());
- }
-
- // Resolve the call.
- Result result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
-
- // Touch up the stack with the right values for the function and the
- // receiver. Use a scratch register to avoid destroying the result.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ mov(scratch.reg(), FieldOperand(result.reg(), FixedArray::kHeaderSize));
- frame_->SetElementAt(arg_count + 1, &scratch);
-
- // We can reuse the result register now.
- frame_->Spill(result.reg());
- __ mov(result.reg(),
- FieldOperand(result.reg(), FixedArray::kHeaderSize + kPointerSize));
- frame_->SetElementAt(arg_count, &result);
-
- // Call the function.
- CodeForSourcePosition(node->position());
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
- result = frame_->CallStub(&call_function, arg_count + 1);
-
- // Restore the context and overwrite the function on the stack with
- // the result.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &result);
-}
-
-
void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
@@ -6699,41 +6695,79 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::DIV: {
// eax: y
// edx: x
- FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
- // Fast-case: Both operands are numbers.
- // Allocate a heap number, if needed.
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- __ mov(eax, Operand(edx));
- // Fall through!
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- FloatingPointHelper::AllocateHeapNumber(masm,
- &call_runtime,
- ecx,
- edx,
- eax);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- FloatingPointHelper::LoadFloatOperands(masm, ecx);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
+ if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
+ CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
+ FloatingPointHelper::LoadSse2Operands(masm, &call_runtime);
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
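+ // (The operands and the result stay in xmm registers here, so the result
+ //  HeapNumber can be allocated after the arithmetic, reusing one of the
+ //  operand objects when the overwrite mode allows it.)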
+ // Allocate a heap number, if needed.
+ Label skip_allocation;
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ __ mov(eax, Operand(edx));
+ // Fall through!
+ case OVERWRITE_RIGHT:
+ // If the argument in eax is already an object, we skip the
+ // allocation of a heap number.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ FloatingPointHelper::AllocateHeapNumber(masm,
+ &call_runtime,
+ ecx,
+ edx,
+ eax);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ __ ret(2 * kPointerSize);
+
+ } else { // SSE2 not available, use FPU.
+ FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+ // Allocate a heap number, if needed.
+ Label skip_allocation;
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ __ mov(eax, Operand(edx));
+ // Fall through!
+ case OVERWRITE_RIGHT:
+ // If the argument in eax is already an object, we skip the
+ // allocation of a heap number.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ FloatingPointHelper::AllocateHeapNumber(masm,
+ &call_runtime,
+ ecx,
+ edx,
+ eax);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ FloatingPointHelper::LoadFloatOperands(masm, ecx);
+
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(2 * kPointerSize);
}
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(2 * kPointerSize);
}
case Token::MOD: {
// For MOD we go directly to runtime in the non-smi case.
@@ -6886,7 +6920,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ j(above_equal, &string1);
// First and second argument are strings.
- __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2);
+ __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
// Only first argument is a string.
__ bind(&string1);
@@ -6949,12 +6983,12 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
Register scratch2,
Register result) {
// Allocate heap number in new space.
- __ AllocateObjectInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- need_gc,
- TAG_OBJECT);
+ __ AllocateInNewSpace(HeapNumber::kSize,
+ result,
+ scratch1,
+ scratch2,
+ need_gc,
+ TAG_OBJECT);
// Set the map.
__ mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -6981,6 +7015,38 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
}
+void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm,
+ Label* not_numbers) {
+ Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
+ // Load operand in edx into xmm0, or branch to not_numbers.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ j(not_equal, not_numbers); // Argument in edx is not a number.
+ __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ bind(&load_eax);
+ // Load operand in eax into xmm1, or branch to not_numbers.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ j(equal, &load_float_eax);
+ __ jmp(not_numbers); // Argument in eax is not a number.
+ __ bind(&load_smi_edx);
+ __ sar(edx, 1); // Untag smi before converting to float.
+ __ cvtsi2sd(xmm0, Operand(edx));
+ __ shl(edx, 1); // Retag smi for heap number overwriting test.
+ __ jmp(&load_eax);
+ __ bind(&load_smi_eax);
+ __ sar(eax, 1); // Untag smi before converting to float.
+ __ cvtsi2sd(xmm1, Operand(eax));
+ __ shl(eax, 1); // Retag smi for heap number overwriting test.
+ __ jmp(&done);
+ __ bind(&load_float_eax);
+ __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ bind(&done);
+}
+
+
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
Register scratch) {
Label load_smi_1, load_smi_2, done_load_1, done;
@@ -7175,7 +7241,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ pop(ebx); // Return address.
__ push(edx);
__ push(ebx);
- __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1);
+ __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
}
@@ -7200,7 +7266,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3);
+ __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
}
@@ -7343,28 +7409,56 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Inlined floating point compare.
// Call builtin if operands are not floating point or smi.
Label check_for_symbols;
- FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols, ebx);
- FloatingPointHelper::LoadFloatOperands(masm, ecx);
- __ FCmp();
+ Label unordered;
+ if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
+ CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
+ CpuFeatures::Scope use_cmov(CpuFeatures::CMOV);
+
+ FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols);
+ __ comisd(xmm0, xmm1);
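+ // (comisd sets the flags like an unsigned integer compare: above means
+ //  xmm0 > xmm1, below means xmm0 < xmm1, and the parity flag signals an
+ //  unordered (NaN) result.)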
+
+ // Jump to builtin for NaN.
+ __ j(parity_even, &unordered, not_taken);
+ __ mov(eax, 0); // equal
+ __ mov(ecx, Immediate(Smi::FromInt(1)));
+ __ cmov(above, eax, Operand(ecx));
+ __ mov(ecx, Immediate(Smi::FromInt(-1)));
+ __ cmov(below, eax, Operand(ecx));
+ __ ret(2 * kPointerSize);
+ } else {
+ FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols, ebx);
+ FloatingPointHelper::LoadFloatOperands(masm, ecx);
+ __ FCmp();
- // Jump to builtin for NaN.
- __ j(parity_even, &call_builtin, not_taken);
+ // Jump to builtin for NaN.
+ __ j(parity_even, &unordered, not_taken);
- // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
- Label below_lbl, above_lbl;
- // use edx, eax to convert unsigned to signed comparison
- __ j(below, &below_lbl, not_taken);
- __ j(above, &above_lbl, not_taken);
+ Label below_lbl, above_lbl;
+ // Return a result of -1, 0, or 1 to indicate the result of the comparison.
+ __ j(below, &below_lbl, not_taken);
+ __ j(above, &above_lbl, not_taken);
- __ xor_(eax, Operand(eax)); // equal
- __ ret(2 * kPointerSize);
+ __ xor_(eax, Operand(eax)); // equal
+ // Both arguments were pushed in case a runtime call was needed.
+ __ ret(2 * kPointerSize);
- __ bind(&below_lbl);
- __ mov(eax, -1);
- __ ret(2 * kPointerSize);
+ __ bind(&below_lbl);
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ __ ret(2 * kPointerSize);
- __ bind(&above_lbl);
- __ mov(eax, 1);
+ __ bind(&above_lbl);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ __ ret(2 * kPointerSize); // eax, edx were pushed
+ }
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc_ != not_equal);
+ if (cc_ == less || cc_ == less_equal) {
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ }
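+ // (A 1 for less/less_equal and a -1 for the remaining conditions ensures
+ //  the caller's final test of the result against zero fails, so comparisons
+ //  involving NaN come out false.)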
__ ret(2 * kPointerSize); // eax, edx were pushed
// Fast negative check for symbol-to-symbol equality.
@@ -7436,7 +7530,7 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
__ push(eax);
// Do tail-call to runtime routine.
- __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1);
+ __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
}
@@ -7468,6 +7562,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
+int CEntryStub::MinorKey() {
+ ASSERT(result_size_ <= 2);
+ // Result returned in eax, or eax+edx if result_size_ is 2.
+ return 0;
+}
+
+
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// eax holds the exception.
diff --git a/V8Binding/v8/src/ia32/codegen-ia32.h b/V8Binding/v8/src/ia32/codegen-ia32.h
index afdbffe..142a5a1 100644
--- a/V8Binding/v8/src/ia32/codegen-ia32.h
+++ b/V8Binding/v8/src/ia32/codegen-ia32.h
@@ -553,7 +553,7 @@ class CodeGenerator: public AstVisitor {
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(AstNode* node);
+ void CodeForStatementPosition(Statement* stmt);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
diff --git a/V8Binding/v8/src/ia32/debug-ia32.cc b/V8Binding/v8/src/ia32/debug-ia32.cc
index 4ef0862..7e0dfd1 100644
--- a/V8Binding/v8/src/ia32/debug-ia32.cc
+++ b/V8Binding/v8/src/ia32/debug-ia32.cc
@@ -36,9 +36,7 @@ namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
-// A debug break in the frame exit code is identified by a call instruction.
bool BreakLocationIterator::IsDebugBreakAtReturn() {
- // Opcode E8 is call.
return Debug::IsDebugBreakAtReturn(rinfo());
}
@@ -49,7 +47,7 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Debug::kIa32JSReturnSequenceLength >=
Debug::kIa32CallInstructionLength);
- rinfo()->PatchCodeWithCall(Debug::debug_break_return_entry()->entry(),
+ rinfo()->PatchCodeWithCall(Debug::debug_break_return()->entry(),
Debug::kIa32JSReturnSequenceLength - Debug::kIa32CallInstructionLength);
}
@@ -61,11 +59,11 @@ void BreakLocationIterator::ClearDebugBreakAtReturn() {
}
-// Check whether the JS frame exit code has been patched with a debug break.
+// A debug break in the frame exit code is identified by the JS frame exit code
+// having been patched with a call instruction.
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
- // Opcode E8 is call.
- return (*(rinfo->pc()) == 0xE8);
+ return rinfo->IsCallInstruction();
}
@@ -194,17 +192,6 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateReturnDebugBreakEntry(MacroAssembler* masm) {
- // OK to clobber ebx as we are returning from a JS function through the code
- // generated by CodeGenerator::GenerateReturnSequence()
- ExternalReference debug_break_return =
- ExternalReference(Debug_Address::DebugBreakReturn());
- __ mov(ebx, Operand::StaticVariable(debug_break_return));
- __ add(Operand(ebx), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(Operand(ebx));
-}
-
-
void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
// Register state for stub CallFunction (from CallFunctionStub in ic-ia32.cc).
// ----------- S t a t e -------------
diff --git a/V8Binding/v8/src/ia32/ic-ia32.cc b/V8Binding/v8/src/ia32/ic-ia32.cc
index e39808b..f7369a8 100644
--- a/V8Binding/v8/src/ia32/ic-ia32.cc
+++ b/V8Binding/v8/src/ia32/ic-ia32.cc
@@ -404,7 +404,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ push(eax);
__ push(ecx);
// Do tail-call to runtime routine.
- __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3);
+ __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
// Check whether the elements is a pixel array.
// eax: value
@@ -421,21 +421,22 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ sar(ebx, kSmiTagSize); // Untag the index.
__ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
+ __ mov(edx, eax); // Save the value.
__ sar(eax, kSmiTagSize); // Untag the value.
{ // Clamp the value to [0..255].
- Label done, check_255;
- __ cmp(eax, 0);
- __ j(greater_equal, &check_255);
- __ mov(eax, Immediate(0));
- __ jmp(&done);
- __ bind(&check_255);
- __ cmp(eax, 255);
- __ j(less_equal, &done);
+ Label done, is_negative;
+ __ test(eax, Immediate(0xFFFFFF00));
+ __ j(zero, &done);
+ __ j(negative, &is_negative);
__ mov(eax, Immediate(255));
+ __ jmp(&done);
+ __ bind(&is_negative);
+ __ xor_(eax, Operand(eax)); // Clear eax.
__ bind(&done);
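+ // (Equivalent to: if ((v & 0xFFFFFF00) != 0) v = v < 0 ? 0 : 255; a single
+ //  test of the upper 24 bits detects any value outside [0, 255].)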
}
__ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
__ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+ __ mov(eax, edx); // Return the original value.
__ ret(0);
// Extra capacity case: Check if there is extra capacity to
@@ -667,7 +668,7 @@ void CallIC::Generate(MacroAssembler* masm,
__ push(ebx);
// Call the entry.
- CEntryStub stub;
+ CEntryStub stub(1);
__ mov(eax, Immediate(2));
__ mov(ebx, Immediate(f));
__ CallStub(&stub);
@@ -799,7 +800,7 @@ void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
__ push(ebx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(f, 2);
+ __ TailCallRuntime(f, 2, 1);
}
@@ -840,7 +841,7 @@ void KeyedStoreIC::RestoreInlinedVersion(Address address) {
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// The address of the instruction following the call.
Address test_instruction_address =
- address + Assembler::kPatchReturnSequenceLength;
+ address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a test eax, nothing
// was inlined.
if (*test_instruction_address != kTestEaxByte) return false;
@@ -867,7 +868,7 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
static bool PatchInlinedMapCheck(Address address, Object* map) {
Address test_instruction_address =
- address + Assembler::kPatchReturnSequenceLength;
+ address + Assembler::kCallTargetAddressOffset;
// The keyed load has a fast inlined case if the IC call instruction
// is immediately followed by a test instruction.
if (*test_instruction_address != kTestEaxByte) return false;
@@ -927,7 +928,7 @@ void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
__ push(ebx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(f, 2);
+ __ TailCallRuntime(f, 2, 1);
}
@@ -967,7 +968,7 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
// Perform tail call to the entry.
__ TailCallRuntime(
- ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);
+ ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
}
@@ -987,7 +988,7 @@ void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
__ push(ebx);
// Perform tail call to the entry.
- __ TailCallRuntime(f, 3);
+ __ TailCallRuntime(f, 3, 1);
}
@@ -1010,7 +1011,7 @@ void KeyedStoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
__ push(ecx);
// Do tail-call to runtime routine.
- __ TailCallRuntime(f, 3);
+ __ TailCallRuntime(f, 3, 1);
}
@@ -1032,7 +1033,7 @@ void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
// Do tail-call to runtime routine.
__ TailCallRuntime(
- ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);
+ ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
}
#undef __
diff --git a/V8Binding/v8/src/ia32/macro-assembler-ia32.cc b/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
index 241275d..e83bb92 100644
--- a/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
+++ b/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
@@ -319,7 +319,7 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::FCmp() {
- fcompp();
+ fucompp();
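+ // fucompp, unlike fcompp, does not raise an invalid-operation exception for
+ // quiet NaN operands; the outcome is read back via the status word below.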
push(eax);
fnstsw_ax();
sahf();
@@ -664,12 +664,12 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
}
-void MacroAssembler::AllocateObjectInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
@@ -692,14 +692,14 @@ void MacroAssembler::AllocateObjectInNewSpace(int object_size,
}
-void MacroAssembler::AllocateObjectInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
@@ -722,12 +722,12 @@ void MacroAssembler::AllocateObjectInNewSpace(int header_size,
}
-void MacroAssembler::AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
@@ -896,20 +896,21 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
- int num_arguments) {
+ int num_arguments,
+ int result_size) {
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
Set(eax, Immediate(num_arguments));
- JumpToBuiltin(ext);
+ JumpToRuntime(ext);
}
-void MacroAssembler::JumpToBuiltin(const ExternalReference& ext) {
+void MacroAssembler::JumpToRuntime(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));
- CEntryStub ces;
+ CEntryStub ces(1);
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -1170,7 +1171,7 @@ void MacroAssembler::Abort(const char* msg) {
CodePatcher::CodePatcher(byte* address, int size)
- : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+ : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
diff --git a/V8Binding/v8/src/ia32/macro-assembler-ia32.h b/V8Binding/v8/src/ia32/macro-assembler-ia32.h
index 21b2eb5..ed72c96 100644
--- a/V8Binding/v8/src/ia32/macro-assembler-ia32.h
+++ b/V8Binding/v8/src/ia32/macro-assembler-ia32.h
@@ -175,30 +175,30 @@ class MacroAssembler: public Assembler {
// and result_end have not yet been tagged as heap objects. If
// result_contains_top_on_entry is true the content of result is known to be
// the allocation top on entry (could be result_end from a previous call to
- // AllocateObjectInNewSpace). If result_contains_top_on_entry is true scratch
+ // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
- void AllocateObjectInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateObjectInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
+ void AllocateInNewSpace(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void AllocateInNewSpace(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void AllocateInNewSpace(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. Make sure that no pointers are left to the
@@ -254,12 +254,14 @@ class MacroAssembler: public Assembler {
void CallRuntime(Runtime::FunctionId id, int num_arguments);
// Tail call of a runtime routine (jump).
- // Like JumpToBuiltin, but also takes care of passing the number
+ // Like JumpToRuntime, but also takes care of passing the number
// of arguments.
- void TailCallRuntime(const ExternalReference& ext, int num_arguments);
+ void TailCallRuntime(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
- // Jump to the builtin routine.
- void JumpToBuiltin(const ExternalReference& ext);
+ // Jump to a runtime routine.
+ void JumpToRuntime(const ExternalReference& ext);
// ---------------------------------------------------------------------------
@@ -319,8 +321,16 @@ class MacroAssembler: public Assembler {
Label* done,
InvokeFlag flag);
- // Get the code for the given builtin. Returns if able to resolve
- // the function in the 'resolved' flag.
+ // Prepares for a call or jump to a builtin by doing two things:
+ // 1. Emits code that fetches the builtin's function object from the context
+ // at runtime, and puts it in the register edi.
+ // 2. Fetches the builtin's code object, and returns it in a handle, at
+ // compile time, so that later code can emit instructions to jump or call
+ // the builtin directly. If the code object has not yet been created, it
+ // returns the builtin code object for IllegalFunction, and sets the
+ // output parameter "resolved" to false. Code that uses the return value
+ // should then add the address and the builtin name to the list of fixups
+ // called unresolved_, which is fixed up by the bootstrapper.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
// Activation support.
diff --git a/V8Binding/v8/src/ia32/simulator-ia32.h b/V8Binding/v8/src/ia32/simulator-ia32.h
index 3bed268..8fa4287 100644
--- a/V8Binding/v8/src/ia32/simulator-ia32.h
+++ b/V8Binding/v8/src/ia32/simulator-ia32.h
@@ -28,21 +28,22 @@
#ifndef V8_IA32_SIMULATOR_IA32_H_
#define V8_IA32_SIMULATOR_IA32_H_
+#include "allocation.h"
// Since there is no simulator for the ia32 architecture the only thing we can
// do is to call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4);
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-// NOTE: The check for overflow is not safe as there is no guarantee that the
-// running thread has its stack in all memory up to address 0x00000000.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) >= limit ? \
- reinterpret_cast<uintptr_t>(this) - limit : 0)
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on ia32 uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ return c_limit;
+ }
+};
// Call the generated regexp code directly. The entry function pointer should
// expect seven int/pointer sized arguments and return an int.
diff --git a/V8Binding/v8/src/ia32/stub-cache-ia32.cc b/V8Binding/v8/src/ia32/stub-cache-ia32.cc
index 74c982f..ca4e142 100644
--- a/V8Binding/v8/src/ia32/stub-cache-ia32.cc
+++ b/V8Binding/v8/src/ia32/stub-cache-ia32.cc
@@ -302,7 +302,7 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
__ mov(eax, Immediate(5));
__ mov(ebx, Immediate(ref));
- CEntryStub stub;
+ CEntryStub stub(1);
__ CallStub(&stub);
}
@@ -467,7 +467,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallRuntime(ref, 5);
+ __ TailCallRuntime(ref, 5, 1);
__ bind(&cleanup);
__ pop(scratch1);
@@ -489,7 +489,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
- __ TailCallRuntime(ref, 5);
+ __ TailCallRuntime(ref, 5, 1);
}
private:
@@ -593,7 +593,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ mov(eax, Immediate(5));
__ mov(ebx, Immediate(ref));
- CEntryStub stub;
+ CEntryStub stub(1);
__ CallStub(&stub);
__ LeaveInternalFrame();
@@ -789,7 +789,7 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallRuntime(load_callback_property, 5);
+ __ TailCallRuntime(load_callback_property, 5, 1);
}
@@ -1237,7 +1237,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
- __ TailCallRuntime(store_callback_property, 4);
+ __ TailCallRuntime(store_callback_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
@@ -1290,7 +1290,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
- __ TailCallRuntime(store_ic_property, 3);
+ __ TailCallRuntime(store_ic_property, 3, 1);
// Handle store cache miss.
__ bind(&miss);
@@ -1783,12 +1783,12 @@ Object* ConstructStubCompiler::CompileConstructStub(
// ebx: initial map
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ shl(ecx, kPointerSizeLog2);
- __ AllocateObjectInNewSpace(ecx,
- edx,
- ecx,
- no_reg,
- &generic_stub_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(ecx,
+ edx,
+ ecx,
+ no_reg,
+ &generic_stub_call,
+ NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields and add the heap tag.
// ebx: initial map
diff --git a/V8Binding/v8/src/ic-inl.h b/V8Binding/v8/src/ic-inl.h
index 38d61dc..131f77b 100644
--- a/V8Binding/v8/src/ic-inl.h
+++ b/V8Binding/v8/src/ic-inl.h
@@ -38,7 +38,7 @@ namespace internal {
Address IC::address() {
// Get the address of the call.
- Address result = pc() - Assembler::kPatchReturnSequenceLength;
+ Address result = pc() - Assembler::kCallTargetAddressOffset;
#ifdef ENABLE_DEBUGGER_SUPPORT
// First check if any break points are active if not just return the address
diff --git a/V8Binding/v8/src/ic.cc b/V8Binding/v8/src/ic.cc
index 393ccbf..264b99c 100644
--- a/V8Binding/v8/src/ic.cc
+++ b/V8Binding/v8/src/ic.cc
@@ -122,7 +122,7 @@ Address IC::OriginalCodeAddress() {
// Get the address of the call site in the active code. This is the
// place where the call to DebugBreakXXX is and where the IC
// normally would be.
- Address addr = pc() - Assembler::kPatchReturnSequenceLength;
+ Address addr = pc() - Assembler::kCallTargetAddressOffset;
// Return the address in the original code. This is the place where
// the call which has been overwritten by the DebugBreakXXX resides
// and the place where the inline cache system should look.
diff --git a/V8Binding/v8/src/ic.h b/V8Binding/v8/src/ic.h
index 007b035..fcf1ec0 100644
--- a/V8Binding/v8/src/ic.h
+++ b/V8Binding/v8/src/ic.h
@@ -390,7 +390,7 @@ class KeyedStoreIC: public IC {
// Support for patching the map that is checked in an inlined
// version of keyed store.
// The address is the patch point for the IC call
- // (Assembler::kPatchReturnSequenceLength before the end of
+ // (Assembler::kCallTargetAddressOffset before the end of
// the call/return address).
// The map is the new map that the inlined code should check against.
static bool PatchInlinedStore(Address address, Object* map);
diff --git a/V8Binding/v8/src/list.h b/V8Binding/v8/src/list.h
index dd7ea1c..25211d9 100644
--- a/V8Binding/v8/src/list.h
+++ b/V8Binding/v8/src/list.h
@@ -51,6 +51,13 @@ class List {
INLINE(explicit List(int capacity)) { Initialize(capacity); }
INLINE(~List()) { DeleteData(data_); }
+ // Deallocates memory used by the list and leaves the list in a consistent
+ // empty state.
+ void Free() {
+ DeleteData(data_);
+ Initialize(0);
+ }
+
INLINE(void* operator new(size_t size)) { return P::New(size); }
INLINE(void operator delete(void* p, size_t)) { return P::Delete(p); }
diff --git a/V8Binding/v8/src/log-utils.cc b/V8Binding/v8/src/log-utils.cc
index b31864b..f327a0a 100644
--- a/V8Binding/v8/src/log-utils.cc
+++ b/V8Binding/v8/src/log-utils.cc
@@ -163,7 +163,7 @@ void Log::OpenMemoryBuffer() {
void Log::Close() {
if (Write == WriteToFile) {
- fclose(output_handle_);
+ if (output_handle_ != NULL) fclose(output_handle_);
output_handle_ = NULL;
} else if (Write == WriteToMemory) {
delete output_buffer_;
@@ -310,6 +310,20 @@ void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
}
+void LogMessageBuilder::AppendStringPart(const char* str, int len) {
+ if (pos_ + len > Log::kMessageBufferSize) {
+ len = Log::kMessageBufferSize - pos_;
+ ASSERT(len >= 0);
+ if (len == 0) return;
+ }
+ Vector<char> buf(Log::message_buffer_ + pos_,
+ Log::kMessageBufferSize - pos_);
+ OS::StrNCpy(buf, str, len);
+ pos_ += len;
+ ASSERT(pos_ <= Log::kMessageBufferSize);
+}
+
+
bool LogMessageBuilder::StoreInCompressor(LogRecordCompressor* compressor) {
return compressor->Store(Vector<const char>(Log::message_buffer_, pos_));
}
diff --git a/V8Binding/v8/src/log-utils.h b/V8Binding/v8/src/log-utils.h
index ad669d5..117f098 100644
--- a/V8Binding/v8/src/log-utils.h
+++ b/V8Binding/v8/src/log-utils.h
@@ -114,6 +114,9 @@ class Log : public AllStatic {
return !is_stopped_ && (output_handle_ != NULL || output_buffer_ != NULL);
}
+ // Size of buffer used for formatting log messages.
+ static const int kMessageBufferSize = 2048;
+
private:
typedef int (*WritePtr)(const char* msg, int length);
@@ -162,9 +165,6 @@ class Log : public AllStatic {
// access to the formatting buffer and the log file or log memory buffer.
static Mutex* mutex_;
- // Size of buffer used for formatting log messages.
- static const int kMessageBufferSize = 2048;
-
// Buffer used for formatting log messages. This is a singleton buffer and
// mutex_ should be acquired before using it.
static char* message_buffer_;
@@ -247,6 +247,9 @@ class LogMessageBuilder BASE_EMBEDDED {
void AppendDetailed(String* str, bool show_impl_info);
+ // Append a portion of a string.
+ void AppendStringPart(const char* str, int len);
+
// Stores log message into compressor, returns true if the message
// was stored (i.e. doesn't repeat the previous one).
bool StoreInCompressor(LogRecordCompressor* compressor);
diff --git a/V8Binding/v8/src/log.cc b/V8Binding/v8/src/log.cc
index 5680820..d225c3b 100644
--- a/V8Binding/v8/src/log.cc
+++ b/V8Binding/v8/src/log.cc
@@ -889,16 +889,51 @@ void Logger::HeapSampleJSConstructorEvent(const char* constructor,
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_gc) return;
LogMessageBuilder msg;
- msg.Append("heap-js-cons-item,%s,%d,%d\n",
- constructor != NULL ?
- (constructor[0] != '\0' ? constructor : "(anonymous)") :
- "(no_constructor)",
- number, bytes);
+ msg.Append("heap-js-cons-item,%s,%d,%d\n", constructor, number, bytes);
msg.WriteToLogFile();
#endif
}
+void Logger::HeapSampleJSRetainersEvent(
+ const char* constructor, const char* event) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (!Log::IsEnabled() || !FLAG_log_gc) return;
+ // Event starts with comma, so we don't have it in the format string.
+ static const char* event_text = "heap-js-ret-item,%s";
+ // We take placeholder strings into account, but it's OK to be conservative.
+ static const int event_text_len = strlen(event_text);
+ const int cons_len = strlen(constructor), event_len = strlen(event);
+ int pos = 0;
+ // Retainer lists can be long. We may need to split them into multiple events.
+ do {
+ LogMessageBuilder msg;
+ msg.Append(event_text, constructor);
+ int to_write = event_len - pos;
+ if (to_write > Log::kMessageBufferSize - (cons_len + event_text_len)) {
+ int cut_pos = pos + Log::kMessageBufferSize - (cons_len + event_text_len);
+ ASSERT(cut_pos < event_len);
+ while (cut_pos > pos && event[cut_pos] != ',') --cut_pos;
+ if (event[cut_pos] != ',') {
+ // Crash in debug mode, skip in release mode.
+ ASSERT(false);
+ return;
+ }
+ // Append a piece of event that fits, without trailing comma.
+ msg.AppendStringPart(event + pos, cut_pos - pos);
+ // Start next piece with comma.
+ pos = cut_pos;
+ } else {
+ msg.Append("%s", event + pos);
+ pos += event_len;
+ }
+ msg.Append('\n');
+ msg.WriteToLogFile();
+ } while (pos < event_len);
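+ // (Each emitted line repeats the full "heap-js-ret-item,<constructor>"
+ //  prefix, and every piece after the first begins at a comma, so splits
+ //  always fall on item boundaries.)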
+#endif
+}
+
+
void Logger::DebugTag(const char* call_site_tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log) return;
diff --git a/V8Binding/v8/src/log.h b/V8Binding/v8/src/log.h
index 89f6cdb..07a0429 100644
--- a/V8Binding/v8/src/log.h
+++ b/V8Binding/v8/src/log.h
@@ -221,6 +221,8 @@ class Logger {
static void HeapSampleItemEvent(const char* type, int number, int bytes);
static void HeapSampleJSConstructorEvent(const char* constructor,
int number, int bytes);
+ static void HeapSampleJSRetainersEvent(const char* constructor,
+ const char* event);
static void HeapSampleStats(const char* space, const char* kind,
int capacity, int used);
diff --git a/V8Binding/v8/src/macro-assembler.h b/V8Binding/v8/src/macro-assembler.h
index 5631dec..63a6d6e 100644
--- a/V8Binding/v8/src/macro-assembler.h
+++ b/V8Binding/v8/src/macro-assembler.h
@@ -50,7 +50,7 @@ enum HandlerType {
};
-// Flags used for the AllocateObjectInNewSpace functions.
+// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
// No special flags.
NO_ALLOCATION_FLAGS = 0,
diff --git a/V8Binding/v8/src/mark-compact.cc b/V8Binding/v8/src/mark-compact.cc
index e682fe2..cbd47a8 100644
--- a/V8Binding/v8/src/mark-compact.cc
+++ b/V8Binding/v8/src/mark-compact.cc
@@ -265,21 +265,9 @@ class MarkingVisitor : public ObjectVisitor {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
- void BeginCodeIteration(Code* code) {
- // When iterating over a code object during marking
- // ic targets are derived pointers.
- ASSERT(code->ic_flag() == Code::IC_TARGET_IS_ADDRESS);
- }
-
- void EndCodeIteration(Code* code) {
- // If this is a compacting collection, set ic targets
- // are pointing to object headers.
- if (IsCompacting()) code->set_ic_flag(Code::IC_TARGET_IS_OBJECT);
- }
-
void VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Code* code = CodeFromDerivedPointer(rinfo->target_address());
+ Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
IC::Clear(rinfo->pc());
// Please note targets for cleared inline cached do not have to be
@@ -287,17 +275,12 @@ class MarkingVisitor : public ObjectVisitor {
} else {
MarkCompactCollector::MarkObject(code);
}
- if (IsCompacting()) {
- // When compacting we convert the target to a real object pointer.
- code = CodeFromDerivedPointer(rinfo->target_address());
- rinfo->set_target_object(code);
- }
}
void VisitDebugTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
rinfo->IsCallInstruction());
- HeapObject* code = CodeFromDerivedPointer(rinfo->call_address());
+ HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
MarkCompactCollector::MarkObject(code);
// When compacting we convert the call to a real object pointer.
if (IsCompacting()) rinfo->set_call_object(code);
@@ -314,13 +297,6 @@ class MarkingVisitor : public ObjectVisitor {
// Tells whether the mark sweep collection will perform compaction.
bool IsCompacting() { return MarkCompactCollector::IsCompacting(); }
- // Retrieves the Code pointer from derived code entry.
- Code* CodeFromDerivedPointer(Address addr) {
- ASSERT(addr != NULL);
- return reinterpret_cast<Code*>(
- HeapObject::FromAddress(addr - Code::kHeaderSize));
- }
-
// Visit an unmarked object.
void VisitUnmarkedObject(HeapObject* obj) {
#ifdef DEBUG
@@ -1194,12 +1170,6 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
if (object->IsMarked()) {
object->ClearMark();
MarkCompactCollector::tracer()->decrement_marked_count();
- if (MarkCompactCollector::IsCompacting() && object->IsCode()) {
- // If this is compacting collection marked code objects have had
- // their IC targets converted to objects.
- // They need to be converted back to addresses.
- Code::cast(object)->ConvertICTargetsFromObjectToAddress();
- }
if (!is_previous_alive) { // Transition from free to live.
dealloc(free_start, current - free_start);
is_previous_alive = true;
@@ -1405,6 +1375,14 @@ class UpdatingVisitor: public ObjectVisitor {
for (Object** p = start; p < end; p++) UpdatePointer(p);
}
+ void VisitCodeTarget(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VisitPointer(&target);
+ rinfo->set_target_address(
+ reinterpret_cast<Code*>(target)->instruction_start());
+ }
+
private:
void UpdatePointer(Object** p) {
if (!(*p)->IsHeapObject()) return;
@@ -1638,11 +1616,6 @@ void MarkCompactCollector::RelocateObjects() {
ASSERT(live_cells == live_cell_objects_);
ASSERT(live_news == live_young_objects_);
- // Notify code object in LO to convert IC target to address
- // This must happen after lo_space_->Compact
- LargeObjectIterator it(Heap::lo_space());
- while (it.has_next()) { ConvertCodeICTargetToAddress(it.next()); }
-
// Flip from and to spaces
Heap::new_space()->Flip();
@@ -1661,14 +1634,6 @@ void MarkCompactCollector::RelocateObjects() {
}
-int MarkCompactCollector::ConvertCodeICTargetToAddress(HeapObject* obj) {
- if (obj->IsCode()) {
- Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
- }
- return obj->Size();
-}
-
-
int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
// Recover map pointer.
MapWord encoding = obj->map_word();
@@ -1777,11 +1742,6 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// Reset the map pointer.
int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);
- // Convert inline cache target to address using old address.
- if (obj->IsCode()) {
- Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
- }
-
Address old_addr = obj->address();
if (new_addr != old_addr) {
diff --git a/V8Binding/v8/src/messages.js b/V8Binding/v8/src/messages.js
index 255e544..2720792 100644
--- a/V8Binding/v8/src/messages.js
+++ b/V8Binding/v8/src/messages.js
@@ -32,6 +32,11 @@
var kVowelSounds = 0;
var kCapitalVowelSounds = 0;
+// If this object gets passed to an error constructor the error will
+// get an accessor for .message that constructs a descriptive error
+// message on access.
+var kAddMessageAccessorsMarker = { };
+
function GetInstanceName(cons) {
if (cons.length == 0) {
@@ -167,7 +172,8 @@ function FormatMessage(message) {
no_input_to_regexp: "No input to %0",
result_not_primitive: "Result of %0 must be a primitive, was %1",
invalid_json: "String '%0' is not valid JSON",
- circular_structure: "Converting circular structure to JSON"
+ circular_structure: "Converting circular structure to JSON",
+ object_keys_non_object: "Object.keys called on non-object"
};
}
var format = kMessages[message.type];
@@ -564,11 +570,6 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) {
// ----------------------------------------------------------------------------
// Error implementation
-// If this object gets passed to an error constructor the error will
-// get an accessor for .message that constructs a descriptive error
-// message on access.
-var kAddMessageAccessorsMarker = { };
-
// Defines accessors for a property that is calculated the first time
// the property is read.
function DefineOneShotAccessor(obj, name, fun) {
@@ -780,14 +781,15 @@ function FormatStackTrace(error, frames) {
}
for (var i = 0; i < frames.length; i++) {
var frame = frames[i];
+ var line;
try {
- var line = FormatSourcePosition(frame);
+ line = FormatSourcePosition(frame);
} catch (e) {
try {
- var line = "<error: " + e + ">";
+ line = "<error: " + e + ">";
} catch (ee) {
// Any code that reaches this point is seriously nasty!
- var line = "<error>";
+ line = "<error>";
}
}
lines.push(" at " + line);
diff --git a/V8Binding/v8/src/mirror-delay.js b/V8Binding/v8/src/mirror-delay.js
index 76ae75b..c4ab7b8 100644
--- a/V8Binding/v8/src/mirror-delay.js
+++ b/V8Binding/v8/src/mirror-delay.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// jsminify this file, js2c: jsmin
-
// Touch the RegExp and Date functions to make sure that date-delay.js and
// regexp-delay.js has been loaded. This is required as the mirrors use
// functions within these files through the builtins object.
@@ -201,7 +199,8 @@ PropertyAttribute.DontDelete = DONT_DELETE;
ScopeType = { Global: 0,
Local: 1,
With: 2,
- Closure: 3 };
+ Closure: 3,
+ Catch: 4 };
// Mirror hierarchy:
diff --git a/V8Binding/v8/src/objects-debug.cc b/V8Binding/v8/src/objects-debug.cc
index 9fc9b1d..288cc21 100644
--- a/V8Binding/v8/src/objects-debug.cc
+++ b/V8Binding/v8/src/objects-debug.cc
@@ -733,7 +733,6 @@ void Code::CodePrint() {
void Code::CodeVerify() {
- CHECK(ic_flag() == IC_TARGET_IS_ADDRESS);
CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
static_cast<intptr_t>(kCodeAlignment)));
Address last_gc_pc = NULL;
diff --git a/V8Binding/v8/src/objects-inl.h b/V8Binding/v8/src/objects-inl.h
index cabc8a2..29b886d 100644
--- a/V8Binding/v8/src/objects-inl.h
+++ b/V8Binding/v8/src/objects-inl.h
@@ -2538,16 +2538,6 @@ INT_ACCESSORS(Code, relocation_size, kRelocationSizeOffset)
INT_ACCESSORS(Code, sinfo_size, kSInfoSizeOffset)
-Code::ICTargetState Code::ic_flag() {
- return static_cast<ICTargetState>(READ_BYTE_FIELD(this, kICFlagOffset));
-}
-
-
-void Code::set_ic_flag(ICTargetState value) {
- WRITE_BYTE_FIELD(this, kICFlagOffset, value);
-}
-
-
byte* Code::instruction_start() {
return FIELD_ADDR(this, kHeaderSize);
}
diff --git a/V8Binding/v8/src/objects.cc b/V8Binding/v8/src/objects.cc
index 9ea131f..e2fa3b5 100644
--- a/V8Binding/v8/src/objects.cc
+++ b/V8Binding/v8/src/objects.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "api.h"
+#include "arguments.h"
#include "bootstrapper.h"
#include "debug.h"
#include "execution.h"
@@ -158,14 +159,12 @@ Object* Object::GetPropertyWithCallback(Object* receiver,
Object* fun_obj = data->getter();
v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
HandleScope scope;
- Handle<JSObject> self(JSObject::cast(receiver));
- Handle<JSObject> holder_handle(JSObject::cast(holder));
+ JSObject* self = JSObject::cast(receiver);
+ JSObject* holder_handle = JSObject::cast(holder);
Handle<String> key(name);
- Handle<Object> fun_data(data->data());
- LOG(ApiNamedPropertyAccess("load", *self, name));
- v8::AccessorInfo info(v8::Utils::ToLocal(self),
- v8::Utils::ToLocal(fun_data),
- v8::Utils::ToLocal(holder_handle));
+ LOG(ApiNamedPropertyAccess("load", self, name));
+ CustomArguments args(data->data(), self, holder_handle);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
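This is the first of several hunks in objects.cc that replace the three separate handles previously passed to v8::AccessorInfo with a single CustomArguments block. The class itself is declared in the new src/arguments.h, which is not part of this excerpt; a hedged sketch of the idea, assuming it simply pins data, self and holder in one on-stack array so AccessorInfo can address them relative to end():

// Hypothetical sketch only; field order and count are assumptions, the real
// class lives in src/arguments.h.
class CustomArgumentsSketch {
 public:
  CustomArgumentsSketch(Object* data, JSObject* self, JSObject* holder) {
    values_[0] = data;
    values_[1] = holder;
    values_[2] = self;
  }
  // v8::AccessorInfo is constructed from this pointer and indexes backwards
  // from it to recover data, holder and self.
  Object** end() { return values_ + kArgsLength - 1; }
 private:
  static const int kArgsLength = 3;
  Object* values_[kArgsLength];
};

Besides saving handle allocations, keeping the three values in one block presumably also lets a single visitor pass keep them alive across a GC.
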
@@ -476,6 +475,21 @@ Object* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) {
}
+bool JSObject::IsDirty() {
+ Object* cons_obj = map()->constructor();
+ if (!cons_obj->IsJSFunction())
+ return true;
+ JSFunction* fun = JSFunction::cast(cons_obj);
+ if (!fun->shared()->function_data()->IsFunctionTemplateInfo())
+ return true;
+ // If the object is fully fast case and has the same map it was
+ // created with then no changes can have been made to it.
+ return map() != fun->initial_map()
+ || !HasFastElements()
+ || !HasFastProperties();
+}
+
+
Object* Object::GetProperty(Object* receiver,
LookupResult* result,
String* name,
@@ -1186,7 +1200,9 @@ void HeapNumber::HeapNumberPrint(StringStream* accumulator) {
String* JSObject::class_name() {
- if (IsJSFunction()) return Heap::function_class_symbol();
+ if (IsJSFunction()) {
+ return Heap::function_class_symbol();
+ }
if (map()->constructor()->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(map()->constructor());
return String::cast(constructor->shared()->instance_class_name());
@@ -1196,6 +1212,20 @@ String* JSObject::class_name() {
}
+String* JSObject::constructor_name() {
+ if (IsJSFunction()) {
+ return Heap::function_class_symbol();
+ }
+ if (map()->constructor()->IsJSFunction()) {
+ JSFunction* constructor = JSFunction::cast(map()->constructor());
+ String* name = String::cast(constructor->shared()->name());
+ return name->length() > 0 ? name : constructor->shared()->inferred_name();
+ }
+ // If the constructor is not present, return "Object".
+ return Heap::Object_symbol();
+}
+
+
void JSObject::JSObjectIterateBody(int object_size, ObjectVisitor* v) {
// Iterate over all fields in the body. Assumes all are Object*.
IteratePointers(v, kPropertiesOffset, object_size);
@@ -1507,11 +1537,9 @@ Object* JSObject::SetPropertyWithInterceptor(String* name,
Handle<Object> value_handle(value);
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
if (!interceptor->setter()->IsUndefined()) {
- Handle<Object> data_handle(interceptor->data());
LOG(ApiNamedPropertyAccess("interceptor-named-set", this, name));
- v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(this_handle));
+ CustomArguments args(interceptor->data(), this, this);
+ v8::AccessorInfo info(args.end());
v8::NamedPropertySetter setter =
v8::ToCData<v8::NamedPropertySetter>(interceptor->setter());
v8::Handle<v8::Value> result;
@@ -1574,14 +1602,10 @@ Object* JSObject::SetPropertyWithCallback(Object* structure,
Object* call_obj = data->setter();
v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
if (call_fun == NULL) return value;
- Handle<JSObject> self(this);
- Handle<JSObject> holder_handle(JSObject::cast(holder));
Handle<String> key(name);
- Handle<Object> fun_data(data->data());
LOG(ApiNamedPropertyAccess("store", this, name));
- v8::AccessorInfo info(v8::Utils::ToLocal(self),
- v8::Utils::ToLocal(fun_data),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(data->data(), this, JSObject::cast(holder));
+ v8::AccessorInfo info(args.end());
{
// Leaving JavaScript.
VMState state(EXTERNAL);
@@ -2005,10 +2029,8 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(name);
- Handle<Object> data_handle(interceptor->data());
- v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(interceptor->data(), receiver, this);
+ v8::AccessorInfo info(args.end());
if (!interceptor->query()->IsUndefined()) {
v8::NamedPropertyQuery query =
v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
@@ -2276,11 +2298,9 @@ Object* JSObject::DeletePropertyWithInterceptor(String* name) {
if (!interceptor->deleter()->IsUndefined()) {
v8::NamedPropertyDeleter deleter =
v8::ToCData<v8::NamedPropertyDeleter>(interceptor->deleter());
- Handle<Object> data_handle(interceptor->data());
LOG(ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
- v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(this_handle));
+ CustomArguments args(interceptor->data(), this, this);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Boolean> result;
{
// Leaving JavaScript.
@@ -2339,11 +2359,9 @@ Object* JSObject::DeleteElementWithInterceptor(uint32_t index) {
v8::IndexedPropertyDeleter deleter =
v8::ToCData<v8::IndexedPropertyDeleter>(interceptor->deleter());
Handle<JSObject> this_handle(this);
- Handle<Object> data_handle(interceptor->data());
LOG(ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
- v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(this_handle));
+ CustomArguments args(interceptor->data(), this, this);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Boolean> result;
{
// Leaving JavaScript.
@@ -3940,35 +3958,75 @@ const unibrow::byte* String::ReadBlock(String* input,
}
-FlatStringReader* FlatStringReader::top_ = NULL;
+Relocatable* Relocatable::top_ = NULL;
+
+
+void Relocatable::PostGarbageCollectionProcessing() {
+ Relocatable* current = top_;
+ while (current != NULL) {
+ current->PostGarbageCollection();
+ current = current->prev_;
+ }
+}
+
+
+// Reserve space for statics needing saving and restoring.
+int Relocatable::ArchiveSpacePerThread() {
+ return sizeof(top_);
+}
+
+
+// Archive statics that are thread local.
+char* Relocatable::ArchiveState(char* to) {
+ *reinterpret_cast<Relocatable**>(to) = top_;
+ top_ = NULL;
+ return to + ArchiveSpacePerThread();
+}
+
+
+// Restore statics that are thread local.
+char* Relocatable::RestoreState(char* from) {
+ top_ = *reinterpret_cast<Relocatable**>(from);
+ return from + ArchiveSpacePerThread();
+}
+
+
+char* Relocatable::Iterate(ObjectVisitor* v, char* thread_storage) {
+ Relocatable* top = *reinterpret_cast<Relocatable**>(thread_storage);
+ Iterate(v, top);
+ return thread_storage + ArchiveSpacePerThread();
+}
+
+
+void Relocatable::Iterate(ObjectVisitor* v) {
+ Iterate(v, top_);
+}
+
+
+void Relocatable::Iterate(ObjectVisitor* v, Relocatable* top) {
+ Relocatable* current = top;
+ while (current != NULL) {
+ current->IterateInstance(v);
+ current = current->prev_;
+ }
+}
FlatStringReader::FlatStringReader(Handle<String> str)
: str_(str.location()),
- length_(str->length()),
- prev_(top_) {
- top_ = this;
- RefreshState();
+ length_(str->length()) {
+ PostGarbageCollection();
}
FlatStringReader::FlatStringReader(Vector<const char> input)
- : str_(NULL),
+ : str_(0),
is_ascii_(true),
length_(input.length()),
- start_(input.start()),
- prev_(top_) {
- top_ = this;
-}
-
-
-FlatStringReader::~FlatStringReader() {
- ASSERT_EQ(top_, this);
- top_ = prev_;
-}
+ start_(input.start()) { }
-void FlatStringReader::RefreshState() {
+void FlatStringReader::PostGarbageCollection() {
if (str_ == NULL) return;
Handle<String> str(str_);
ASSERT(str->IsFlat());
@@ -3981,15 +4039,6 @@ void FlatStringReader::RefreshState() {
}
-void FlatStringReader::PostGarbageCollectionProcessing() {
- FlatStringReader* current = top_;
- while (current != NULL) {
- current->RefreshState();
- current = current->prev_;
- }
-}
-
-
void StringInputBuffer::Seek(unsigned pos) {
Reset(pos, input_);
}
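FlatStringReader now gets its GC safety from the general Relocatable chain introduced above instead of maintaining its own top_/prev_ list. A hedged sketch of how another stack-allocated helper could plug into the same mechanism:

// Hypothetical example class; FlatStringReader above is the real client.
class FlatContentPointer : public Relocatable {
 public:
  explicit FlatContentPointer(Handle<String> str)
      : location_(str.location()), raw_string_(NULL) {
    PostGarbageCollection();   // derive the raw pointer eagerly
  }
  // Relocatable::PostGarbageCollectionProcessing() walks the chain after
  // every GC and calls this on each live instance, so the cached raw
  // pointer is refreshed before it can be used again.
  virtual void PostGarbageCollection() {
    raw_string_ = *location_;
  }
 private:
  String** location_;   // handle cell, updated by the GC itself
  String* raw_string_;  // raw pointer, recomputed after each GC
};

Instances live on the C++ stack; the base-class constructor links them into top_ and the destructor unlinks them, which is why ~Relocatable() asserts strict LIFO order.
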
@@ -4924,60 +4973,25 @@ void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) {
}
-void ObjectVisitor::BeginCodeIteration(Code* code) {
- ASSERT(code->ic_flag() == Code::IC_TARGET_IS_OBJECT);
-}
-
-
void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- VisitPointer(rinfo->target_object_address());
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Object* old_target = target;
+ VisitPointer(&target);
+ CHECK_EQ(target, old_target); // VisitPointer doesn't change Code* *target.
}
void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) && rinfo->IsCallInstruction());
- VisitPointer(rinfo->call_object_address());
-}
-
-
-// Convert relocatable targets from address to code object address. This is
-// mainly IC call targets but for debugging straight-line code can be replaced
-// with a call instruction which also has to be relocated.
-void Code::ConvertICTargetsFromAddressToObject() {
- ASSERT(ic_flag() == IC_TARGET_IS_ADDRESS);
-
- for (RelocIterator it(this, RelocInfo::kCodeTargetMask);
- !it.done(); it.next()) {
- Address ic_addr = it.rinfo()->target_address();
- ASSERT(ic_addr != NULL);
- HeapObject* code = HeapObject::FromAddress(ic_addr - Code::kHeaderSize);
- ASSERT(code->IsHeapObject());
- it.rinfo()->set_target_object(code);
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (Debug::has_break_points()) {
- for (RelocIterator it(this, RelocInfo::ModeMask(RelocInfo::JS_RETURN));
- !it.done();
- it.next()) {
- if (it.rinfo()->IsCallInstruction()) {
- Address addr = it.rinfo()->call_address();
- ASSERT(addr != NULL);
- HeapObject* code = HeapObject::FromAddress(addr - Code::kHeaderSize);
- ASSERT(code->IsHeapObject());
- it.rinfo()->set_call_object(code);
- }
- }
- }
-#endif
- set_ic_flag(IC_TARGET_IS_OBJECT);
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+ Object* old_target = target;
+ VisitPointer(&target);
+ CHECK_EQ(target, old_target); // VisitPointer doesn't change Code* *target.
}
void Code::CodeIterateBody(ObjectVisitor* v) {
- v->BeginCodeIteration(this);
-
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
@@ -5004,38 +5018,6 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
}
ScopeInfo<>::IterateScopeInfo(this, v);
-
- v->EndCodeIteration(this);
-}
-
-
-void Code::ConvertICTargetsFromObjectToAddress() {
- ASSERT(ic_flag() == IC_TARGET_IS_OBJECT);
-
- for (RelocIterator it(this, RelocInfo::kCodeTargetMask);
- !it.done(); it.next()) {
- // We cannot use the safe cast (Code::cast) here, because we may be in
- // the middle of relocating old objects during GC and the map pointer in
- // the code object may be mangled
- Code* code = reinterpret_cast<Code*>(it.rinfo()->target_object());
- ASSERT((code != NULL) && code->IsHeapObject());
- it.rinfo()->set_target_address(code->instruction_start());
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (Debug::has_break_points()) {
- for (RelocIterator it(this, RelocInfo::ModeMask(RelocInfo::JS_RETURN));
- !it.done();
- it.next()) {
- if (it.rinfo()->IsCallInstruction()) {
- Code* code = reinterpret_cast<Code*>(it.rinfo()->call_object());
- ASSERT((code != NULL) && code->IsHeapObject());
- it.rinfo()->set_call_address(code->instruction_start());
- }
- }
- }
-#endif
- set_ic_flag(IC_TARGET_IS_ADDRESS);
}
@@ -5465,10 +5447,8 @@ bool JSObject::HasElementWithInterceptor(JSObject* receiver, uint32_t index) {
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
- Handle<Object> data_handle(interceptor->data());
- v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(interceptor->data(), receiver, this);
+ v8::AccessorInfo info(args.end());
if (!interceptor->query()->IsUndefined()) {
v8::IndexedPropertyQuery query =
v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
@@ -5600,11 +5580,9 @@ Object* JSObject::SetElementWithInterceptor(uint32_t index, Object* value) {
if (!interceptor->setter()->IsUndefined()) {
v8::IndexedPropertySetter setter =
v8::ToCData<v8::IndexedPropertySetter>(interceptor->setter());
- Handle<Object> data_handle(interceptor->data());
LOG(ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
- v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(this_handle));
+ CustomArguments args(interceptor->data(), this, this);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
@@ -5872,13 +5850,11 @@ Object* JSObject::GetElementWithInterceptor(JSObject* receiver,
Handle<JSObject> holder_handle(this);
if (!interceptor->getter()->IsUndefined()) {
- Handle<Object> data_handle(interceptor->data());
v8::IndexedPropertyGetter getter =
v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
LOG(ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
- v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(interceptor->data(), receiver, this);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
@@ -6110,15 +6086,13 @@ Object* JSObject::GetPropertyWithInterceptor(
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(name);
- Handle<Object> data_handle(interceptor->data());
if (!interceptor->getter()->IsUndefined()) {
v8::NamedPropertyGetter getter =
v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
LOG(ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
- v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(interceptor->data(), receiver, this);
+ v8::AccessorInfo info(args.end());
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
diff --git a/V8Binding/v8/src/objects.h b/V8Binding/v8/src/objects.h
index bd8ca51..e9430f5 100644
--- a/V8Binding/v8/src/objects.h
+++ b/V8Binding/v8/src/objects.h
@@ -211,7 +211,7 @@ enum PropertyNormalizationMode {
// NOTE: Everything following JS_VALUE_TYPE is considered a
// JSObject for GC purposes. The first four entries here have typeof
// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
-#define INSTANCE_TYPE_LIST(V) \
+#define INSTANCE_TYPE_LIST_ALL(V) \
V(SHORT_SYMBOL_TYPE) \
V(MEDIUM_SYMBOL_TYPE) \
V(LONG_SYMBOL_TYPE) \
@@ -282,8 +282,6 @@ enum PropertyNormalizationMode {
V(OBJECT_TEMPLATE_INFO_TYPE) \
V(SIGNATURE_INFO_TYPE) \
V(TYPE_SWITCH_INFO_TYPE) \
- V(DEBUG_INFO_TYPE) \
- V(BREAK_POINT_INFO_TYPE) \
V(SCRIPT_TYPE) \
\
V(JS_VALUE_TYPE) \
@@ -297,6 +295,17 @@ enum PropertyNormalizationMode {
\
V(JS_FUNCTION_TYPE) \
+#ifdef ENABLE_DEBUGGER_SUPPORT
+#define INSTANCE_TYPE_LIST_DEBUGGER(V) \
+ V(DEBUG_INFO_TYPE) \
+ V(BREAK_POINT_INFO_TYPE)
+#else
+#define INSTANCE_TYPE_LIST_DEBUGGER(V)
+#endif
+
+#define INSTANCE_TYPE_LIST(V) \
+ INSTANCE_TYPE_LIST_ALL(V) \
+ INSTANCE_TYPE_LIST_DEBUGGER(V)
// Since string types are not consecutive, this macro is used to
@@ -673,8 +682,10 @@ enum InstanceType {
OBJECT_TEMPLATE_INFO_TYPE,
SIGNATURE_INFO_TYPE,
TYPE_SWITCH_INFO_TYPE,
+#ifdef ENABLE_DEBUGGER_SUPPORT
DEBUG_INFO_TYPE,
BREAK_POINT_INFO_TYPE,
+#endif
SCRIPT_TYPE,
JS_VALUE_TYPE,
@@ -751,14 +762,17 @@ class Object BASE_EMBEDDED {
inline bool IsHeapNumber();
inline bool IsString();
inline bool IsSymbol();
+#ifdef DEBUG
+ // See objects-inl.h for more details
inline bool IsSeqString();
inline bool IsSlicedString();
inline bool IsExternalString();
- inline bool IsConsString();
inline bool IsExternalTwoByteString();
inline bool IsExternalAsciiString();
inline bool IsSeqTwoByteString();
inline bool IsSeqAsciiString();
+#endif // DEBUG
+ inline bool IsConsString();
inline bool IsNumber();
inline bool IsByteArray();
@@ -889,11 +903,11 @@ class Object BASE_EMBEDDED {
// Smi represents integer Numbers that can be stored in 31 bits.
-// TODO(X64) Increase to 53 bits?
// Smis are immediate which means they are NOT allocated in the heap.
-// The this pointer has the following format: [31 bit signed int] 0
-// TODO(X64): 31 bits signed int sign-extended to 63 bits.
// Smi stands for small integer.
+// The this pointer has the following format: [31 bit signed int] 0
+// On 64-bit, the top 32 bits of the pointer are allowed to have any
+// value.
class Smi: public Object {
public:
// Returns the integer value.
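The rewritten comment above describes the 32-bit tagging scheme; a small sketch of what that layout implies (not the literal Smi implementation), assuming the tag occupies the least significant bit:

// Sketch of "[31 bit signed int] 0": shift the value up one bit so the low
// (tag) bit is zero; an arithmetic shift right restores the signed value.
static inline intptr_t EncodeSmiSketch(int value) {
  return static_cast<intptr_t>(value) << 1;
}
static inline int DecodeSmiSketch(intptr_t raw) {
  // On 64-bit targets only the low 32 bits are significant, which is why
  // the comment above allows the top 32 bits to hold any value.
  return static_cast<int>(static_cast<int32_t>(raw) >> 1);
}
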
@@ -1392,6 +1406,10 @@ class JSObject: public HeapObject {
// Returns the class name ([[Class]] property in the specification).
String* class_name();
+ // Returns the constructor name (the name, possibly the inferred name, of the
+ // function that was used to instantiate the object).
+ String* constructor_name();
+
// Retrieve interceptors.
InterceptorInfo* GetNamedInterceptor();
InterceptorInfo* GetIndexedInterceptor();
@@ -1424,6 +1442,10 @@ class JSObject: public HeapObject {
// Tells whether this object needs to be loaded.
inline bool IsLoaded();
+ // Returns true if this is an instance of an api function and has
+ // been modified since it was created. May give false positives.
+ bool IsDirty();
+
bool HasProperty(String* name) {
return GetPropertyAttribute(name) != ABSENT;
}
@@ -2512,13 +2534,6 @@ class Code: public HeapObject {
NUMBER_OF_KINDS = KEYED_STORE_IC + 1
};
- // A state indicates that inline cache in this Code object contains
- // objects or relative instruction addresses.
- enum ICTargetState {
- IC_TARGET_IS_ADDRESS,
- IC_TARGET_IS_OBJECT
- };
-
#ifdef ENABLE_DISASSEMBLER
// Printing
static const char* Kind2String(Kind kind);
@@ -2558,12 +2573,6 @@ class Code: public HeapObject {
inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
inline bool is_call_stub() { return kind() == CALL_IC; }
- // [ic_flag]: State of inline cache targets. The flag is set to the
- // object variant in ConvertICTargetsFromAddressToObject, and set to
- // the address variant in ConvertICTargetsFromObjectToAddress.
- inline ICTargetState ic_flag();
- inline void set_ic_flag(ICTargetState value);
-
// [major_key]: For kind STUB, the major key.
inline CodeStub::Major major_key();
inline void set_major_key(CodeStub::Major major);
@@ -2609,12 +2618,6 @@ class Code: public HeapObject {
// Returns the address of the scope information.
inline byte* sinfo_start();
- // Convert inline cache target from address to code object before GC.
- void ConvertICTargetsFromAddressToObject();
-
- // Convert inline cache target from code object to address after GC
- void ConvertICTargetsFromObjectToAddress();
-
// Relocate the code by delta bytes. Called to signal that this code
// object has been moved by delta bytes.
void Relocate(int delta);
@@ -2670,7 +2673,6 @@ class Code: public HeapObject {
~kCodeAlignmentMask;
// Byte offsets within kKindSpecificFlagsOffset.
- static const int kICFlagOffset = kKindSpecificFlagsOffset + 0;
static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset + 1;
// Flags layout.
@@ -3101,9 +3103,7 @@ class SharedFunctionInfo: public HeapObject {
inline bool is_expression();
inline void set_is_expression(bool value);
- // Is this function a top-level function. Used for accessing the
- // caller of functions. Top-level functions (scripts, evals) are
- // returned as null; see JSFunction::GetCallerAccessor(...).
+ // Is this function a top-level function (scripts, evals).
inline bool is_toplevel();
inline void set_is_toplevel(bool value);
@@ -4219,25 +4219,47 @@ class ExternalTwoByteString: public ExternalString {
};
+// Utility superclass for stack-allocated objects that must be updated
+// on gc. It provides two ways for the gc to update instances, either
+// iterating or updating after gc.
+class Relocatable BASE_EMBEDDED {
+ public:
+ inline Relocatable() : prev_(top_) { top_ = this; }
+ virtual ~Relocatable() {
+ ASSERT_EQ(top_, this);
+ top_ = prev_;
+ }
+ virtual void IterateInstance(ObjectVisitor* v) { }
+ virtual void PostGarbageCollection() { }
+
+ static void PostGarbageCollectionProcessing();
+ static int ArchiveSpacePerThread();
+ static char* ArchiveState(char* to);
+ static char* RestoreState(char* from);
+ static void Iterate(ObjectVisitor* v);
+ static void Iterate(ObjectVisitor* v, Relocatable* top);
+ static char* Iterate(ObjectVisitor* v, char* t);
+ private:
+ static Relocatable* top_;
+ Relocatable* prev_;
+};
+
+
// A flat string reader provides random access to the contents of a
// string independent of the character width of the string. The handle
// must be valid as long as the reader is being used.
-class FlatStringReader BASE_EMBEDDED {
+class FlatStringReader : public Relocatable {
public:
explicit FlatStringReader(Handle<String> str);
explicit FlatStringReader(Vector<const char> input);
- ~FlatStringReader();
- void RefreshState();
+ void PostGarbageCollection();
inline uc32 Get(int index);
int length() { return length_; }
- static void PostGarbageCollectionProcessing();
private:
String** str_;
bool is_ascii_;
int length_;
const void* start_;
- FlatStringReader* prev_;
- static FlatStringReader* top_;
};
@@ -4403,6 +4425,9 @@ class JSArray: public JSObject {
void JSArrayVerify();
#endif
+ // Number of element slots to pre-allocate for an empty array.
+ static const int kPreallocatedArrayElements = 4;
+
// Layout description.
static const int kLengthOffset = JSObject::kHeaderSize;
static const int kSize = kLengthOffset + kPointerSize;
@@ -4804,9 +4829,6 @@ class ObjectVisitor BASE_EMBEDDED {
// To allow lazy clearing of inline caches the visitor has
// a rich interface for iterating over Code objects.
- // Called prior to visiting the body of a Code object.
- virtual void BeginCodeIteration(Code* code);
-
// Visits a code target in the instruction stream.
virtual void VisitCodeTarget(RelocInfo* rinfo);
@@ -4816,9 +4838,6 @@ class ObjectVisitor BASE_EMBEDDED {
// Visits a debug call target in the instruction stream.
virtual void VisitDebugTarget(RelocInfo* rinfo);
- // Called after completing visiting the body of a Code object.
- virtual void EndCodeIteration(Code* code) {}
-
// Handy shorthand for visiting a single pointer.
virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
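With Begin/EndCodeIteration removed, ObjectVisitor subclasses only need to supply VisitPointers; code targets and debug targets get the defaults shown in the objects.cc hunk above and are overridden only by visitors that must rewrite them, such as the GC's UpdatingVisitor. A minimal hypothetical visitor under the slimmed-down interface:

// Hypothetical example: counts the pointer slots it is shown and leaves the
// default VisitCodeTarget/VisitDebugTarget behaviour untouched.
class SlotCountingVisitor : public ObjectVisitor {
 public:
  SlotCountingVisitor() : count_(0) { }
  virtual void VisitPointers(Object** start, Object** end) {
    count_ += static_cast<int>(end - start);
  }
  int count() { return count_; }
 private:
  int count_;
};
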
diff --git a/V8Binding/v8/src/parser.cc b/V8Binding/v8/src/parser.cc
index 0abb9ed..3b24687 100644
--- a/V8Binding/v8/src/parser.cc
+++ b/V8Binding/v8/src/parser.cc
@@ -798,12 +798,6 @@ class ParserFactory BASE_EMBEDDED {
return Call::sentinel();
}
- virtual Expression* NewCallEval(Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos) {
- return CallEval::sentinel();
- }
-
virtual Statement* EmptyStatement() {
return NULL;
}
@@ -854,12 +848,6 @@ class AstBuildingParserFactory : public ParserFactory {
return new Call(expression, arguments, pos);
}
- virtual Expression* NewCallEval(Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos) {
- return new CallEval(expression, arguments, pos);
- }
-
virtual Statement* EmptyStatement();
};
@@ -1196,7 +1184,6 @@ Parser::Parser(Handle<Script> script,
bool Parser::PreParseProgram(Handle<String> source,
unibrow::CharacterStream* stream) {
HistogramTimerScope timer(&Counters::pre_parse);
- StackGuard guard;
AssertNoZoneAllocation assert_no_zone_allocation;
AssertNoAllocation assert_no_allocation;
NoHandleAllocation no_handle_allocation;
@@ -1937,31 +1924,20 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
Statement* Parser::ParseFunctionDeclaration(bool* ok) {
- // Parse a function literal. We may or may not have a function name.
- // If we have a name we use it as the variable name for the function
- // (a function declaration) and not as the function name of a function
- // expression.
-
+ // FunctionDeclaration ::
+ // 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
int function_token_position = scanner().location().beg_pos;
-
- Handle<String> name;
- if (peek() == Token::IDENTIFIER) name = ParseIdentifier(CHECK_OK);
- FunctionLiteral* fun = ParseFunctionLiteral(name, function_token_position,
- DECLARATION, CHECK_OK);
-
- if (name.is_null()) {
- // We don't have a name - it is always an anonymous function
- // expression.
- return NEW(ExpressionStatement(fun));
- } else {
- // We have a name so even if we're not at the top-level of the
- // global or a function scope, we treat is as such and introduce
- // the function with it's initial value upon entering the
- // corresponding scope.
- Declare(name, Variable::VAR, fun, true, CHECK_OK);
- return factory()->EmptyStatement();
- }
+ Handle<String> name = ParseIdentifier(CHECK_OK);
+ FunctionLiteral* fun = ParseFunctionLiteral(name,
+ function_token_position,
+ DECLARATION,
+ CHECK_OK);
+ // Even if we're not at the top-level of the global or a function
+ // scope, we treat it as such and introduce the function with its
+ // initial value upon entering the corresponding scope.
+ Declare(name, Variable::VAR, fun, true, CHECK_OK);
+ return factory()->EmptyStatement();
}
@@ -2672,25 +2648,13 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
} else {
Expression* expression = ParseExpression(false, CHECK_OK);
if (peek() == Token::IN) {
- // Report syntax error if the expression is an invalid
- // left-hand side expression.
+ // Signal a reference error if the expression is an invalid
+ // left-hand side expression. We could report this as a syntax
+ // error here but for compatibility with JSC we choose to report
+ // the error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- if (expression != NULL && expression->AsCall() != NULL) {
- // According to ECMA-262 host function calls are permitted to
- // return references. This cannot happen in our system so we
- // will always get an error. We could report this as a syntax
- // error here but for compatibility with KJS and SpiderMonkey we
- // choose to report the error at runtime.
- Handle<String> type = Factory::invalid_lhs_in_for_in_symbol();
- expression = NewThrowReferenceError(type);
- } else {
- // Invalid left hand side expressions that are not function
- // calls are reported as syntax errors at compile time.
- ReportMessage("invalid_lhs_in_for_in",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
+ Handle<String> type = Factory::invalid_lhs_in_for_in_symbol();
+ expression = NewThrowReferenceError(type);
}
ForInStatement* loop = NEW(ForInStatement(labels));
Target target(this, loop);
@@ -2767,30 +2731,15 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
return expression;
}
+ // Signal a reference error if the expression is an invalid left-hand
+ // side expression. We could report this as a syntax error here but
+ // for compatibility with JSC we choose to report the error at
+ // runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- if (expression != NULL && expression->AsCall() != NULL) {
- // According to ECMA-262 host function calls are permitted to
- // return references. This cannot happen in our system so we
- // will always get an error. We could report this as a syntax
- // error here but for compatibility with KJS and SpiderMonkey we
- // choose to report the error at runtime.
- Handle<String> type = Factory::invalid_lhs_in_assignment_symbol();
- expression = NewThrowReferenceError(type);
- } else {
- // Invalid left hand side expressions that are not function
- // calls are reported as syntax errors at compile time.
- //
- // NOTE: KJS sometimes delay the error reporting to runtime. If
- // we want to be completely compatible we should do the same.
- // For example: "(x++) = 42" gives a reference error at runtime
- // with KJS whereas we report a syntax error at compile time.
- ReportMessage("invalid_lhs_in_assignment", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
+ Handle<String> type = Factory::invalid_lhs_in_assignment_symbol();
+ expression = NewThrowReferenceError(type);
}
-
Token::Value op = Next(); // Get assignment operator.
int pos = scanner().location().beg_pos;
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
@@ -2963,45 +2912,37 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
Token::Value op = peek();
if (Token::IsUnaryOp(op)) {
op = Next();
- Expression* x = ParseUnaryExpression(CHECK_OK);
+ Expression* expression = ParseUnaryExpression(CHECK_OK);
// Compute some expressions involving only number literals.
- if (x && x->AsLiteral() && x->AsLiteral()->handle()->IsNumber()) {
- double x_val = x->AsLiteral()->handle()->Number();
+ if (expression != NULL && expression->AsLiteral() &&
+ expression->AsLiteral()->handle()->IsNumber()) {
+ double value = expression->AsLiteral()->handle()->Number();
switch (op) {
case Token::ADD:
- return x;
+ return expression;
case Token::SUB:
- return NewNumberLiteral(-x_val);
+ return NewNumberLiteral(-value);
case Token::BIT_NOT:
- return NewNumberLiteral(~DoubleToInt32(x_val));
+ return NewNumberLiteral(~DoubleToInt32(value));
default: break;
}
}
- return NEW(UnaryOperation(op, x));
+ return NEW(UnaryOperation(op, expression));
} else if (Token::IsCountOp(op)) {
op = Next();
- Expression* x = ParseUnaryExpression(CHECK_OK);
- if (x == NULL || !x->IsValidLeftHandSide()) {
- if (x != NULL && x->AsCall() != NULL) {
- // According to ECMA-262 host function calls are permitted to
- // return references. This cannot happen in our system so we
- // will always get an error. We could report this as a syntax
- // error here but for compatibility with KJS and SpiderMonkey we
- // choose to report the error at runtime.
- Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
- x = NewThrowReferenceError(type);
- } else {
- // Invalid left hand side expressions that are not function
- // calls are reported as syntax errors at compile time.
- ReportMessage("invalid_lhs_in_prefix_op", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
+ Expression* expression = ParseUnaryExpression(CHECK_OK);
+ // Signal a reference error if the expression is an invalid
+ // left-hand side expression. We could report this as a syntax
+ // error here but for compatibility with JSC we choose to report the
+ // error at runtime.
+ if (expression == NULL || !expression->IsValidLeftHandSide()) {
+ Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
+ expression = NewThrowReferenceError(type);
}
- return NEW(CountOperation(true /* prefix */, op, x));
+ return NEW(CountOperation(true /* prefix */, op, expression));
} else {
return ParsePostfixExpression(ok);
@@ -3013,30 +2954,20 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
// PostfixExpression ::
// LeftHandSideExpression ('++' | '--')?
- Expression* result = ParseLeftHandSideExpression(CHECK_OK);
+ Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
if (!scanner_.has_line_terminator_before_next() && Token::IsCountOp(peek())) {
- if (result == NULL || !result->IsValidLeftHandSide()) {
- if (result != NULL && result->AsCall() != NULL) {
- // According to ECMA-262 host function calls are permitted to
- // return references. This cannot happen in our system so we
- // will always get an error. We could report this as a syntax
- // error here but for compatibility with KJS and SpiderMonkey we
- // choose to report the error at runtime.
- Handle<String> type = Factory::invalid_lhs_in_postfix_op_symbol();
- result = NewThrowReferenceError(type);
- } else {
- // Invalid left hand side expressions that are not function
- // calls are reported as syntax errors at compile time.
- ReportMessage("invalid_lhs_in_postfix_op",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
+ // Signal a reference error if the expression is an invalid
+ // left-hand side expression. We could report this as a syntax
+ // error here but for compatibility with JSC we choose to report the
+ // error at runtime.
+ if (expression == NULL || !expression->IsValidLeftHandSide()) {
+ Handle<String> type = Factory::invalid_lhs_in_postfix_op_symbol();
+ expression = NewThrowReferenceError(type);
}
Token::Value next = Next();
- result = NEW(CountOperation(false /* postfix */, next, result));
+ expression = NEW(CountOperation(false /* postfix */, next, expression));
}
- return result;
+ return expression;
}
@@ -3074,8 +3005,6 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
// declared in the current scope chain. These calls are marked as
// potentially direct eval calls. Whether they are actually direct calls
// to eval is determined at run time.
-
- bool is_potentially_direct_eval = false;
if (!is_pre_parsing_) {
VariableProxy* callee = result->AsVariableProxy();
if (callee != NULL && callee->IsVariable(Factory::eval_symbol())) {
@@ -3083,16 +3012,10 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
Variable* var = top_scope_->Lookup(name);
if (var == NULL) {
top_scope_->RecordEvalCall();
- is_potentially_direct_eval = true;
}
}
}
-
- if (is_potentially_direct_eval) {
- result = factory()->NewCallEval(result, args, pos);
- } else {
- result = factory()->NewCall(result, args, pos);
- }
+ result = factory()->NewCall(result, args, pos);
break;
}
@@ -4840,8 +4763,6 @@ bool ParseRegExp(FlatStringReader* input,
bool multiline,
RegExpCompileData* result) {
ASSERT(result != NULL);
- // Make sure we have a stack guard.
- StackGuard guard;
RegExpParser parser(input, &result->error, multiline);
RegExpTree* tree = parser.ParsePattern();
if (parser.failed()) {
diff --git a/V8Binding/v8/src/platform-freebsd.cc b/V8Binding/v8/src/platform-freebsd.cc
index 92d72f8..73d6eeb 100644
--- a/V8Binding/v8/src/platform-freebsd.cc
+++ b/V8Binding/v8/src/platform-freebsd.cc
@@ -141,7 +141,9 @@ void* OS::Allocate(const size_t requested,
void OS::Free(void* buf, const size_t length) {
// TODO(1240712): munmap has a return value which is ignored here.
- munmap(buf, length);
+ int result = munmap(buf, length);
+ USE(result);
+ ASSERT(result == 0);
}
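The same change is applied to OS::Free on Linux and macOS below: the munmap result is captured, checked in debug builds, and explicitly consumed so that release builds (where ASSERT expands to nothing) do not emit an unused-variable warning. A sketch of the consuming idiom, assuming USE() has the usual swallow-the-value definition:

// Sketch only; the real USE() is defined in V8's shared utility headers.
template <typename T>
static inline void UseSketch(T) { }    // discards the value, keeps it evaluated
#define USE_SKETCH(expr) UseSketch(expr)
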
@@ -334,7 +336,7 @@ bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
bool VirtualMemory::Uncommit(void* address, size_t size) {
return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
@@ -552,14 +554,18 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
-#if defined (__arm__) || defined(__thumb__)
- sample.pc = mcontext.mc_r15;
- sample.sp = mcontext.mc_r13;
- sample.fp = mcontext.mc_r11;
-#else
+#if V8_HOST_ARCH_IA32
sample.pc = mcontext.mc_eip;
sample.sp = mcontext.mc_esp;
sample.fp = mcontext.mc_ebp;
+#elif V8_HOST_ARCH_X64
+ sample.pc = mcontext.mc_rip;
+ sample.sp = mcontext.mc_rsp;
+ sample.fp = mcontext.mc_rbp;
+#elif V8_HOST_ARCH_ARM
+ sample.pc = mcontext.mc_r15;
+ sample.sp = mcontext.mc_r13;
+ sample.fp = mcontext.mc_r11;
#endif
active_sampler_->SampleStack(&sample);
}
diff --git a/V8Binding/v8/src/platform-linux.cc b/V8Binding/v8/src/platform-linux.cc
index cb93afb..fe4c31f 100644
--- a/V8Binding/v8/src/platform-linux.cc
+++ b/V8Binding/v8/src/platform-linux.cc
@@ -147,7 +147,9 @@ void* OS::Allocate(const size_t requested,
void OS::Free(void* address, const size_t size) {
// TODO(1240712): munmap has a return value which is ignored here.
- munmap(address, size);
+ int result = munmap(address, size);
+ USE(result);
+ ASSERT(result == 0);
}
@@ -362,7 +364,7 @@ bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
bool VirtualMemory::Uncommit(void* address, size_t size) {
return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
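Adding MAP_FIXED to Uncommit (here and in the FreeBSD and macOS variants) matters because without it the kernel treats the requested address only as a hint: the PROT_NONE mapping may be placed elsewhere and the committed pages at the given address would survive. A Linux-flavoured sketch of the intended effect, assuming the region was originally reserved with mmap:

// Sketch: replace the committed pages at [address, address + size) with an
// inaccessible, unreserved mapping at exactly that address.
static bool UncommitSketch(void* address, size_t size) {
  void* result = mmap(address, size, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
                      -1, 0);           // anonymous mapping, fd/offset unused
  // With MAP_FIXED a successful mmap returns exactly `address`.
  return result != MAP_FAILED;
}
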
diff --git a/V8Binding/v8/src/platform-macos.cc b/V8Binding/v8/src/platform-macos.cc
index a78142a..0b236a5 100644
--- a/V8Binding/v8/src/platform-macos.cc
+++ b/V8Binding/v8/src/platform-macos.cc
@@ -42,6 +42,7 @@
#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>
+#include <mach/vm_statistics.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
@@ -123,12 +124,22 @@ size_t OS::AllocateAlignment() {
}
+// Constants used for mmap.
+// kMmapFd is used to pass vm_alloc flags to tag the region with the
+// user-defined tag 255. This helps identify V8-allocated regions in memory
+// analysis tools like vmmap(1).
+static const int kMmapFd = VM_MAKE_TAG(255);
+static const off_t kMmapFdOffset = 0;
+
+
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+ void* mbase = mmap(NULL, msize, prot,
+ MAP_PRIVATE | MAP_ANON,
+ kMmapFd, kMmapFdOffset);
if (mbase == MAP_FAILED) {
LOG(StringEvent("OS::Allocate", "mmap failed"));
return NULL;
@@ -141,7 +152,9 @@ void* OS::Allocate(const size_t requested,
void OS::Free(void* address, const size_t size) {
// TODO(1240712): munmap has a return value which is ignored here.
- munmap(address, size);
+ int result = munmap(address, size);
+ USE(result);
+ ASSERT(result == 0);
}
@@ -278,9 +291,6 @@ int OS::StackWalk(Vector<StackFrame> frames) {
}
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory(size_t size) {
@@ -318,7 +328,7 @@ bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
bool VirtualMemory::Uncommit(void* address, size_t size) {
return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
diff --git a/V8Binding/v8/src/prettyprinter.cc b/V8Binding/v8/src/prettyprinter.cc
index 7a8af40..bf66c4b 100644
--- a/V8Binding/v8/src/prettyprinter.cc
+++ b/V8Binding/v8/src/prettyprinter.cc
@@ -358,11 +358,6 @@ void PrettyPrinter::VisitCall(Call* node) {
}
-void PrettyPrinter::VisitCallEval(CallEval* node) {
- VisitCall(node);
-}
-
-
void PrettyPrinter::VisitCallNew(CallNew* node) {
Print("new (");
Visit(node->expression());
@@ -1040,11 +1035,6 @@ void AstPrinter::VisitCall(Call* node) {
}
-void AstPrinter::VisitCallEval(CallEval* node) {
- VisitCall(node);
-}
-
-
void AstPrinter::VisitCallNew(CallNew* node) {
IndentedScope indent("CALL NEW");
Visit(node->expression());
diff --git a/V8Binding/v8/src/regexp-stack.cc b/V8Binding/v8/src/regexp-stack.cc
index 83cb6e4..87a674d 100644
--- a/V8Binding/v8/src/regexp-stack.cc
+++ b/V8Binding/v8/src/regexp-stack.cc
@@ -69,6 +69,14 @@ void RegExpStack::Reset() {
}
+void RegExpStack::ThreadLocal::Free() {
+ if (thread_local_.memory_size_ > 0) {
+ DeleteArray(thread_local_.memory_);
+ thread_local_ = ThreadLocal();
+ }
+}
+
+
Address RegExpStack::EnsureCapacity(size_t size) {
if (size > kMaximumStackSize) return NULL;
if (size < kMinimumStackSize) size = kMinimumStackSize;
diff --git a/V8Binding/v8/src/regexp-stack.h b/V8Binding/v8/src/regexp-stack.h
index 99cf33c..319ab28 100644
--- a/V8Binding/v8/src/regexp-stack.h
+++ b/V8Binding/v8/src/regexp-stack.h
@@ -71,6 +71,7 @@ class RegExpStack {
static size_t ArchiveSpacePerThread() { return sizeof(thread_local_); }
static char* ArchiveStack(char* to);
static char* RestoreStack(char* from);
+ static void FreeThreadResources() { thread_local_.Free(); }
private:
// Artificial limit used when no memory has been allocated.
@@ -92,6 +93,7 @@ class RegExpStack {
Address memory_;
size_t memory_size_;
Address limit_;
+ void Free();
};
// Resets the buffer if it has grown beyond the default/minimum size.
diff --git a/V8Binding/v8/src/rewriter.cc b/V8Binding/v8/src/rewriter.cc
index d6ea68e..11fc071 100644
--- a/V8Binding/v8/src/rewriter.cc
+++ b/V8Binding/v8/src/rewriter.cc
@@ -383,12 +383,6 @@ void AstOptimizer::VisitCall(Call* node) {
}
-void AstOptimizer::VisitCallEval(CallEval* node) {
- Visit(node->expression());
- OptimizeArguments(node->arguments());
-}
-
-
void AstOptimizer::VisitCallNew(CallNew* node) {
Visit(node->expression());
OptimizeArguments(node->arguments());
@@ -759,12 +753,6 @@ void Processor::VisitCall(Call* node) {
}
-void Processor::VisitCallEval(CallEval* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
void Processor::VisitCallNew(CallNew* node) {
USE(node);
UNREACHABLE();
diff --git a/V8Binding/v8/src/runtime.cc b/V8Binding/v8/src/runtime.cc
index c26783a..4e1940d 100644
--- a/V8Binding/v8/src/runtime.cc
+++ b/V8Binding/v8/src/runtime.cc
@@ -1208,6 +1208,14 @@ static Object* Runtime_FunctionIsAPIFunction(Arguments args) {
: Heap::false_value();
}
+static Object* Runtime_FunctionIsBuiltin(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_CHECKED(JSFunction, f, args[0]);
+ return f->IsBuiltin() ? Heap::true_value() : Heap::false_value();
+}
+
static Object* Runtime_SetCode(Arguments args) {
HandleScope scope;
@@ -2992,7 +3000,8 @@ static Object* Runtime_GetPropertyNamesFast(Arguments args) {
HandleScope scope;
Handle<JSObject> object(raw_object);
- Handle<FixedArray> content = GetKeysInFixedArrayFor(object);
+ Handle<FixedArray> content = GetKeysInFixedArrayFor(object,
+ INCLUDE_PROTOS);
// Test again, since cache may have been built by preceding call.
if (object->IsSimpleEnum()) return object->map();
@@ -3001,6 +3010,34 @@ static Object* Runtime_GetPropertyNamesFast(Arguments args) {
}
+static Object* Runtime_LocalKeys(Arguments args) {
+ ASSERT_EQ(args.length(), 1);
+ CONVERT_CHECKED(JSObject, raw_object, args[0]);
+ HandleScope scope;
+ Handle<JSObject> object(raw_object);
+ Handle<FixedArray> contents = GetKeysInFixedArrayFor(object,
+ LOCAL_ONLY);
+ // Some fast paths through GetKeysInFixedArrayFor reuse a cached
+ // property array and since the result is mutable we have to create
+ // a fresh clone on each invocation.
+ int length = contents->length();
+ Handle<FixedArray> copy = Factory::NewFixedArray(length);
+ for (int i = 0; i < length; i++) {
+ Object* entry = contents->get(i);
+ if (entry->IsString()) {
+ copy->set(i, entry);
+ } else {
+ ASSERT(entry->IsNumber());
+ HandleScope scope;
+ Handle<Object> entry_handle(entry);
+ Handle<Object> entry_str = Factory::NumberToString(entry_handle);
+ copy->set(i, *entry_str);
+ }
+ }
+ return *Factory::NewJSArrayWithElements(copy);
+}
+
+
static Object* Runtime_GetArgumentsProperty(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -3562,27 +3599,7 @@ static Object* Runtime_NumberToString(Arguments args) {
Object* number = args[0];
RUNTIME_ASSERT(number->IsNumber());
- Object* cached = Heap::GetNumberStringCache(number);
- if (cached != Heap::undefined_value()) {
- return cached;
- }
-
- char arr[100];
- Vector<char> buffer(arr, ARRAY_SIZE(arr));
- const char* str;
- if (number->IsSmi()) {
- int num = Smi::cast(number)->value();
- str = IntToCString(num, buffer);
- } else {
- double num = HeapNumber::cast(number)->value();
- str = DoubleToCString(num, buffer);
- }
- Object* result = Heap::AllocateStringFromAscii(CStrVector(str));
-
- if (!result->IsFailure()) {
- Heap::SetNumberStringCache(number, String::cast(result));
- }
- return result;
+ return Heap::NumberToString(number);
}
@@ -3696,7 +3713,7 @@ static Object* Runtime_NumberMod(Arguments args) {
CONVERT_DOUBLE_CHECKED(x, args[0]);
CONVERT_DOUBLE_CHECKED(y, args[1]);
-#ifdef WIN32
+#if defined WIN32 || defined _WIN64
// Workaround MS fmod bugs. ECMA-262 says:
// dividend is finite and divisor is an infinity => result equals dividend
// dividend is a zero and divisor is nonzero finite => result equals dividend
@@ -4556,22 +4573,25 @@ static Object* Runtime_LookupContext(Arguments args) {
}
-// A mechanism to return pairs of Object*'s. This is somewhat
-// compiler-dependent as it assumes that a 64-bit value (a long long)
-// is returned via two registers (edx:eax on ia32). Both the ia32 and
-// arm platform support this; it is mostly an issue of "coaxing" the
-// compiler to do the right thing.
-//
-// TODO(1236026): This is a non-portable hack that should be removed.
+// A mechanism to return a pair of Object pointers in registers (if possible).
+// How this is achieved is calling convention-dependent.
+// All currently supported x86 compilers use calling conventions that are cdecl
+// variants where a 64-bit value is returned in two 32-bit registers
+// (edx:eax on ia32, r1:r0 on ARM).
+// In the AMD-64 calling convention a struct of two pointers is returned in rdx:rax.
+// In the Win64 calling convention, a struct of two pointers is returned in memory,
+// allocated by the caller, and passed as a pointer in a hidden first parameter.
#ifdef V8_HOST_ARCH_64_BIT
-// Tested with GCC, not with MSVC.
struct ObjectPair {
Object* x;
Object* y;
};
+
static inline ObjectPair MakePair(Object* x, Object* y) {
ObjectPair result = {x, y};
- return result; // Pointers x and y returned in rax and rdx, in AMD-x64-abi.
+ // Pointers x and y returned in rax and rdx, in AMD-x64-abi.
+ // In Win64 they are assigned to a hidden first argument.
+ return result;
}
#else
typedef uint64_t ObjectPair;
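The 32-bit packing of MakePair sits just below this hunk and is unchanged; it presumably encodes x in the low word and y in the high word so the pair comes back in edx:eax (or r1:r0). A hedged sketch of that branch, assuming a 32-bit build:

// Sketch of the uint64_t packing implied by `typedef uint64_t ObjectPair`.
static inline uint64_t MakePairSketch(Object* x, Object* y) {
  return reinterpret_cast<uint32_t>(x) |
         (static_cast<uint64_t>(reinterpret_cast<uint32_t>(y)) << 32);
}
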
@@ -4582,8 +4602,6 @@ static inline ObjectPair MakePair(Object* x, Object* y) {
#endif
-
-
static inline Object* Unhole(Object* x, PropertyAttributes attributes) {
ASSERT(!x->IsTheHole() || (attributes & READ_ONLY) != 0);
USE(attributes);
@@ -5515,7 +5533,7 @@ static Object* Runtime_GetArrayKeys(Arguments args) {
if (array->elements()->IsDictionary()) {
// Create an array and get all the keys into it, then remove all the
// keys that are not integers in the range 0 to length-1.
- Handle<FixedArray> keys = GetKeysInFixedArrayFor(array);
+ Handle<FixedArray> keys = GetKeysInFixedArrayFor(array, INCLUDE_PROTOS);
int keys_length = keys->length();
for (int i = 0; i < keys_length; i++) {
Object* key = keys->get(i);
@@ -5737,55 +5755,51 @@ static Object* Runtime_DebugGetPropertyDetails(Arguments args) {
int length = LocalPrototypeChainLength(*obj);
// Try local lookup on each of the objects.
- LookupResult result;
Handle<JSObject> jsproto = obj;
for (int i = 0; i < length; i++) {
+ LookupResult result;
jsproto->LocalLookup(*name, &result);
if (result.IsProperty()) {
- break;
+ // LookupResult is not GC safe as it holds raw object pointers.
+ // GC can happen later in this code so put the required fields into
+ // local variables using handles when required for later use.
+ PropertyType result_type = result.type();
+ Handle<Object> result_callback_obj;
+ if (result_type == CALLBACKS) {
+ result_callback_obj = Handle<Object>(result.GetCallbackObject());
+ }
+ Smi* property_details = result.GetPropertyDetails().AsSmi();
+ // DebugLookupResultValue can cause GC so details from LookupResult need
+ // to be copied to handles before this.
+ bool caught_exception = false;
+ Object* raw_value = DebugLookupResultValue(*obj, *name, &result,
+ &caught_exception);
+ if (raw_value->IsFailure()) return raw_value;
+ Handle<Object> value(raw_value);
+
+ // If the callback object is a fixed array then it contains JavaScript
+ // getter and/or setter.
+ bool hasJavaScriptAccessors = result_type == CALLBACKS &&
+ result_callback_obj->IsFixedArray();
+ Handle<FixedArray> details =
+ Factory::NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
+ details->set(0, *value);
+ details->set(1, property_details);
+ if (hasJavaScriptAccessors) {
+ details->set(2,
+ caught_exception ? Heap::true_value()
+ : Heap::false_value());
+ details->set(3, FixedArray::cast(*result_callback_obj)->get(0));
+ details->set(4, FixedArray::cast(*result_callback_obj)->get(1));
+ }
+
+ return *Factory::NewJSArrayWithElements(details);
}
if (i < length - 1) {
jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
}
}
- if (result.IsProperty()) {
- // LookupResult is not GC safe as all its members are raw object pointers.
- // When calling DebugLookupResultValue GC can happen as this might invoke
- // callbacks. After the call to DebugLookupResultValue the callback object
- // in the LookupResult might still be needed. Put it into a handle for later
- // use.
- PropertyType result_type = result.type();
- Handle<Object> result_callback_obj;
- if (result_type == CALLBACKS) {
- result_callback_obj = Handle<Object>(result.GetCallbackObject());
- }
-
- // Find the actual value. Don't use result after this call as it's content
- // can be invalid.
- bool caught_exception = false;
- Object* value = DebugLookupResultValue(*obj, *name, &result,
- &caught_exception);
- if (value->IsFailure()) return value;
- Handle<Object> value_handle(value);
-
- // If the callback object is a fixed array then it contains JavaScript
- // getter and/or setter.
- bool hasJavaScriptAccessors = result_type == CALLBACKS &&
- result_callback_obj->IsFixedArray();
- Handle<FixedArray> details =
- Factory::NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
- details->set(0, *value_handle);
- details->set(1, result.GetPropertyDetails().AsSmi());
- if (hasJavaScriptAccessors) {
- details->set(2,
- caught_exception ? Heap::true_value() : Heap::false_value());
- details->set(3, FixedArray::cast(result.GetCallbackObject())->get(0));
- details->set(4, FixedArray::cast(result.GetCallbackObject())->get(1));
- }
-
- return *Factory::NewJSArrayWithElements(details);
- }
return Heap::undefined_value();
}
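The rework above exists because LookupResult holds raw object pointers and is therefore invalidated by any allocation; everything needed from it is now copied into handles (or Smis) before DebugLookupResultValue, which can run JavaScript, is called. A condensed sketch of the pattern, using the same calls the hunk relies on:

// Sketch: copy what you need out of a LookupResult while no GC can happen,
// then never touch it again once allocation becomes possible.
static Object* CopyOutThenAllocate(Handle<JSObject> obj, Handle<String> name) {
  LookupResult result;
  obj->LocalLookup(*name, &result);
  if (!result.IsProperty()) return Heap::undefined_value();
  PropertyType type = result.type();                   // plain value, GC-safe
  Handle<Object> callback_obj;
  if (type == CALLBACKS) {
    callback_obj = Handle<Object>(result.GetCallbackObject());
  }
  Smi* details = result.GetPropertyDetails().AsSmi();  // Smis never move
  // Anything past this point may allocate (and move objects); only the
  // copies above may be used, never `result` itself.
  Handle<FixedArray> out = Factory::NewFixedArray(2);
  out->set(0, details);
  out->set(1, type == CALLBACKS ? *callback_obj : Heap::undefined_value());
  return *Factory::NewJSArrayWithElements(out);
}
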
@@ -6270,7 +6284,7 @@ static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) {
if (function_context->has_extension() &&
!function_context->IsGlobalContext()) {
Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext);
+ Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
for (int i = 0; i < keys->length(); i++) {
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
@@ -6319,7 +6333,7 @@ static Handle<JSObject> MaterializeClosure(Handle<Context> context) {
// be variables introduced by eval.
if (context->has_extension()) {
Handle<JSObject> ext(JSObject::cast(context->extension()));
- Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext);
+ Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
for (int i = 0; i < keys->length(); i++) {
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
@@ -6341,7 +6355,12 @@ class ScopeIterator {
ScopeTypeGlobal = 0,
ScopeTypeLocal,
ScopeTypeWith,
- ScopeTypeClosure
+ ScopeTypeClosure,
+ // Every catch block contains an implicit with block (its parameter is
+ // a JSContextExtensionObject) that extends the current scope with a variable
+ // holding the exception object. Such with blocks are treated as scopes of their
+ // own type.
+ ScopeTypeCatch
};
explicit ScopeIterator(JavaScriptFrame* frame)
@@ -6417,7 +6436,14 @@ class ScopeIterator {
return ScopeTypeClosure;
}
ASSERT(context_->has_extension());
- ASSERT(!context_->extension()->IsJSContextExtensionObject());
+ // Current scope is either an explicit with statement or a with statement
+ // implicitly generated for a catch block.
+ // If the extension object here is a JSContextExtensionObject then the
+ // current with statement is one from a catch block, otherwise it's a
+ // regular with statement.
+ if (context_->extension()->IsJSContextExtensionObject()) {
+ return ScopeTypeCatch;
+ }
return ScopeTypeWith;
}
@@ -6432,6 +6458,7 @@ class ScopeIterator {
return MaterializeLocalScope(frame_);
break;
case ScopeIterator::ScopeTypeWith:
+ case ScopeIterator::ScopeTypeCatch:
// Return the with object.
return Handle<JSObject>(CurrentContext()->extension());
break;
@@ -6488,6 +6515,14 @@ class ScopeIterator {
break;
}
+ case ScopeIterator::ScopeTypeCatch: {
+ PrintF("Catch:\n");
+ Handle<JSObject> extension =
+ Handle<JSObject>(CurrentContext()->extension());
+ extension->Print();
+ break;
+ }
+
case ScopeIterator::ScopeTypeClosure: {
PrintF("Closure:\n");
CurrentContext()->Print();
@@ -6799,8 +6834,20 @@ Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
target_start_position = start_position;
target = shared;
} else {
- if (target_start_position < start_position &&
- shared->end_position() < target->end_position()) {
+ if (target_start_position == start_position &&
+ shared->end_position() == target->end_position()) {
+ // If a top-level function contains only one function
+ // declaration, the source for the top-level and the function is
+ // the same. In that case prefer the non-top-level function.
+ if (!shared->is_toplevel()) {
+ target_start_position = start_position;
+ target = shared;
+ }
+ } else if (target_start_position <= start_position &&
+ shared->end_position() <= target->end_position()) {
+ // This containment check includes equality as a function inside
+ // a top-level function can share either start or end position
+ // with the top-level function.
target_start_position = start_position;
target = shared;
}
@@ -6912,7 +6959,8 @@ static Object* Runtime_ChangeBreakOnException(Arguments args) {
// Prepare for stepping
// args[0]: break id for checking execution state
// args[1]: step action from the enumeration StepAction
-// args[2]: number of times to perform the step
+// args[2]: number of times to perform the step; for step-out it is the
+// number of frames to step down.
static Object* Runtime_PrepareStep(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
@@ -6939,6 +6987,9 @@ static Object* Runtime_PrepareStep(Arguments args) {
return Top::Throw(Heap::illegal_argument_symbol());
}
+ // Clear all current stepping setup.
+ Debug::ClearStepping();
+
// Prepare step.
Debug::PrepareStep(static_cast<StepAction>(step_action), step_count);
return Heap::undefined_value();
@@ -7089,7 +7140,7 @@ static Object* Runtime_DebugEvaluate(Arguments args) {
// the function being debugged.
// function(arguments,__source__) {return eval(__source__);}
static const char* source_str =
- "function(arguments,__source__){return eval(__source__);}";
+ "(function(arguments,__source__){return eval(__source__);})";
static const int source_str_length = strlen(source_str);
Handle<String> function_source =
Factory::NewStringFromAscii(Vector<const char>(source_str,
@@ -7598,7 +7649,7 @@ static Object* Runtime_ListNatives(Arguments args) {
HandleScope scope;
Handle<JSArray> result = Factory::NewJSArray(0);
int index = 0;
-#define ADD_ENTRY(Name, argc) \
+#define ADD_ENTRY(Name, argc, ressize) \
{ \
HandleScope inner; \
Handle<String> name = \
@@ -7634,13 +7685,13 @@ static Object* Runtime_IS_VAR(Arguments args) {
// ----------------------------------------------------------------------------
// Implementation of Runtime
-#define F(name, nargs) \
+#define F(name, nargs, ressize) \
{ #name, "RuntimeStub_" #name, FUNCTION_ADDR(Runtime_##name), nargs, \
- static_cast<int>(Runtime::k##name) },
+ static_cast<int>(Runtime::k##name), ressize },
static Runtime::Function Runtime_functions[] = {
RUNTIME_FUNCTION_LIST(F)
- { NULL, NULL, NULL, 0, -1 }
+ { NULL, NULL, NULL, 0, -1, 0 }
};
#undef F
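
The FindSharedFunctionInfoInScript hunk above replaces strict containment with an inclusive check plus a tie-breaker: when two candidates cover exactly the same source range, the non-top-level function wins. A minimal sketch of that selection rule, using hypothetical types rather than the real SharedFunctionInfo:

    #include <cstdio>

    struct Candidate { int start; int end; bool toplevel; };

    // Should 'next' replace the current best candidate? This mirrors the
    // containment rule from the hunk above.
    bool PrefersNext(const Candidate& best, const Candidate& next) {
      if (next.start == best.start && next.end == best.end) {
        // Identical source range: prefer the non-top-level function.
        return !next.toplevel;
      }
      // Inclusive containment: a nested function may share its start or end
      // position with the enclosing top-level code.
      return best.start <= next.start && next.end <= best.end;
    }

    int main() {
      Candidate toplevel = {0, 100, true};
      Candidate inner = {0, 100, false};
      std::printf("%d\n", PrefersNext(toplevel, inner));  // Prints 1: inner wins.
      return 0;
    }
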
diff --git a/V8Binding/v8/src/runtime.h b/V8Binding/v8/src/runtime.h
index 1be677a..afa278b 100644
--- a/V8Binding/v8/src/runtime.h
+++ b/V8Binding/v8/src/runtime.h
@@ -43,269 +43,272 @@ namespace internal {
// this problem. Please avoid large recursive macros whenever possible.
#define RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
/* Property access */ \
- F(GetProperty, 2) \
- F(KeyedGetProperty, 2) \
- F(DeleteProperty, 2) \
- F(HasLocalProperty, 2) \
- F(HasProperty, 2) \
- F(HasElement, 2) \
- F(IsPropertyEnumerable, 2) \
- F(GetPropertyNames, 1) \
- F(GetPropertyNamesFast, 1) \
- F(GetArgumentsProperty, 1) \
- F(ToFastProperties, 1) \
- F(ToSlowProperties, 1) \
- \
- F(IsInPrototypeChain, 2) \
- F(SetHiddenPrototype, 2) \
- \
- F(IsConstructCall, 0) \
+ F(GetProperty, 2, 1) \
+ F(KeyedGetProperty, 2, 1) \
+ F(DeleteProperty, 2, 1) \
+ F(HasLocalProperty, 2, 1) \
+ F(HasProperty, 2, 1) \
+ F(HasElement, 2, 1) \
+ F(IsPropertyEnumerable, 2, 1) \
+ F(GetPropertyNames, 1, 1) \
+ F(GetPropertyNamesFast, 1, 1) \
+ F(GetArgumentsProperty, 1, 1) \
+ F(ToFastProperties, 1, 1) \
+ F(ToSlowProperties, 1, 1) \
+ \
+ F(IsInPrototypeChain, 2, 1) \
+ F(SetHiddenPrototype, 2, 1) \
+ \
+ F(IsConstructCall, 0, 1) \
\
/* Utilities */ \
- F(GetCalledFunction, 0) \
- F(GetFunctionDelegate, 1) \
- F(GetConstructorDelegate, 1) \
- F(NewArguments, 1) \
- F(NewArgumentsFast, 3) \
- F(LazyCompile, 1) \
- F(SetNewFunctionAttributes, 1) \
+ F(GetCalledFunction, 0, 1) \
+ F(GetFunctionDelegate, 1, 1) \
+ F(GetConstructorDelegate, 1, 1) \
+ F(NewArguments, 1, 1) \
+ F(NewArgumentsFast, 3, 1) \
+ F(LazyCompile, 1, 1) \
+ F(SetNewFunctionAttributes, 1, 1) \
\
/* Array join support */ \
- F(PushIfAbsent, 2) \
- F(ArrayConcat, 1) \
+ F(PushIfAbsent, 2, 1) \
+ F(ArrayConcat, 1, 1) \
\
/* Conversions */ \
- F(ToBool, 1) \
- F(Typeof, 1) \
- \
- F(StringToNumber, 1) \
- F(StringFromCharCodeArray, 1) \
- F(StringParseInt, 2) \
- F(StringParseFloat, 1) \
- F(StringToLowerCase, 1) \
- F(StringToUpperCase, 1) \
- F(CharFromCode, 1) \
- F(URIEscape, 1) \
- F(URIUnescape, 1) \
- \
- F(NumberToString, 1) \
- F(NumberToInteger, 1) \
- F(NumberToJSUint32, 1) \
- F(NumberToJSInt32, 1) \
- F(NumberToSmi, 1) \
+ F(ToBool, 1, 1) \
+ F(Typeof, 1, 1) \
+ \
+ F(StringToNumber, 1, 1) \
+ F(StringFromCharCodeArray, 1, 1) \
+ F(StringParseInt, 2, 1) \
+ F(StringParseFloat, 1, 1) \
+ F(StringToLowerCase, 1, 1) \
+ F(StringToUpperCase, 1, 1) \
+ F(CharFromCode, 1, 1) \
+ F(URIEscape, 1, 1) \
+ F(URIUnescape, 1, 1) \
+ \
+ F(NumberToString, 1, 1) \
+ F(NumberToInteger, 1, 1) \
+ F(NumberToJSUint32, 1, 1) \
+ F(NumberToJSInt32, 1, 1) \
+ F(NumberToSmi, 1, 1) \
\
/* Arithmetic operations */ \
- F(NumberAdd, 2) \
- F(NumberSub, 2) \
- F(NumberMul, 2) \
- F(NumberDiv, 2) \
- F(NumberMod, 2) \
- F(NumberUnaryMinus, 1) \
+ F(NumberAdd, 2, 1) \
+ F(NumberSub, 2, 1) \
+ F(NumberMul, 2, 1) \
+ F(NumberDiv, 2, 1) \
+ F(NumberMod, 2, 1) \
+ F(NumberUnaryMinus, 1, 1) \
\
- F(StringAdd, 2) \
- F(StringBuilderConcat, 2) \
+ F(StringAdd, 2, 1) \
+ F(StringBuilderConcat, 2, 1) \
\
/* Bit operations */ \
- F(NumberOr, 2) \
- F(NumberAnd, 2) \
- F(NumberXor, 2) \
- F(NumberNot, 1) \
+ F(NumberOr, 2, 1) \
+ F(NumberAnd, 2, 1) \
+ F(NumberXor, 2, 1) \
+ F(NumberNot, 1, 1) \
\
- F(NumberShl, 2) \
- F(NumberShr, 2) \
- F(NumberSar, 2) \
+ F(NumberShl, 2, 1) \
+ F(NumberShr, 2, 1) \
+ F(NumberSar, 2, 1) \
\
/* Comparisons */ \
- F(NumberEquals, 2) \
- F(StringEquals, 2) \
+ F(NumberEquals, 2, 1) \
+ F(StringEquals, 2, 1) \
\
- F(NumberCompare, 3) \
- F(SmiLexicographicCompare, 2) \
- F(StringCompare, 2) \
+ F(NumberCompare, 3, 1) \
+ F(SmiLexicographicCompare, 2, 1) \
+ F(StringCompare, 2, 1) \
\
/* Math */ \
- F(Math_abs, 1) \
- F(Math_acos, 1) \
- F(Math_asin, 1) \
- F(Math_atan, 1) \
- F(Math_atan2, 2) \
- F(Math_ceil, 1) \
- F(Math_cos, 1) \
- F(Math_exp, 1) \
- F(Math_floor, 1) \
- F(Math_log, 1) \
- F(Math_pow, 2) \
- F(Math_round, 1) \
- F(Math_sin, 1) \
- F(Math_sqrt, 1) \
- F(Math_tan, 1) \
+ F(Math_abs, 1, 1) \
+ F(Math_acos, 1, 1) \
+ F(Math_asin, 1, 1) \
+ F(Math_atan, 1, 1) \
+ F(Math_atan2, 2, 1) \
+ F(Math_ceil, 1, 1) \
+ F(Math_cos, 1, 1) \
+ F(Math_exp, 1, 1) \
+ F(Math_floor, 1, 1) \
+ F(Math_log, 1, 1) \
+ F(Math_pow, 2, 1) \
+ F(Math_round, 1, 1) \
+ F(Math_sin, 1, 1) \
+ F(Math_sqrt, 1, 1) \
+ F(Math_tan, 1, 1) \
\
/* Regular expressions */ \
- F(RegExpCompile, 3) \
- F(RegExpExec, 4) \
+ F(RegExpCompile, 3, 1) \
+ F(RegExpExec, 4, 1) \
\
/* Strings */ \
- F(StringCharCodeAt, 2) \
- F(StringIndexOf, 3) \
- F(StringLastIndexOf, 3) \
- F(StringLocaleCompare, 2) \
- F(StringSlice, 3) \
- F(StringReplaceRegExpWithString, 4) \
- F(StringMatch, 3) \
+ F(StringCharCodeAt, 2, 1) \
+ F(StringIndexOf, 3, 1) \
+ F(StringLastIndexOf, 3, 1) \
+ F(StringLocaleCompare, 2, 1) \
+ F(StringSlice, 3, 1) \
+ F(StringReplaceRegExpWithString, 4, 1) \
+ F(StringMatch, 3, 1) \
\
/* Numbers */ \
- F(NumberToRadixString, 2) \
- F(NumberToFixed, 2) \
- F(NumberToExponential, 2) \
- F(NumberToPrecision, 2)
+ F(NumberToRadixString, 2, 1) \
+ F(NumberToFixed, 2, 1) \
+ F(NumberToExponential, 2, 1) \
+ F(NumberToPrecision, 2, 1)
#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
/* Reflection */ \
- F(FunctionSetInstanceClassName, 2) \
- F(FunctionSetLength, 2) \
- F(FunctionSetPrototype, 2) \
- F(FunctionGetName, 1) \
- F(FunctionSetName, 2) \
- F(FunctionGetSourceCode, 1) \
- F(FunctionGetScript, 1) \
- F(FunctionGetScriptSourcePosition, 1) \
- F(FunctionGetPositionForOffset, 2) \
- F(FunctionIsAPIFunction, 1) \
- F(GetScript, 1) \
- F(CollectStackTrace, 2) \
- \
- F(ClassOf, 1) \
- F(SetCode, 2) \
- \
- F(CreateApiFunction, 1) \
- F(IsTemplate, 1) \
- F(GetTemplateField, 2) \
- F(DisableAccessChecks, 1) \
- F(EnableAccessChecks, 1) \
+ F(FunctionSetInstanceClassName, 2, 1) \
+ F(FunctionSetLength, 2, 1) \
+ F(FunctionSetPrototype, 2, 1) \
+ F(FunctionGetName, 1, 1) \
+ F(FunctionSetName, 2, 1) \
+ F(FunctionGetSourceCode, 1, 1) \
+ F(FunctionGetScript, 1, 1) \
+ F(FunctionGetScriptSourcePosition, 1, 1) \
+ F(FunctionGetPositionForOffset, 2, 1) \
+ F(FunctionIsAPIFunction, 1, 1) \
+ F(FunctionIsBuiltin, 1, 1) \
+ F(GetScript, 1, 1) \
+ F(CollectStackTrace, 2, 1) \
+ \
+ F(ClassOf, 1, 1) \
+ F(SetCode, 2, 1) \
+ \
+ F(CreateApiFunction, 1, 1) \
+ F(IsTemplate, 1, 1) \
+ F(GetTemplateField, 2, 1) \
+ F(DisableAccessChecks, 1, 1) \
+ F(EnableAccessChecks, 1, 1) \
\
/* Dates */ \
- F(DateCurrentTime, 0) \
- F(DateParseString, 2) \
- F(DateLocalTimezone, 1) \
- F(DateLocalTimeOffset, 0) \
- F(DateDaylightSavingsOffset, 1) \
+ F(DateCurrentTime, 0, 1) \
+ F(DateParseString, 2, 1) \
+ F(DateLocalTimezone, 1, 1) \
+ F(DateLocalTimeOffset, 0, 1) \
+ F(DateDaylightSavingsOffset, 1, 1) \
\
/* Numbers */ \
- F(NumberIsFinite, 1) \
+ F(NumberIsFinite, 1, 1) \
\
/* Globals */ \
- F(CompileString, 2) \
- F(GlobalPrint, 1) \
+ F(CompileString, 2, 1) \
+ F(GlobalPrint, 1, 1) \
\
/* Eval */ \
- F(GlobalReceiver, 1) \
- F(ResolvePossiblyDirectEval, 2) \
+ F(GlobalReceiver, 1, 1) \
+ F(ResolvePossiblyDirectEval, 2, 1) \
\
- F(SetProperty, -1 /* 3 or 4 */) \
- F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */) \
+ F(SetProperty, -1 /* 3 or 4 */, 1) \
+ F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
\
/* Arrays */ \
- F(RemoveArrayHoles, 2) \
- F(GetArrayKeys, 2) \
- F(MoveArrayContents, 2) \
- F(EstimateNumberOfElements, 1) \
+ F(RemoveArrayHoles, 2, 1) \
+ F(GetArrayKeys, 2, 1) \
+ F(MoveArrayContents, 2, 1) \
+ F(EstimateNumberOfElements, 1, 1) \
\
/* Getters and Setters */ \
- F(DefineAccessor, -1 /* 4 or 5 */) \
- F(LookupAccessor, 3) \
+ F(DefineAccessor, -1 /* 4 or 5 */, 1) \
+ F(LookupAccessor, 3, 1) \
\
/* Literals */ \
- F(MaterializeRegExpLiteral, 4)\
- F(CreateArrayLiteralBoilerplate, 3) \
- F(CreateObjectLiteralBoilerplate, 3) \
- F(CloneLiteralBoilerplate, 1) \
- F(CloneShallowLiteralBoilerplate, 1) \
+ F(MaterializeRegExpLiteral, 4, 1)\
+ F(CreateArrayLiteralBoilerplate, 3, 1) \
+ F(CreateObjectLiteralBoilerplate, 3, 1) \
+ F(CloneLiteralBoilerplate, 1, 1) \
+ F(CloneShallowLiteralBoilerplate, 1, 1) \
\
/* Catch context extension objects */ \
- F(CreateCatchExtensionObject, 2) \
+ F(CreateCatchExtensionObject, 2, 1) \
\
/* Statements */ \
- F(NewClosure, 2) \
- F(NewObject, 1) \
- F(Throw, 1) \
- F(ReThrow, 1) \
- F(ThrowReferenceError, 1) \
- F(StackGuard, 1) \
+ F(NewClosure, 2, 1) \
+ F(NewObject, 1, 1) \
+ F(Throw, 1, 1) \
+ F(ReThrow, 1, 1) \
+ F(ThrowReferenceError, 1, 1) \
+ F(StackGuard, 1, 1) \
\
/* Contexts */ \
- F(NewContext, 1) \
- F(PushContext, 1) \
- F(PushCatchContext, 1) \
- F(LookupContext, 2) \
- F(LoadContextSlot, 2) \
- F(LoadContextSlotNoReferenceError, 2) \
- F(StoreContextSlot, 3) \
+ F(NewContext, 1, 1) \
+ F(PushContext, 1, 1) \
+ F(PushCatchContext, 1, 1) \
+ F(LookupContext, 2, 1) \
+ F(LoadContextSlot, 2, 2) \
+ F(LoadContextSlotNoReferenceError, 2, 2) \
+ F(StoreContextSlot, 3, 1) \
\
/* Declarations and initialization */ \
- F(DeclareGlobals, 3) \
- F(DeclareContextSlot, 4) \
- F(InitializeVarGlobal, -1 /* 1 or 2 */) \
- F(InitializeConstGlobal, 2) \
- F(InitializeConstContextSlot, 3) \
- F(OptimizeObjectForAddingMultipleProperties, 2) \
- F(TransformToFastProperties, 1) \
+ F(DeclareGlobals, 3, 1) \
+ F(DeclareContextSlot, 4, 1) \
+ F(InitializeVarGlobal, -1 /* 1 or 2 */, 1) \
+ F(InitializeConstGlobal, 2, 1) \
+ F(InitializeConstContextSlot, 3, 1) \
+ F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
+ F(TransformToFastProperties, 1, 1) \
\
/* Debugging */ \
- F(DebugPrint, 1) \
- F(DebugTrace, 0) \
- F(TraceEnter, 0) \
- F(TraceExit, 1) \
- F(Abort, 2) \
+ F(DebugPrint, 1, 1) \
+ F(DebugTrace, 0, 1) \
+ F(TraceEnter, 0, 1) \
+ F(TraceExit, 1, 1) \
+ F(Abort, 2, 1) \
/* Logging */ \
- F(Log, 2) \
+ F(Log, 2, 1) \
+ /* ES5 */ \
+ F(LocalKeys, 1, 1) \
\
/* Pseudo functions - handled as macros by parser */ \
- F(IS_VAR, 1)
+ F(IS_VAR, 1, 1)
#ifdef ENABLE_DEBUGGER_SUPPORT
#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
/* Debugger support*/ \
- F(DebugBreak, 0) \
- F(SetDebugEventListener, 2) \
- F(Break, 0) \
- F(DebugGetPropertyDetails, 2) \
- F(DebugGetProperty, 2) \
- F(DebugLocalPropertyNames, 1) \
- F(DebugLocalElementNames, 1) \
- F(DebugPropertyTypeFromDetails, 1) \
- F(DebugPropertyAttributesFromDetails, 1) \
- F(DebugPropertyIndexFromDetails, 1) \
- F(DebugInterceptorInfo, 1) \
- F(DebugNamedInterceptorPropertyNames, 1) \
- F(DebugIndexedInterceptorElementNames, 1) \
- F(DebugNamedInterceptorPropertyValue, 2) \
- F(DebugIndexedInterceptorElementValue, 2) \
- F(CheckExecutionState, 1) \
- F(GetFrameCount, 1) \
- F(GetFrameDetails, 2) \
- F(GetScopeCount, 2) \
- F(GetScopeDetails, 3) \
- F(DebugPrintScopes, 0) \
- F(GetCFrames, 1) \
- F(GetThreadCount, 1) \
- F(GetThreadDetails, 2) \
- F(GetBreakLocations, 1) \
- F(SetFunctionBreakPoint, 3) \
- F(SetScriptBreakPoint, 3) \
- F(ClearBreakPoint, 1) \
- F(ChangeBreakOnException, 2) \
- F(PrepareStep, 3) \
- F(ClearStepping, 0) \
- F(DebugEvaluate, 4) \
- F(DebugEvaluateGlobal, 3) \
- F(DebugGetLoadedScripts, 0) \
- F(DebugReferencedBy, 3) \
- F(DebugConstructedBy, 2) \
- F(DebugGetPrototype, 1) \
- F(SystemBreak, 0) \
- F(DebugDisassembleFunction, 1) \
- F(DebugDisassembleConstructor, 1) \
- F(FunctionGetInferredName, 1)
+ F(DebugBreak, 0, 1) \
+ F(SetDebugEventListener, 2, 1) \
+ F(Break, 0, 1) \
+ F(DebugGetPropertyDetails, 2, 1) \
+ F(DebugGetProperty, 2, 1) \
+ F(DebugLocalPropertyNames, 1, 1) \
+ F(DebugLocalElementNames, 1, 1) \
+ F(DebugPropertyTypeFromDetails, 1, 1) \
+ F(DebugPropertyAttributesFromDetails, 1, 1) \
+ F(DebugPropertyIndexFromDetails, 1, 1) \
+ F(DebugInterceptorInfo, 1, 1) \
+ F(DebugNamedInterceptorPropertyNames, 1, 1) \
+ F(DebugIndexedInterceptorElementNames, 1, 1) \
+ F(DebugNamedInterceptorPropertyValue, 2, 1) \
+ F(DebugIndexedInterceptorElementValue, 2, 1) \
+ F(CheckExecutionState, 1, 1) \
+ F(GetFrameCount, 1, 1) \
+ F(GetFrameDetails, 2, 1) \
+ F(GetScopeCount, 2, 1) \
+ F(GetScopeDetails, 3, 1) \
+ F(DebugPrintScopes, 0, 1) \
+ F(GetCFrames, 1, 1) \
+ F(GetThreadCount, 1, 1) \
+ F(GetThreadDetails, 2, 1) \
+ F(GetBreakLocations, 1, 1) \
+ F(SetFunctionBreakPoint, 3, 1) \
+ F(SetScriptBreakPoint, 3, 1) \
+ F(ClearBreakPoint, 1, 1) \
+ F(ChangeBreakOnException, 2, 1) \
+ F(PrepareStep, 3, 1) \
+ F(ClearStepping, 0, 1) \
+ F(DebugEvaluate, 4, 1) \
+ F(DebugEvaluateGlobal, 3, 1) \
+ F(DebugGetLoadedScripts, 0, 1) \
+ F(DebugReferencedBy, 3, 1) \
+ F(DebugConstructedBy, 2, 1) \
+ F(DebugGetPrototype, 1, 1) \
+ F(SystemBreak, 0, 1) \
+ F(DebugDisassembleFunction, 1, 1) \
+ F(DebugDisassembleConstructor, 1, 1) \
+ F(FunctionGetInferredName, 1, 1)
#else
#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
#endif
@@ -313,7 +316,7 @@ namespace internal {
#ifdef DEBUG
#define RUNTIME_FUNCTION_LIST_DEBUG(F) \
/* Testing */ \
- F(ListNatives, 0)
+ F(ListNatives, 0, 1)
#else
#define RUNTIME_FUNCTION_LIST_DEBUG(F)
#endif
@@ -336,7 +339,7 @@ namespace internal {
class Runtime : public AllStatic {
public:
enum FunctionId {
-#define F(name, nargs) k##name,
+#define F(name, nargs, ressize) k##name,
RUNTIME_FUNCTION_LIST(F)
kNofFunctions
#undef F
@@ -357,6 +360,9 @@ class Runtime : public AllStatic {
// arguments.
int nargs;
int stub_id;
+ // Size of result. Most functions return a single pointer (size 1);
+ // functions that return two values, such as LoadContextSlot, use 2.
+ int result_size;
};
// Get the runtime function with the given function id.
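
The runtime.h changes above extend every RUNTIME_FUNCTION_LIST entry from (name, nargs) to (name, nargs, ressize), and the same list is expanded once to produce the FunctionId enum and once to produce the Runtime_functions table. A minimal sketch of this X-macro pattern with made-up names (not the actual V8 macro set):

    #include <cstdio>

    // One list, expanded several times with different definitions of F.
    #define DEMO_FUNCTION_LIST(F) \
      F(GetProperty, 2, 1)        \
      F(LoadContextSlot, 2, 2)

    // Expansion 1: an enum of function ids.
    #define F(name, nargs, ressize) kDemo##name,
    enum DemoFunctionId { DEMO_FUNCTION_LIST(F) kDemoNofFunctions };
    #undef F

    // Expansion 2: a table carrying the per-entry metadata, including the
    // new result-size column.
    struct DemoFunction { const char* name; int nargs; int result_size; };
    #define F(name, nargs, ressize) { #name, nargs, ressize },
    static const DemoFunction kDemoFunctions[] = { DEMO_FUNCTION_LIST(F) };
    #undef F

    int main() {
      const int count = sizeof(kDemoFunctions) / sizeof(kDemoFunctions[0]);
      for (int i = 0; i < count; i++) {
        std::printf("%s: %d arg(s), result size %d\n",
                    kDemoFunctions[i].name, kDemoFunctions[i].nargs,
                    kDemoFunctions[i].result_size);
      }
      return 0;
    }
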
diff --git a/V8Binding/v8/src/serialize.cc b/V8Binding/v8/src/serialize.cc
index f65235a..e0ee4bd 100644
--- a/V8Binding/v8/src/serialize.cc
+++ b/V8Binding/v8/src/serialize.cc
@@ -70,7 +70,7 @@ const int kPageAndOffsetMask = (1 << kPageAndOffsetBits) - 1;
// These values are special allocation space tags used for
// serialization.
-// Mar the pages executable on platforms that support it.
+// Mark the pages executable on platforms that support it.
const int kLargeCode = LAST_SPACE + 1;
// Allocate extra remembered-set bits.
const int kLargeFixedArray = LAST_SPACE + 2;
@@ -541,7 +541,7 @@ void ExternalReferenceTable::PopulateTable() {
#undef DEF_ENTRY_A
// Runtime functions
-#define RUNTIME_ENTRY(name, nargs) \
+#define RUNTIME_ENTRY(name, nargs, ressize) \
{ RUNTIME_FUNCTION, \
Runtime::k##name, \
"Runtime::" #name },
@@ -935,6 +935,15 @@ class ReferenceUpdater: public ObjectVisitor {
}
}
+ virtual void VisitCodeTarget(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Address encoded_target = serializer_->GetSavedAddress(target);
+ offsets_.Add(rinfo->target_address_address() - obj_address_);
+ addresses_.Add(encoded_target);
+ }
+
+
virtual void VisitExternalReferences(Address* start, Address* end) {
for (Address* p = start; p < end; ++p) {
uint32_t code = reference_encoder_->Encode(*p);
@@ -1053,7 +1062,7 @@ void Serializer::Serialize() {
// No active threads.
CHECK_EQ(NULL, ThreadState::FirstInUse());
// No active or weak handles.
- CHECK(HandleScopeImplementer::instance()->Blocks()->is_empty());
+ CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
// We need a counter function during serialization to resolve the
// references to counters in the code on the heap.
@@ -1093,6 +1102,14 @@ void Serializer::VisitPointers(Object** start, Object** end) {
}
+void Serializer::VisitCodeTarget(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ bool serialized;
+ Encode(target, &serialized);
+}
+
+
class GlobalHandlesRetriever: public ObjectVisitor {
public:
explicit GlobalHandlesRetriever(List<Object**>* handles)
@@ -1255,10 +1272,7 @@ Address Serializer::PutObject(HeapObject* obj) {
SaveAddress(obj, addr);
if (type == CODE_TYPE) {
- Code* code = Code::cast(obj);
- // Ensure Code objects contain Object pointers, not Addresses.
- code->ConvertICTargetsFromAddressToObject();
- LOG(CodeMoveEvent(code->address(), addr));
+ LOG(CodeMoveEvent(obj->address(), addr));
}
// Write out the object prologue: type, size, and simulated address of obj.
@@ -1290,12 +1304,6 @@ Address Serializer::PutObject(HeapObject* obj) {
}
#endif
- if (type == CODE_TYPE) {
- Code* code = Code::cast(obj);
- // Convert relocations from Object* to Address in Code objects
- code->ConvertICTargetsFromObjectToAddress();
- }
-
objects_++;
return addr;
}
@@ -1387,7 +1395,7 @@ void Deserializer::Deserialize() {
// No active threads.
ASSERT_EQ(NULL, ThreadState::FirstInUse());
// No active handles.
- ASSERT(HandleScopeImplementer::instance()->Blocks()->is_empty());
+ ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
reference_decoder_ = new ExternalReferenceDecoder();
// By setting linear allocation only, we forbid the use of free list
// allocation which is not predicted by SimulatedAddress.
@@ -1422,6 +1430,14 @@ void Deserializer::VisitPointers(Object** start, Object** end) {
}
+void Deserializer::VisitCodeTarget(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Address encoded_address = reinterpret_cast<Address>(rinfo->target_object());
+ Code* target_object = reinterpret_cast<Code*>(Resolve(encoded_address));
+ rinfo->set_target_address(target_object->instruction_start());
+}
+
+
void Deserializer::VisitExternalReferences(Address* start, Address* end) {
for (Address* p = start; p < end; ++p) {
uint32_t code = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*p));
@@ -1616,10 +1632,7 @@ Object* Deserializer::GetObject() {
obj->IterateBody(type, size, this);
if (type == CODE_TYPE) {
- Code* code = Code::cast(obj);
- // Convert relocations from Object* to Address in Code objects
- code->ConvertICTargetsFromObjectToAddress();
- LOG(CodeMoveEvent(a, code->address()));
+ LOG(CodeMoveEvent(a, obj->address()));
}
objects_++;
return o;
diff --git a/V8Binding/v8/src/serialize.h b/V8Binding/v8/src/serialize.h
index 1b24065..c901480 100644
--- a/V8Binding/v8/src/serialize.h
+++ b/V8Binding/v8/src/serialize.h
@@ -155,7 +155,7 @@ class Serializer: public ObjectVisitor {
friend class ReferenceUpdater;
virtual void VisitPointers(Object** start, Object** end);
-
+ virtual void VisitCodeTarget(RelocInfo* rinfo);
bool IsVisited(HeapObject* obj);
Address GetSavedAddress(HeapObject* obj);
@@ -197,7 +197,7 @@ class Serializer: public ObjectVisitor {
int flags_end_; // The position right after the flags.
- // An array of per-space SimulatedHeapSpacees used as memory allocators.
+ // An array of per-space SimulatedHeapSpaces used as memory allocators.
SimulatedHeapSpace* allocator_[LAST_SPACE+1];
// A list of global handles at serialization time.
List<Object**> global_handles_;
@@ -289,6 +289,7 @@ class Deserializer: public ObjectVisitor {
private:
virtual void VisitPointers(Object** start, Object** end);
+ virtual void VisitCodeTarget(RelocInfo* rinfo);
virtual void VisitExternalReferences(Address* start, Address* end);
virtual void VisitRuntimeEntry(RelocInfo* rinfo);
diff --git a/V8Binding/v8/src/spaces.cc b/V8Binding/v8/src/spaces.cc
index de9b233..43abaa4 100644
--- a/V8Binding/v8/src/spaces.cc
+++ b/V8Binding/v8/src/spaces.cc
@@ -145,6 +145,128 @@ Page::RSetState Page::rset_state_ = Page::IN_USE;
#endif
// -----------------------------------------------------------------------------
+// CodeRange
+
+List<CodeRange::FreeBlock> CodeRange::free_list_(0);
+List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
+int CodeRange::current_allocation_block_index_ = 0;
+VirtualMemory* CodeRange::code_range_ = NULL;
+
+
+bool CodeRange::Setup(const size_t requested) {
+ ASSERT(code_range_ == NULL);
+
+ code_range_ = new VirtualMemory(requested);
+ CHECK(code_range_ != NULL);
+ if (!code_range_->IsReserved()) {
+ delete code_range_;
+ code_range_ = NULL;
+ return false;
+ }
+
+ // We are sure that we have mapped a block of requested addresses.
+ ASSERT(code_range_->size() == requested);
+ LOG(NewEvent("CodeRange", code_range_->address(), requested));
+ allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
+ current_allocation_block_index_ = 0;
+ return true;
+}
+
+
+int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
+ const FreeBlock* right) {
+ // The entire point of CodeRange is that the difference between two
+ // addresses in the range can be represented as a signed 32-bit int,
+ // so the cast is semantically correct.
+ return static_cast<int>(left->start - right->start);
+}
+
+
+void CodeRange::GetNextAllocationBlock(size_t requested) {
+ for (current_allocation_block_index_++;
+ current_allocation_block_index_ < allocation_list_.length();
+ current_allocation_block_index_++) {
+ if (requested <= allocation_list_[current_allocation_block_index_].size) {
+ return; // Found a large enough allocation block.
+ }
+ }
+
+ // Sort and merge the free blocks on the free list and the allocation list.
+ free_list_.AddAll(allocation_list_);
+ allocation_list_.Clear();
+ free_list_.Sort(&CompareFreeBlockAddress);
+ for (int i = 0; i < free_list_.length();) {
+ FreeBlock merged = free_list_[i];
+ i++;
+ // Add adjacent free blocks to the current merged block.
+ while (i < free_list_.length() &&
+ free_list_[i].start == merged.start + merged.size) {
+ merged.size += free_list_[i].size;
+ i++;
+ }
+ if (merged.size > 0) {
+ allocation_list_.Add(merged);
+ }
+ }
+ free_list_.Clear();
+
+ for (current_allocation_block_index_ = 0;
+ current_allocation_block_index_ < allocation_list_.length();
+ current_allocation_block_index_++) {
+ if (requested <= allocation_list_[current_allocation_block_index_].size) {
+ return; // Found a large enough allocation block.
+ }
+ }
+
+ // Code range is full or too fragmented.
+ V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
+}
+
+
+
+void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
+ ASSERT(current_allocation_block_index_ < allocation_list_.length());
+ if (requested > allocation_list_[current_allocation_block_index_].size) {
+ // Find an allocation block large enough. This function call may
+ // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
+ GetNextAllocationBlock(requested);
+ }
+ // Commit the requested memory at the start of the current allocation block.
+ *allocated = RoundUp(requested, Page::kPageSize);
+ FreeBlock current = allocation_list_[current_allocation_block_index_];
+ if (*allocated >= current.size - Page::kPageSize) {
+ // Don't leave a small free block, useless for a large object or chunk.
+ *allocated = current.size;
+ }
+ ASSERT(*allocated <= current.size);
+ if (!code_range_->Commit(current.start, *allocated, true)) {
+ *allocated = 0;
+ return NULL;
+ }
+ allocation_list_[current_allocation_block_index_].start += *allocated;
+ allocation_list_[current_allocation_block_index_].size -= *allocated;
+ if (*allocated == current.size) {
+ GetNextAllocationBlock(0); // This block is used up, get the next one.
+ }
+ return current.start;
+}
+
+
+void CodeRange::FreeRawMemory(void* address, size_t length) {
+ free_list_.Add(FreeBlock(address, length));
+ code_range_->Uncommit(address, length);
+}
+
+
+void CodeRange::TearDown() {
+ delete code_range_; // Frees all memory in the virtual memory range.
+ code_range_ = NULL;
+ free_list_.Free();
+ allocation_list_.Free();
+}
+
+
+// -----------------------------------------------------------------------------
// MemoryAllocator
//
int MemoryAllocator::capacity_ = 0;
@@ -226,8 +348,12 @@ void* MemoryAllocator::AllocateRawMemory(const size_t requested,
size_t* allocated,
Executability executable) {
if (size_ + static_cast<int>(requested) > capacity_) return NULL;
-
- void* mem = OS::Allocate(requested, allocated, executable == EXECUTABLE);
+ void* mem;
+ if (executable == EXECUTABLE && CodeRange::exists()) {
+ mem = CodeRange::AllocateRawMemory(requested, allocated);
+ } else {
+ mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
+ }
int alloced = *allocated;
size_ += alloced;
Counters::memory_allocated.Increment(alloced);
@@ -236,7 +362,11 @@ void* MemoryAllocator::AllocateRawMemory(const size_t requested,
void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
- OS::Free(mem, length);
+ if (CodeRange::contains(static_cast<Address>(mem))) {
+ CodeRange::FreeRawMemory(mem, length);
+ } else {
+ OS::Free(mem, length);
+ }
Counters::memory_allocated.Decrement(length);
size_ -= length;
ASSERT(size_ >= 0);
@@ -827,13 +957,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// have their remembered set bits set if required as determined
// by the visitor.
int size = object->Size();
- if (object->IsCode()) {
- Code::cast(object)->ConvertICTargetsFromAddressToObject();
- object->IterateBody(map->instance_type(), size, visitor);
- Code::cast(object)->ConvertICTargetsFromObjectToAddress();
- } else {
- object->IterateBody(map->instance_type(), size, visitor);
- }
+ object->IterateBody(map->instance_type(), size, visitor);
current += size;
}
@@ -1906,7 +2030,7 @@ void OldSpace::ReportStatistics() {
int rset = Memory::int_at(rset_addr);
if (rset != 0) {
// Bits were set
- int intoff = rset_addr - p->address();
+ int intoff = rset_addr - p->address() - Page::kRSetOffset;
int bitoff = 0;
for (; bitoff < kBitsPerInt; ++bitoff) {
if ((rset & (1 << bitoff)) != 0) {
@@ -2171,7 +2295,7 @@ void FixedSpace::ReportStatistics() {
int rset = Memory::int_at(rset_addr);
if (rset != 0) {
// Bits were set
- int intoff = rset_addr - p->address();
+ int intoff = rset_addr - p->address() - Page::kRSetOffset;
int bitoff = 0;
for (; bitoff < kBitsPerInt; ++bitoff) {
if ((rset & (1 << bitoff)) != 0) {
@@ -2574,11 +2698,9 @@ void LargeObjectSpace::Verify() {
// Byte arrays and strings don't have interior pointers.
if (object->IsCode()) {
VerifyPointersVisitor code_visitor;
- Code::cast(object)->ConvertICTargetsFromAddressToObject();
object->IterateBody(map->instance_type(),
object->Size(),
&code_visitor);
- Code::cast(object)->ConvertICTargetsFromObjectToAddress();
} else if (object->IsFixedArray()) {
// We loop over fixed arrays ourselves, rather then using the visitor,
// because the visitor doesn't support the start/offset iteration
diff --git a/V8Binding/v8/src/spaces.h b/V8Binding/v8/src/spaces.h
index 98663db..76b88ef 100644
--- a/V8Binding/v8/src/spaces.h
+++ b/V8Binding/v8/src/spaces.h
@@ -315,6 +315,72 @@ class Space : public Malloced {
// ----------------------------------------------------------------------------
+// All heap objects containing executable code (code objects) must be allocated
+// from a 2 GB range of memory, so that they can call each other using 32-bit
+// displacements. This happens automatically on 32-bit platforms, where 32-bit
+// displacements cover the entire 4GB virtual address space. On 64-bit
+// platforms, we support this using the CodeRange object, which reserves and
+// manages a range of virtual memory.
+class CodeRange : public AllStatic {
+ public:
+ // Reserves a range of virtual memory, but does not commit any of it.
+ // Can only be called once, at heap initialization time.
+ // Returns false on failure.
+ static bool Setup(const size_t requested_size);
+
+ // Frees the range of virtual memory, and frees the data structures used to
+ // manage it.
+ static void TearDown();
+
+ static bool exists() { return code_range_ != NULL; }
+ static bool contains(Address address) {
+ if (code_range_ == NULL) return false;
+ Address start = static_cast<Address>(code_range_->address());
+ return start <= address && address < start + code_range_->size();
+ }
+
+ // Allocates a chunk of memory from the large-object portion of
+ // the code range. On platforms with no separate code range, should
+ // not be called.
+ static void* AllocateRawMemory(const size_t requested, size_t* allocated);
+ static void FreeRawMemory(void* buf, size_t length);
+
+ private:
+ // The reserved range of virtual memory that all code objects are put in.
+ static VirtualMemory* code_range_;
+ // Plain old data class, just a struct plus a constructor.
+ class FreeBlock {
+ public:
+ FreeBlock(Address start_arg, size_t size_arg)
+ : start(start_arg), size(size_arg) {}
+ FreeBlock(void* start_arg, size_t size_arg)
+ : start(static_cast<Address>(start_arg)), size(size_arg) {}
+
+ Address start;
+ size_t size;
+ };
+
+ // Freed blocks of memory are added to the free list. When the allocation
+ // list is exhausted, the free list is sorted and merged to make the new
+ // allocation list.
+ static List<FreeBlock> free_list_;
+ // Memory is allocated from the free blocks on the allocation list.
+ // The block at current_allocation_block_index_ is the current block.
+ static List<FreeBlock> allocation_list_;
+ static int current_allocation_block_index_;
+
+ // Finds a block on the allocation list that contains at least the
+ // requested amount of memory. If none is found, sorts and merges
+ // the existing free memory blocks, and searches again.
+ // If none can be found, terminates V8 with FatalProcessOutOfMemory.
+ static void GetNextAllocationBlock(size_t requested);
+ // Compares the start addresses of two free blocks.
+ static int CompareFreeBlockAddress(const FreeBlock* left,
+ const FreeBlock* right);
+};
+
+
+// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages chunks for the paged heap spaces (old space and map
// space). A paged chunk consists of pages. Pages in a chunk have contiguous
@@ -380,8 +446,9 @@ class MemoryAllocator : public AllStatic {
// function returns an invalid page pointer (NULL). The caller must check
// whether the returned page is valid (by calling Page::is_valid()). It is
// guaranteed that allocated pages have contiguous addresses. The actual
- // number of allocated page is returned in the output parameter
- // allocated_pages.
+ // number of allocated pages is returned in the output parameter
+ // allocated_pages. If the PagedSpace owner is executable and there is
+ // a code range, the pages are allocated from the code range.
static Page* AllocatePages(int requested_pages, int* allocated_pages,
PagedSpace* owner);
@@ -395,6 +462,9 @@ class MemoryAllocator : public AllStatic {
// Allocates and frees raw memory of certain size.
// These are just thin wrappers around OS::Allocate and OS::Free,
// but keep track of allocated bytes as part of heap.
+ // If the flag is EXECUTABLE and a code range exists, the requested
+ // memory is allocated from the code range. If a code range exists
+ // and the freed memory is in it, the code range manages the freed memory.
static void* AllocateRawMemory(const size_t requested,
size_t* allocated,
Executability executable);
@@ -445,12 +515,12 @@ class MemoryAllocator : public AllStatic {
// Due to encoding limitation, we can only have 8K chunks.
static const int kMaxNofChunks = 1 << Page::kPageSizeBits;
- // If a chunk has at least 32 pages, the maximum heap size is about
- // 8 * 1024 * 32 * 8K = 2G bytes.
-#if defined(ANDROID)
- static const int kPagesPerChunk = 16;
+ // If a chunk has at least 16 pages, the maximum heap size is about
+ // 8K * 8K * 16 = 1G bytes.
+#ifdef V8_TARGET_ARCH_X64
+ static const int kPagesPerChunk = 32;
#else
- static const int kPagesPerChunk = 64;
+ static const int kPagesPerChunk = 16;
#endif
static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
@@ -1686,7 +1756,7 @@ class CellSpace : public FixedSpace {
#endif
public:
- TRACK_MEMORY("MapSpace")
+ TRACK_MEMORY("CellSpace")
};
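
The CodeRange class above exists to make the comment at its top hold on 64-bit targets: if every code object is allocated inside one reserved 2 GB range, the distance between any two instructions in that range fits in a signed 32-bit value, so calls can use 32-bit relative displacements. A tiny illustrative check of that arithmetic (example addresses only, not V8 code):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Pretend this is a reserved 2 GB code range and two code objects in it.
      const uint64_t range_start = 0x0000700000000000ULL;
      const uint64_t range_size = 2ULL * 1024 * 1024 * 1024;  // 2 GB
      uint64_t caller = range_start + 0x10000;
      uint64_t callee = range_start + range_size - 0x2000;

      // Any two addresses inside the range differ by less than 2^31, so the
      // displacement of a 32-bit relative call can always reach the target.
      int64_t delta = static_cast<int64_t>(callee) - static_cast<int64_t>(caller);
      assert(delta <= INT32_MAX && delta >= INT32_MIN);
      return 0;
    }
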
diff --git a/V8Binding/v8/src/string-stream.cc b/V8Binding/v8/src/string-stream.cc
index cec4167..8c62a45 100644
--- a/V8Binding/v8/src/string-stream.cc
+++ b/V8Binding/v8/src/string-stream.cc
@@ -251,7 +251,7 @@ void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
}
-SmartPointer<const char> StringStream::ToCString() {
+SmartPointer<const char> StringStream::ToCString() const {
char* str = NewArray<char>(length_ + 1);
memcpy(str, buffer_, length_);
str[length_] = '\0';
diff --git a/V8Binding/v8/src/string-stream.h b/V8Binding/v8/src/string-stream.h
index 6649f18..323a6d6 100644
--- a/V8Binding/v8/src/string-stream.h
+++ b/V8Binding/v8/src/string-stream.h
@@ -141,7 +141,7 @@ class StringStream {
void OutputToStdOut();
void Log();
Handle<String> ToString();
- SmartPointer<const char> ToCString();
+ SmartPointer<const char> ToCString() const;
// Object printing support.
void PrintName(Object* o);
diff --git a/V8Binding/v8/src/string.js b/V8Binding/v8/src/string.js
index 263fac5..fbdc307 100644
--- a/V8Binding/v8/src/string.js
+++ b/V8Binding/v8/src/string.js
@@ -62,7 +62,7 @@ function StringValueOf() {
// ECMA-262, section 15.5.4.4
function StringCharAt(pos) {
- var char_code = %_FastCharCodeAt(this, index);
+ var char_code = %_FastCharCodeAt(this, pos);
if (!%_IsSmi(char_code)) {
var subject = ToString(this);
var index = TO_INTEGER(pos);
@@ -184,6 +184,14 @@ function SubString(string, start, end) {
}
+// This has the same size as the lastMatchInfo array, and can be used for
+// functions that expect that structure to be returned. It is used when the
+// needle is a string rather than a regexp. In this case we can't update
+// lastMatchArray without erroneously affecting the properties on the global
+// RegExp object.
+var reusableMatchInfo = [2, "", "", -1, -1];
+
+
// ECMA-262, section 15.5.4.11
function StringReplace(search, replace) {
var subject = ToString(this);
@@ -224,14 +232,6 @@ function StringReplace(search, replace) {
}
-// This has the same size as the lastMatchInfo array, and can be used for
-// functions that expect that structure to be returned. It is used when the
-// needle is a string rather than a regexp. In this case we can't update
-// lastMatchArray without erroneously affecting the properties on the global
-// RegExp object.
-var reusableMatchInfo = [2, "", "", -1, -1];
-
-
// Helper function for regular expressions in String.prototype.replace.
function StringReplaceRegExp(subject, regexp, replace) {
replace = ToString(replace);
@@ -370,8 +370,8 @@ function addCaptureString(builder, matchInfo, index) {
// 'abcd'.replace(/(.)/g, function() { return RegExp.$1; }
// should be 'abcd' and not 'dddd' (or anything else).
function StringReplaceRegExpWithFunction(subject, regexp, replace) {
- var lastMatchInfo = DoRegExpExec(regexp, subject, 0);
- if (IS_NULL(lastMatchInfo)) return subject;
+ var matchInfo = DoRegExpExec(regexp, subject, 0);
+ if (IS_NULL(matchInfo)) return subject;
var result = new ReplaceResultBuilder(subject);
// There's at least one match. If the regexp is global, we have to loop
@@ -382,11 +382,11 @@ function StringReplaceRegExpWithFunction(subject, regexp, replace) {
if (regexp.global) {
var previous = 0;
do {
- result.addSpecialSlice(previous, lastMatchInfo[CAPTURE0]);
- var startOfMatch = lastMatchInfo[CAPTURE0];
- previous = lastMatchInfo[CAPTURE1];
- result.add(ApplyReplacementFunction(replace, lastMatchInfo, subject));
- // Can't use lastMatchInfo any more from here, since the function could
+ result.addSpecialSlice(previous, matchInfo[CAPTURE0]);
+ var startOfMatch = matchInfo[CAPTURE0];
+ previous = matchInfo[CAPTURE1];
+ result.add(ApplyReplacementFunction(replace, matchInfo, subject));
+ // Can't use matchInfo any more from here, since the function could
// overwrite it.
// Continue with the next match.
// Increment previous if we matched an empty string, as per ECMA-262
@@ -401,20 +401,20 @@ function StringReplaceRegExpWithFunction(subject, regexp, replace) {
// Per ECMA-262 15.10.6.2, if the previous index is greater than the
// string length, there is no match
- lastMatchInfo = (previous > subject.length)
+ matchInfo = (previous > subject.length)
? null
: DoRegExpExec(regexp, subject, previous);
- } while (!IS_NULL(lastMatchInfo));
+ } while (!IS_NULL(matchInfo));
// Tack on the final right substring after the last match, if necessary.
if (previous < subject.length) {
result.addSpecialSlice(previous, subject.length);
}
} else { // Not a global regexp, no need to loop.
- result.addSpecialSlice(0, lastMatchInfo[CAPTURE0]);
- var endOfMatch = lastMatchInfo[CAPTURE1];
- result.add(ApplyReplacementFunction(replace, lastMatchInfo, subject));
- // Can't use lastMatchInfo any more from here, since the function could
+ result.addSpecialSlice(0, matchInfo[CAPTURE0]);
+ var endOfMatch = matchInfo[CAPTURE1];
+ result.add(ApplyReplacementFunction(replace, matchInfo, subject));
+ // Can't use matchInfo any more from here, since the function could
// overwrite it.
result.addSpecialSlice(endOfMatch, subject.length);
}
@@ -424,20 +424,20 @@ function StringReplaceRegExpWithFunction(subject, regexp, replace) {
// Helper function to apply a string replacement function once.
-function ApplyReplacementFunction(replace, lastMatchInfo, subject) {
+function ApplyReplacementFunction(replace, matchInfo, subject) {
// Compute the parameter list consisting of the match, captures, index,
// and subject for the replace function invocation.
- var index = lastMatchInfo[CAPTURE0];
+ var index = matchInfo[CAPTURE0];
// The number of captures plus one for the match.
- var m = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
+ var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;
if (m == 1) {
- var s = CaptureString(subject, lastMatchInfo, 0);
+ var s = CaptureString(subject, matchInfo, 0);
// Don't call directly to avoid exposing the built-in global object.
return replace.call(null, s, index, subject);
}
var parameters = $Array(m + 2);
for (var j = 0; j < m; j++) {
- parameters[j] = CaptureString(subject, lastMatchInfo, j);
+ parameters[j] = CaptureString(subject, matchInfo, j);
}
parameters[j] = index;
parameters[j + 1] = subject;
@@ -539,14 +539,14 @@ function StringSplit(separator, limit) {
return result;
}
- var lastMatchInfo = splitMatch(separator, subject, currentIndex, startIndex);
+ var matchInfo = splitMatch(separator, subject, currentIndex, startIndex);
- if (IS_NULL(lastMatchInfo)) {
+ if (IS_NULL(matchInfo)) {
result[result.length] = subject.slice(currentIndex, length);
return result;
}
- var endIndex = lastMatchInfo[CAPTURE1];
+ var endIndex = matchInfo[CAPTURE1];
// We ignore a zero-length match at the currentIndex.
if (startIndex === endIndex && endIndex === currentIndex) {
@@ -554,12 +554,12 @@ function StringSplit(separator, limit) {
continue;
}
- result[result.length] = SubString(subject, currentIndex, lastMatchInfo[CAPTURE0]);
+ result[result.length] = SubString(subject, currentIndex, matchInfo[CAPTURE0]);
if (result.length === limit) return result;
- for (var i = 2; i < NUMBER_OF_CAPTURES(lastMatchInfo); i += 2) {
- var start = lastMatchInfo[CAPTURE(i)];
- var end = lastMatchInfo[CAPTURE(i + 1)];
+ for (var i = 2; i < NUMBER_OF_CAPTURES(matchInfo); i += 2) {
+ var start = matchInfo[CAPTURE(i)];
+ var end = matchInfo[CAPTURE(i + 1)];
if (start != -1 && end != -1) {
result[result.length] = SubString(subject, start, end);
} else {
@@ -574,16 +574,16 @@ function StringSplit(separator, limit) {
// ECMA-262 section 15.5.4.14
-// Helper function used by split. This version returns the lastMatchInfo
+// Helper function used by split. This version returns the matchInfo
// instead of allocating a new array with basically the same information.
function splitMatch(separator, subject, current_index, start_index) {
if (IS_REGEXP(separator)) {
- var lastMatchInfo = DoRegExpExec(separator, subject, start_index);
- if (lastMatchInfo == null) return null;
+ var matchInfo = DoRegExpExec(separator, subject, start_index);
+ if (matchInfo == null) return null;
// Section 15.5.4.14 paragraph two says that we do not allow zero length
// matches at the end of the string.
- if (lastMatchInfo[CAPTURE0] === subject.length) return null;
- return lastMatchInfo;
+ if (matchInfo[CAPTURE0] === subject.length) return null;
+ return matchInfo;
}
var separatorIndex = subject.indexOf(separator, start_index);
diff --git a/V8Binding/v8/src/stub-cache.cc b/V8Binding/v8/src/stub-cache.cc
index 2906c22..e10dc61 100644
--- a/V8Binding/v8/src/stub-cache.cc
+++ b/V8Binding/v8/src/stub-cache.cc
@@ -735,28 +735,17 @@ Handle<Code> ComputeCallMiss(int argc) {
Object* LoadCallbackProperty(Arguments args) {
- Handle<JSObject> recv = args.at<JSObject>(0);
- Handle<JSObject> holder = args.at<JSObject>(1);
AccessorInfo* callback = AccessorInfo::cast(args[2]);
- Handle<Object> data = args.at<Object>(3);
Address getter_address = v8::ToCData<Address>(callback->getter());
v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
ASSERT(fun != NULL);
- Handle<String> name = args.at<String>(4);
- // NOTE: If we can align the structure of an AccessorInfo with the
- // locations of the arguments to this function maybe we don't have
- // to explicitly create the structure but can just pass a pointer
- // into the stack.
- LOG(ApiNamedPropertyAccess("load", *recv, *name));
- v8::AccessorInfo info(v8::Utils::ToLocal(recv),
- v8::Utils::ToLocal(data),
- v8::Utils::ToLocal(holder));
+ v8::AccessorInfo info(args.arguments());
HandleScope scope;
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
- result = fun(v8::Utils::ToLocal(name), info);
+ result = fun(v8::Utils::ToLocal(args.at<String>(4)), info);
}
RETURN_IF_SCHEDULED_EXCEPTION();
if (result.IsEmpty()) return Heap::undefined_value();
@@ -765,7 +754,7 @@ Object* LoadCallbackProperty(Arguments args) {
Object* StoreCallbackProperty(Arguments args) {
- Handle<JSObject> recv = args.at<JSObject>(0);
+ JSObject* recv = JSObject::cast(args[0]);
AccessorInfo* callback = AccessorInfo::cast(args[1]);
Address setter_address = v8::ToCData<Address>(callback->setter());
v8::AccessorSetter fun = FUNCTION_CAST<v8::AccessorSetter>(setter_address);
@@ -773,11 +762,9 @@ Object* StoreCallbackProperty(Arguments args) {
Handle<String> name = args.at<String>(2);
Handle<Object> value = args.at<Object>(3);
HandleScope scope;
- Handle<Object> data(callback->data());
- LOG(ApiNamedPropertyAccess("store", *recv, *name));
- v8::AccessorInfo info(v8::Utils::ToLocal(recv),
- v8::Utils::ToLocal(data),
- v8::Utils::ToLocal(recv));
+ LOG(ApiNamedPropertyAccess("store", recv, *name));
+ CustomArguments custom_args(callback->data(), recv, recv);
+ v8::AccessorInfo info(custom_args.end());
{
// Leaving JavaScript.
VMState state(EXTERNAL);
@@ -795,11 +782,11 @@ Object* StoreCallbackProperty(Arguments args) {
* provide any value for the given name.
*/
Object* LoadPropertyWithInterceptorOnly(Arguments args) {
- Handle<JSObject> receiver_handle = args.at<JSObject>(0);
- Handle<JSObject> holder_handle = args.at<JSObject>(1);
+ JSObject* receiver_handle = JSObject::cast(args[0]);
+ JSObject* holder_handle = JSObject::cast(args[1]);
Handle<String> name_handle = args.at<String>(2);
Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(3);
- Handle<Object> data_handle = args.at<Object>(4);
+ Object* data_handle = args[4];
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
v8::NamedPropertyGetter getter =
@@ -808,9 +795,8 @@ Object* LoadPropertyWithInterceptorOnly(Arguments args) {
{
// Use the interceptor getter.
- v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(data_handle, receiver_handle, holder_handle);
+ v8::AccessorInfo info(args.end());
HandleScope scope;
v8::Handle<v8::Value> r;
{
@@ -861,9 +847,8 @@ static Object* LoadWithInterceptor(Arguments* args,
{
// Use the interceptor getter.
- v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(data_handle),
- v8::Utils::ToLocal(holder_handle));
+ CustomArguments args(*data_handle, *receiver_handle, *holder_handle);
+ v8::AccessorInfo info(args.end());
HandleScope scope;
v8::Handle<v8::Value> r;
{
diff --git a/V8Binding/v8/src/third_party/dtoa/dtoa.c b/V8Binding/v8/src/third_party/dtoa/dtoa.c
index fadc6d1..8917d9d 100644
--- a/V8Binding/v8/src/third_party/dtoa/dtoa.c
+++ b/V8Binding/v8/src/third_party/dtoa/dtoa.c
@@ -501,7 +501,9 @@ Balloc
#endif
ACQUIRE_DTOA_LOCK(0);
- if ((rv = freelist[k])) {
+ /* The k > Kmax case does not need ACQUIRE_DTOA_LOCK(0), */
+ /* but this case seems very unlikely. */
+ if (k <= Kmax && (rv = freelist[k])) {
freelist[k] = rv->next;
}
else {
@@ -511,7 +513,7 @@ Balloc
#else
len = (sizeof(Bigint) + (x-1)*sizeof(ULong) + sizeof(double) - 1)
/sizeof(double);
- if (pmem_next - private_mem + len <= PRIVATE_mem) {
+ if (k <= Kmax && pmem_next - private_mem + len <= PRIVATE_mem) {
rv = (Bigint*)pmem_next;
pmem_next += len;
}
@@ -535,10 +537,14 @@ Bfree
#endif
{
if (v) {
- ACQUIRE_DTOA_LOCK(0);
- v->next = freelist[v->k];
- freelist[v->k] = v;
- FREE_DTOA_LOCK(0);
+ if (v->k > Kmax)
+ free((void*)v);
+ else {
+ ACQUIRE_DTOA_LOCK(0);
+ v->next = freelist[v->k];
+ freelist[v->k] = v;
+ FREE_DTOA_LOCK(0);
+ }
}
}
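
The dtoa.c change above caps the freelist: allocations with k <= Kmax keep being recycled through freelist[k], while larger blocks go straight to malloc and are released with free in Bfree. A simplified sketch of that size-class-capped freelist pattern (illustrative only, not the dtoa code):

    #include <cstdlib>
    #include <vector>

    const int kMaxClass = 15;                      // Stand-in for Kmax.
    static std::vector<void*> freelist[kMaxClass + 1];

    void* AllocBlock(int k, std::size_t bytes) {
      if (k <= kMaxClass && !freelist[k].empty()) {
        void* block = freelist[k].back();          // Reuse a cached block of class k.
        freelist[k].pop_back();
        return block;
      }
      return std::malloc(bytes);                   // Cold or oversized blocks come from malloc.
    }

    void FreeBlock(int k, void* block) {
      if (k > kMaxClass) {
        std::free(block);                          // Oversized blocks bypass the freelist.
      } else {
        freelist[k].push_back(block);              // Small blocks are recycled.
      }
    }

    int main() {
      void* p = AllocBlock(3, 64);
      FreeBlock(3, p);                             // Goes back on freelist[3].
      void* q = AllocBlock(kMaxClass + 1, 1 << 20);
      FreeBlock(kMaxClass + 1, q);                 // Freed immediately.
      return 0;
    }
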
diff --git a/V8Binding/v8/src/top.cc b/V8Binding/v8/src/top.cc
index 5c22bcf..aa7788e 100644
--- a/V8Binding/v8/src/top.cc
+++ b/V8Binding/v8/src/top.cc
@@ -98,7 +98,8 @@ void Top::InitializeThreadLocal() {
thread_local_.stack_is_cooked_ = false;
thread_local_.try_catch_handler_ = NULL;
thread_local_.context_ = NULL;
- thread_local_.thread_id_ = ThreadManager::kInvalidId;
+ int id = ThreadManager::CurrentId();
+ thread_local_.thread_id_ = (id == 0) ? ThreadManager::kInvalidId : id;
thread_local_.external_caught_exception_ = false;
thread_local_.failed_access_check_callback_ = NULL;
clear_pending_exception();
@@ -690,12 +691,17 @@ void Top::ComputeLocation(MessageLocation* target) {
void Top::ReportUncaughtException(Handle<Object> exception,
MessageLocation* location,
Handle<String> stack_trace) {
- Handle<Object> message =
- MessageHandler::MakeMessageObject("uncaught_exception",
- location,
- HandleVector<Object>(&exception, 1),
- stack_trace);
-
+ Handle<Object> message;
+ if (!Bootstrapper::IsActive()) {
+ // It's not safe to try to make message objects while the bootstrapper
+ // is active since the infrastructure may not have been properly
+ // initialized.
+ message =
+ MessageHandler::MakeMessageObject("uncaught_exception",
+ location,
+ HandleVector<Object>(&exception, 1),
+ stack_trace);
+ }
// Report the uncaught exception.
MessageHandler::ReportMessage(location, message);
}
@@ -769,10 +775,15 @@ void Top::DoThrow(Object* exception,
ComputeLocation(&potential_computed_location);
location = &potential_computed_location;
}
- Handle<String> stack_trace;
- if (FLAG_trace_exception) stack_trace = StackTrace();
- message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
- location, HandleVector<Object>(&exception_handle, 1), stack_trace);
+ if (!Bootstrapper::IsActive()) {
+ // It's not safe to try to make message objects or collect stack
+ // traces while the bootstrapper is active since the infrastructure
+ // may not have been properly initialized.
+ Handle<String> stack_trace;
+ if (FLAG_trace_exception) stack_trace = StackTrace();
+ message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
+ location, HandleVector<Object>(&exception_handle, 1), stack_trace);
+ }
}
// Save the message for reporting if the exception remains uncaught.
diff --git a/V8Binding/v8/src/top.h b/V8Binding/v8/src/top.h
index 5b3d6a0..ae94f08 100644
--- a/V8Binding/v8/src/top.h
+++ b/V8Binding/v8/src/top.h
@@ -78,6 +78,12 @@ class ThreadLocalTop BASE_EMBEDDED {
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_;
+
+ void Free() {
+ ASSERT(!has_pending_message_);
+ ASSERT(!external_caught_exception_);
+ ASSERT(try_catch_handler_ == NULL);
+ }
};
#define TOP_ADDRESS_LIST(C) \
@@ -316,6 +322,7 @@ class Top {
static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
static char* ArchiveThread(char* to);
static char* RestoreThread(char* from);
+ static void FreeThreadResources() { thread_local_.Free(); }
static const char* kStackOverflowMessage;
diff --git a/V8Binding/v8/src/uri.js b/V8Binding/v8/src/uri.js
index 0dfe765..5af71b6 100644
--- a/V8Binding/v8/src/uri.js
+++ b/V8Binding/v8/src/uri.js
@@ -30,6 +30,11 @@
// Expect $String = global.String;
+// Lazily initialized.
+var hexCharArray = 0;
+var hexCharCodeArray = 0;
+
+
function URIAddEncodedOctetToBuffer(octet, result, index) {
result[index++] = 37; // Char code of '%'.
result[index++] = hexCharCodeArray[octet >> 4];
@@ -320,11 +325,6 @@ function URIEncodeComponent(component) {
}
-// Lazily initialized.
-var hexCharArray = 0;
-var hexCharCodeArray = 0;
-
-
function HexValueOf(c) {
var code = c.charCodeAt(0);
diff --git a/V8Binding/v8/src/usage-analyzer.cc b/V8Binding/v8/src/usage-analyzer.cc
index 5514f40..23a4d9f 100644
--- a/V8Binding/v8/src/usage-analyzer.cc
+++ b/V8Binding/v8/src/usage-analyzer.cc
@@ -44,45 +44,12 @@ class UsageComputer: public AstVisitor {
public:
static bool Traverse(AstNode* node);
- void VisitBlock(Block* node);
- void VisitDeclaration(Declaration* node);
- void VisitExpressionStatement(ExpressionStatement* node);
- void VisitEmptyStatement(EmptyStatement* node);
- void VisitIfStatement(IfStatement* node);
- void VisitContinueStatement(ContinueStatement* node);
- void VisitBreakStatement(BreakStatement* node);
- void VisitReturnStatement(ReturnStatement* node);
- void VisitWithEnterStatement(WithEnterStatement* node);
- void VisitWithExitStatement(WithExitStatement* node);
- void VisitSwitchStatement(SwitchStatement* node);
- void VisitLoopStatement(LoopStatement* node);
- void VisitForInStatement(ForInStatement* node);
- void VisitTryCatch(TryCatch* node);
- void VisitTryFinally(TryFinally* node);
- void VisitDebuggerStatement(DebuggerStatement* node);
- void VisitFunctionLiteral(FunctionLiteral* node);
- void VisitFunctionBoilerplateLiteral(FunctionBoilerplateLiteral* node);
- void VisitConditional(Conditional* node);
- void VisitSlot(Slot* node);
- void VisitVariable(Variable* node);
- void VisitVariableProxy(VariableProxy* node);
- void VisitLiteral(Literal* node);
- void VisitRegExpLiteral(RegExpLiteral* node);
- void VisitObjectLiteral(ObjectLiteral* node);
- void VisitArrayLiteral(ArrayLiteral* node);
- void VisitCatchExtensionObject(CatchExtensionObject* node);
- void VisitAssignment(Assignment* node);
- void VisitThrow(Throw* node);
- void VisitProperty(Property* node);
- void VisitCall(Call* node);
- void VisitCallEval(CallEval* node);
- void VisitCallNew(CallNew* node);
- void VisitCallRuntime(CallRuntime* node);
- void VisitUnaryOperation(UnaryOperation* node);
- void VisitCountOperation(CountOperation* node);
- void VisitBinaryOperation(BinaryOperation* node);
- void VisitCompareOperation(CompareOperation* node);
- void VisitThisFunction(ThisFunction* node);
+ // AST node visit functions.
+#define DECLARE_VISIT(type) void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ void VisitVariable(Variable* var);
private:
int weight_;
@@ -329,13 +296,9 @@ void UsageComputer::VisitCall(Call* node) {
}
-void UsageComputer::VisitCallEval(CallEval* node) {
- VisitCall(node);
-}
-
-
void UsageComputer::VisitCallNew(CallNew* node) {
- VisitCall(node);
+ Read(node->expression());
+ ReadList(node->arguments());
}
diff --git a/V8Binding/v8/src/utils.cc b/V8Binding/v8/src/utils.cc
index d56d279..3c684b8 100644
--- a/V8Binding/v8/src/utils.cc
+++ b/V8Binding/v8/src/utils.cc
@@ -239,7 +239,7 @@ int WriteChars(const char* filename,
FILE* f = OS::FOpen(filename, "wb");
if (f == NULL) {
if (verbose) {
- OS::PrintError("Cannot open file %s for reading.\n", filename);
+ OS::PrintError("Cannot open file %s for writing.\n", filename);
}
return 0;
}
diff --git a/V8Binding/v8/src/v8-counters.h b/V8Binding/v8/src/v8-counters.h
index 0b941f6..e360b55 100644
--- a/V8Binding/v8/src/v8-counters.h
+++ b/V8Binding/v8/src/v8-counters.h
@@ -142,6 +142,8 @@ namespace internal {
SC(constructed_objects, V8.ConstructedObjects) \
SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
+ SC(array_function_runtime, V8.ArrayFunctionRuntime) \
+ SC(array_function_native, V8.ArrayFunctionNative) \
SC(for_in, V8.ForIn) \
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
diff --git a/V8Binding/v8/src/v8.cc b/V8Binding/v8/src/v8.cc
index a204158..f0115ec 100644
--- a/V8Binding/v8/src/v8.cc
+++ b/V8Binding/v8/src/v8.cc
@@ -71,6 +71,14 @@ bool V8::Initialize(Deserializer *des) {
::assembler::arm::Simulator::Initialize();
#endif
+ { // NOLINT
+ // Ensure that the thread has a valid stack guard. The v8::Locker object
+ // will ensure this too, but we don't have to use lockers if we are only
+ // using one thread.
+ ExecutionAccess lock;
+ StackGuard::InitThread(lock);
+ }
+
// Setup the object heap
ASSERT(!Heap::HasBeenSetup());
if (!Heap::Setup(create_heap_objects)) {
@@ -162,9 +170,11 @@ uint32_t V8::Random() {
bool V8::IdleNotification(bool is_high_priority) {
- if (!FLAG_use_idle_notification) return false;
+ // Returning true tells the caller that there is no need to call
+ // IdleNotification again.
+ if (!FLAG_use_idle_notification) return true;
// Ignore high priority instances of V8.
- if (is_high_priority) return false;
+ if (is_high_priority) return true;
// Tell the heap that it may want to adjust.
return Heap::IdleNotification();
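
The v8.cc hunk above flips the meaning of the return value: true now means "nothing left to do, no need to call again". An embedder idle loop built on that contract would look roughly like the sketch below, where IdleNotification is only a stand-in with the semantics described above, not a specific V8 API:

    #include <cstdio>

    // Stand-in: returns true when no further idle work is pending.
    static int idle_rounds_left = 3;
    bool IdleNotification() {
      if (idle_rounds_left == 0) return true;   // Done; the caller can stop.
      --idle_rounds_left;                       // Pretend one round of GC work happened.
      return false;                             // More work remains; call again.
    }

    int main() {
      // Keep giving the engine idle time until it reports there is nothing left to do.
      while (!IdleNotification()) {
        std::printf("did one round of idle work\n");
      }
      std::printf("idle work finished\n");
      return 0;
    }
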
diff --git a/V8Binding/v8/src/v8.h b/V8Binding/v8/src/v8.h
index 50be6df..7786d66 100644
--- a/V8Binding/v8/src/v8.h
+++ b/V8Binding/v8/src/v8.h
@@ -51,11 +51,6 @@
#error both DEBUG and NDEBUG are set
#endif
-// Enable debugger support by default, unless it is in ANDROID
-#if !defined(ENABLE_DEBUGGER_SUPPORT) && !defined(ANDROID)
-#define ENABLE_DEBUGGER_SUPPORT
-#endif
-
// Basic includes
#include "../include/v8.h"
#include "globals.h"
diff --git a/V8Binding/v8/src/v8natives.js b/V8Binding/v8/src/v8natives.js
index be92347..2fecee8 100644
--- a/V8Binding/v8/src/v8natives.js
+++ b/V8Binding/v8/src/v8natives.js
@@ -276,6 +276,13 @@ function ObjectLookupSetter(name) {
}
+function ObjectKeys(obj) {
+ if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+ throw MakeTypeError('object_keys_non_object', [obj]);
+ return %LocalKeys(obj);
+}
+
+
%SetCode($Object, function(x) {
if (%_IsConstructCall()) {
if (x == null) return this;
@@ -304,6 +311,9 @@ function SetupObject() {
"__defineSetter__", ObjectDefineSetter,
"__lookupSetter__", ObjectLookupSetter
));
+ InstallFunctions($Object, DONT_ENUM, $Array(
+ "keys", ObjectKeys
+ ));
}
SetupObject();
@@ -514,7 +524,7 @@ function FunctionSourceString(func) {
}
var source = %FunctionGetSourceCode(func);
- if (!IS_STRING(source)) {
+ if (!IS_STRING(source) || %FunctionIsBuiltin(func)) {
var name = %FunctionGetName(func);
if (name) {
// Mimic what KJS does.
@@ -524,12 +534,6 @@ function FunctionSourceString(func) {
}
}
- // Censor occurrences of internal calls. We do that for all
- // functions and don't cache under the assumption that people rarly
- // convert functions to strings. Note that we (apparently) can't
- // use regular expression literals in natives files.
- var regexp = ORIGINAL_REGEXP("%(\\w+\\()", "gm");
- if (source.match(regexp)) source = source.replace(regexp, "$1");
var name = %FunctionGetName(func);
return 'function ' + name + source;
}
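The v8natives.js changes above install an ES5-style Object.keys on the Object constructor. A minimal embedding sketch that exercises it through the C++ API (written against the 1.3-era API as an assumption; error handling omitted):

    #include <v8.h>
    #include <cstdio>

    int main() {
      v8::HandleScope handle_scope;
      v8::Persistent<v8::Context> context = v8::Context::New();
      v8::Context::Scope context_scope(context);
      // Object.keys returns the own enumerable property names.
      v8::Handle<v8::Script> script = v8::Script::Compile(
          v8::String::New("Object.keys({a: 1, b: 2}).join(',')"));
      v8::String::AsciiValue result(script->Run());
      printf("%s\n", *result);  // prints "a,b"
      context.Dispose();
      return 0;
    }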
diff --git a/V8Binding/v8/src/v8threads.cc b/V8Binding/v8/src/v8threads.cc
index 3022a7e..80a7cd9 100644
--- a/V8Binding/v8/src/v8threads.cc
+++ b/V8Binding/v8/src/v8threads.cc
@@ -56,10 +56,20 @@ Locker::Locker() : has_lock_(false), top_level_(true) {
if (!internal::ThreadManager::IsLockedByCurrentThread()) {
internal::ThreadManager::Lock();
has_lock_ = true;
+ // Make sure that V8 is initialized. Archiving of threads interferes
+ // with deserialization by adding additional root pointers, so we must
+ // initialize here, before anyone can call ~Locker() or Unlocker().
+ if (!internal::V8::IsRunning()) {
+ V8::Initialize();
+ }
// This may be a locker within an unlocker in which case we have to
// get the saved state for this thread and restore it.
if (internal::ThreadManager::RestoreThread()) {
top_level_ = false;
+ } else {
+ internal::ExecutionAccess access;
+ internal::StackGuard::ClearThread(access);
+ internal::StackGuard::InitThread(access);
}
}
ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
@@ -77,7 +87,9 @@ bool Locker::IsLocked() {
Locker::~Locker() {
ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
if (has_lock_) {
- if (!top_level_) {
+ if (top_level_) {
+ internal::ThreadManager::FreeThreadResources();
+ } else {
internal::ThreadManager::ArchiveThread();
}
internal::ThreadManager::Unlock();
@@ -139,11 +151,14 @@ bool ThreadManager::RestoreThread() {
ThreadState* state =
reinterpret_cast<ThreadState*>(Thread::GetThreadLocal(thread_state_key));
if (state == NULL) {
+ // This is a new thread.
+ StackGuard::InitThread(access);
return false;
}
char* from = state->data();
from = HandleScopeImplementer::RestoreThread(from);
from = Top::RestoreThread(from);
+ from = Relocatable::RestoreState(from);
#ifdef ENABLE_DEBUGGER_SUPPORT
from = Debug::RestoreDebug(from);
#endif
@@ -183,7 +198,8 @@ static int ArchiveSpacePerThread() {
#endif
StackGuard::ArchiveSpacePerThread() +
RegExpStack::ArchiveSpacePerThread() +
- Bootstrapper::ArchiveSpacePerThread();
+ Bootstrapper::ArchiveSpacePerThread() +
+ Relocatable::ArchiveSpacePerThread();
}
@@ -273,6 +289,7 @@ void ThreadManager::EagerlyArchiveThread() {
// in ThreadManager::Iterate(ObjectVisitor*).
to = HandleScopeImplementer::ArchiveThread(to);
to = Top::ArchiveThread(to);
+ to = Relocatable::ArchiveState(to);
#ifdef ENABLE_DEBUGGER_SUPPORT
to = Debug::ArchiveDebug(to);
#endif
@@ -284,6 +301,18 @@ void ThreadManager::EagerlyArchiveThread() {
}
+void ThreadManager::FreeThreadResources() {
+ HandleScopeImplementer::FreeThreadResources();
+ Top::FreeThreadResources();
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debug::FreeThreadResources();
+#endif
+ StackGuard::FreeThreadResources();
+ RegExpStack::FreeThreadResources();
+ Bootstrapper::FreeThreadResources();
+}
+
+
bool ThreadManager::IsArchived() {
return Thread::HasThreadLocal(thread_state_key);
}
@@ -297,6 +326,7 @@ void ThreadManager::Iterate(ObjectVisitor* v) {
char* data = state->data();
data = HandleScopeImplementer::Iterate(v, data);
data = Top::Iterate(v, data);
+ data = Relocatable::Iterate(v, data);
}
}
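With the v8threads.cc changes above, the first v8::Locker constructed anywhere initializes V8, and a thread that has never run V8 before gets its stack guard set up when RestoreThread() finds no archived state. A sketch of a worker-thread entry point that relies on this (standard Locker usage; the function name and the pre-created context are assumptions):

    void WorkerThreadBody(v8::Persistent<v8::Context> context) {
      v8::Locker locker;          // acquires the V8 lock; initializes V8 if needed
      v8::HandleScope handle_scope;
      v8::Context::Scope context_scope(context);
      v8::Handle<v8::Script> script =
          v8::Script::Compile(v8::String::New("6 * 7"));
      script->Run();
    }  // ~Locker archives this thread's state, or frees it for a top-level locker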
diff --git a/V8Binding/v8/src/v8threads.h b/V8Binding/v8/src/v8threads.h
index f808e54..0684053 100644
--- a/V8Binding/v8/src/v8threads.h
+++ b/V8Binding/v8/src/v8threads.h
@@ -86,6 +86,7 @@ class ThreadManager : public AllStatic {
static void ArchiveThread();
static bool RestoreThread();
+ static void FreeThreadResources();
static bool IsArchived();
static void Iterate(ObjectVisitor* v);
diff --git a/V8Binding/v8/src/variables.h b/V8Binding/v8/src/variables.h
index c2adb23..ca78b5f 100644
--- a/V8Binding/v8/src/variables.h
+++ b/V8Binding/v8/src/variables.h
@@ -171,7 +171,7 @@ class Variable: public ZoneObject {
UseCount* var_uses() { return &var_uses_; }
UseCount* obj_uses() { return &obj_uses_; }
- bool IsVariable(Handle<String> n) {
+ bool IsVariable(Handle<String> n) const {
return !is_this() && name().is_identical_to(n);
}
@@ -185,6 +185,12 @@ class Variable: public ZoneObject {
bool is_this() const { return kind_ == THIS; }
bool is_arguments() const { return kind_ == ARGUMENTS; }
+ // True if the variable is named eval and not known to be shadowed.
+ bool is_possibly_eval() const {
+ return IsVariable(Factory::eval_symbol()) &&
+ (mode_ == DYNAMIC || mode_ == DYNAMIC_GLOBAL);
+ }
+
Variable* local_if_not_shadowed() const {
ASSERT(mode_ == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
return local_if_not_shadowed_;
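A short illustration (not part of the patch) of the two cases the new predicate separates, following the comment above:

    // eval("x")                -> the callee is a dynamically resolved variable
    //                            named 'eval' (mode DYNAMIC or DYNAMIC_GLOBAL),
    //                            so is_possibly_eval() is true and the call may
    //                            be a direct eval.
    // var eval = f; eval("x")  -> 'eval' is known to be shadowed by a local
    //                            binding, so it is compiled as an ordinary call.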
diff --git a/V8Binding/v8/src/version.cc b/V8Binding/v8/src/version.cc
index 88256d1..a36e17c 100644
--- a/V8Binding/v8/src/version.cc
+++ b/V8Binding/v8/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1
#define MINOR_VERSION 3
-#define BUILD_NUMBER 10
+#define BUILD_NUMBER 14
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION true
diff --git a/V8Binding/v8/src/x64/assembler-x64.cc b/V8Binding/v8/src/x64/assembler-x64.cc
index 6304324..b4204a9 100644
--- a/V8Binding/v8/src/x64/assembler-x64.cc
+++ b/V8Binding/v8/src/x64/assembler-x64.cc
@@ -173,17 +173,31 @@ void CpuFeatures::Probe() {
// Patch the code at the current PC with a call to the target address.
// Additional guard int3 instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Call instruction takes up 13 bytes and int3 takes up one byte.
- Address patch_site = pc_;
- Memory::uint16_at(patch_site) = 0xBA49u; // movq r10, imm64
- // Write "0x00, call r10" starting at last byte of address. We overwrite
- // the 0x00 later, and this lets us write a uint32.
- Memory::uint32_at(patch_site + 9) = 0xD2FF4900u; // 0x00, call r10
- Memory::Address_at(patch_site + 2) = target;
+  // Loading a register with a 64-bit immediate and calling through that
+  // register takes up 13 bytes; an int3 takes up one byte.
+ static const int kCallCodeSize = 13;
+ int code_size = kCallCodeSize + guard_bytes;
+
+ // Create a code patcher.
+ CodePatcher patcher(pc_, code_size);
+
+  // Add a label for checking the size of the generated patch code.
+#ifdef DEBUG
+ Label check_codesize;
+ patcher.masm()->bind(&check_codesize);
+#endif
+
+ // Patch the code.
+ patcher.masm()->movq(r10, target, RelocInfo::NONE);
+ patcher.masm()->call(r10);
+
+ // Check that the size of the code generated is as expected.
+ ASSERT_EQ(kCallCodeSize,
+ patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
// Add the requested number of int3 instructions after the call.
for (int i = 0; i < guard_bytes; i++) {
- *(patch_site + 13 + i) = 0xCC; // int3
+ patcher.masm()->int3();
}
}
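For reference, the 13 bytes asserted above break down as follows on x64 (an encoding sketch, not part of the patch):

    movq r10, imm64   REX.W+B prefix (1) + opcode 0xB8|reg (1) + imm64 (8) = 10 bytes
    call r10          REX.B prefix (1) + 0xFF /2 opcode+modrm (2)          =  3 bytes
                                                                     total = 13 bytes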
@@ -193,6 +207,9 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
for (int i = 0; i < instruction_count; i++) {
*(pc_ + i) = *(instructions + i);
}
+
+ // Indicate that code has changed.
+ CPU::FlushICache(pc_, instruction_count);
}
// -----------------------------------------------------------------------------
@@ -275,7 +292,7 @@ Assembler::Assembler(void* buffer, int buffer_size) {
// Clear the buffer in debug mode unless it was provided by the
// caller in which case we can't be sure it's okay to overwrite
- // existing code in it; see CodePatcher::CodePatcher(...).
+ // existing code in it.
#ifdef DEBUG
if (own_buffer_) {
memset(buffer_, 0xCC, buffer_size); // int3
@@ -362,7 +379,7 @@ void Assembler::bind(Label* L) {
void Assembler::GrowBuffer() {
- ASSERT(overflow()); // should not call this otherwise
+ ASSERT(buffer_overflow()); // should not call this otherwise
if (!own_buffer_) FATAL("external code buffer is too small");
// compute new buffer size
@@ -424,7 +441,7 @@ void Assembler::GrowBuffer() {
}
}
- ASSERT(!overflow());
+ ASSERT(!buffer_overflow());
}
@@ -1406,6 +1423,15 @@ void Assembler::neg(Register dst) {
}
+void Assembler::negl(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+ emit(0xF7);
+ emit_modrm(0x3, dst);
+}
+
+
void Assembler::neg(const Operand& dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
diff --git a/V8Binding/v8/src/x64/assembler-x64.h b/V8Binding/v8/src/x64/assembler-x64.h
index 4d341c6..ff87286 100644
--- a/V8Binding/v8/src/x64/assembler-x64.h
+++ b/V8Binding/v8/src/x64/assembler-x64.h
@@ -447,7 +447,7 @@ class Assembler : public Malloced {
// Distance between the address of the code target in the call instruction
// and the return address. Checked in the debug build.
- static const int kPatchReturnSequenceLength = 3 + kPointerSize;
+ static const int kCallTargetAddressOffset = 3 + kPointerSize;
// Distance between start of patched return sequence and the emitted address
// to jump to (movq = REX.W 0xB8+r.).
static const int kPatchReturnSequenceAddressOffset = 2;
@@ -687,6 +687,10 @@ class Assembler : public Malloced {
immediate_arithmetic_op(0x4, dst, src);
}
+ void andl(Register dst, Immediate src) {
+ immediate_arithmetic_op_32(0x4, dst, src);
+ }
+
void decq(Register dst);
void decq(const Operand& dst);
void decl(Register dst);
@@ -721,6 +725,7 @@ class Assembler : public Malloced {
void neg(Register dst);
void neg(const Operand& dst);
+ void negl(Register dst);
void not_(Register dst);
void not_(const Operand& dst);
@@ -729,6 +734,10 @@ class Assembler : public Malloced {
arithmetic_op(0x0B, dst, src);
}
+ void orl(Register dst, Register src) {
+ arithmetic_op_32(0x0B, dst, src);
+ }
+
void or_(Register dst, const Operand& src) {
arithmetic_op(0x0B, dst, src);
}
@@ -860,6 +869,10 @@ class Assembler : public Malloced {
arithmetic_op(0x33, dst, src);
}
+ void xorl(Register dst, Register src) {
+ arithmetic_op_32(0x33, dst, src);
+ }
+
void xor_(Register dst, const Operand& src) {
arithmetic_op(0x33, dst, src);
}
@@ -1049,7 +1062,9 @@ class Assembler : public Malloced {
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
// an instruction or relocation information.
- inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+ inline bool buffer_overflow() const {
+ return pc_ >= reloc_info_writer.pos() - kGap;
+ }
// Get the number of bytes available in the buffer.
inline int available_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1279,7 +1294,7 @@ class Assembler : public Malloced {
class EnsureSpace BASE_EMBEDDED {
public:
explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
- if (assembler_->overflow()) assembler_->GrowBuffer();
+ if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
#ifdef DEBUG
space_before_ = assembler_->available_space();
#endif
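A minimal sketch (not part of the patch) of how the new 32-bit forms are meant to be used; the emitter helper is hypothetical:

    // Operate on untagged 32-bit values without the REX.W prefix that the
    // 64-bit forms (neg, or_, xor_) would emit.
    void EmitClearAndNegate(Assembler* assm) {
      assm->xorl(rax, rax);  // rax = 0; a 32-bit op also clears the upper bits
      assm->negl(rax);       // negate the low 32 bits (still 0 here)
    }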
diff --git a/V8Binding/v8/src/x64/builtins-x64.cc b/V8Binding/v8/src/x64/builtins-x64.cc
index fab71fa..35eddc4 100644
--- a/V8Binding/v8/src/x64/builtins-x64.cc
+++ b/V8Binding/v8/src/x64/builtins-x64.cc
@@ -41,10 +41,10 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
__ movq(Operand(kScratchRegister, 0), rdi);
// The actual argument count has already been loaded into register
- // rax, but JumpToBuiltin expects rax to contain the number of
+ // rax, but JumpToRuntime expects rax to contain the number of
// arguments including the receiver.
__ incq(rax);
- __ JumpToBuiltin(ExternalReference(id));
+ __ JumpToRuntime(ExternalReference(id), 1);
}
@@ -61,8 +61,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// Preserve the number of arguments on the stack. Must preserve both
// rax and rbx because these registers are used when copying the
// arguments and the receiver.
- ASSERT(kSmiTagSize == 1);
- __ lea(rcx, Operand(rax, rax, times_1, kSmiTag));
+ __ Integer32ToSmi(rcx, rax);
__ push(rcx);
}
@@ -77,10 +76,13 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Remove caller arguments from the stack.
// rbx holds a Smi, so we convert to a dword offset by multiplying by 4.
+ // TODO(smi): Find a way to abstract indexing by a smi.
ASSERT_EQ(kSmiTagSize, 1 && kSmiTag == 0);
ASSERT_EQ(kPointerSize, (1 << kSmiTagSize) * 4);
+ // TODO(smi): Find way to abstract indexing by a smi.
__ pop(rcx);
- __ lea(rsp, Operand(rsp, rbx, times_4, 1 * kPointerSize)); // 1 ~ receiver
+ // 1 * kPointerSize is offset of receiver.
+ __ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
__ push(rcx);
}
@@ -192,8 +194,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{ Label done, non_function, function;
// The function to call is at position n+1 on the stack.
__ movq(rdi, Operand(rsp, rax, times_pointer_size, +1 * kPointerSize));
- __ testl(rdi, Immediate(kSmiTagMask));
- __ j(zero, &non_function);
+ __ JumpIfSmi(rdi, &non_function);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(equal, &function);
@@ -213,8 +214,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{ Label call_to_object, use_global_receiver, patch_receiver, done;
__ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
- __ testl(rbx, Immediate(kSmiTagMask));
- __ j(zero, &call_to_object);
+ __ JumpIfSmi(rbx, &call_to_object);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
@@ -230,8 +230,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ EnterInternalFrame(); // preserves rax, rbx, rdi
// Store the arguments count on the stack (smi tagged).
- ASSERT(kSmiTag == 0);
- __ shl(rax, Immediate(kSmiTagSize));
+ __ Integer32ToSmi(rax, rax);
__ push(rax);
__ push(rdi); // save edi across the call
@@ -242,7 +241,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Get the arguments count and untag it.
__ pop(rax);
- __ shr(rax, Immediate(kSmiTagSize));
+ __ SmiToInteger32(rax, rax);
__ LeaveInternalFrame();
__ jmp(&patch_receiver);
@@ -355,8 +354,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
Label okay;
// Make rdx the space we need for the array when it is unrolled onto the
// stack.
- __ movq(rdx, rax);
- __ shl(rdx, Immediate(kPointerSizeLog2 - kSmiTagSize));
+ __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
__ cmpq(rcx, rdx);
__ j(greater, &okay);
@@ -382,8 +380,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Compute the receiver.
Label call_to_object, use_global_receiver, push_receiver;
__ movq(rbx, Operand(rbp, kReceiverOffset));
- __ testl(rbx, Immediate(kSmiTagMask));
- __ j(zero, &call_to_object);
+ __ JumpIfSmi(rbx, &call_to_object);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
@@ -446,7 +443,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Invoke the function.
ParameterCount actual(rax);
- __ shr(rax, Immediate(kSmiTagSize));
+ __ SmiToInteger32(rax, rax);
__ movq(rdi, Operand(rbp, kFunctionOffset));
__ InvokeFunction(rdi, actual, CALL_FUNCTION);
@@ -455,6 +452,434 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the global context.
+ __ movq(result, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(result, FieldOperand(result, GlobalObject::kGlobalContextOffset));
+ // Load the Array function from the global context.
+ __ movq(result,
+ Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Number of empty elements to allocate for an empty array.
+static const int kPreallocatedArrayElements = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. If the parameter initial_capacity is larger than zero an elements
+// backing store is allocated with this size and filled with the hole values.
+// Otherwise the elements backing store is set to the empty FixedArray.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+ Register array_function,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int initial_capacity,
+ Label* gc_required) {
+ ASSERT(initial_capacity >= 0);
+
+ // Load the initial map from the array function.
+ __ movq(scratch1, FieldOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Allocate the JSArray object together with space for a fixed array with the
+ // requested elements.
+ int size = JSArray::kSize;
+ if (initial_capacity > 0) {
+ size += FixedArray::SizeFor(initial_capacity);
+ }
+ __ AllocateInNewSpace(size,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // scratch1: initial map
+ // scratch2: start of next object
+ __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
+ __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
+ Factory::empty_fixed_array());
+ // Field JSArray::kElementsOffset is initialized later.
+ __ movq(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
+
+ // If no storage is requested for the elements array just set the empty
+ // fixed array.
+ if (initial_capacity == 0) {
+ __ Move(FieldOperand(result, JSArray::kElementsOffset),
+ Factory::empty_fixed_array());
+ return;
+ }
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // scratch2: start of next object
+ __ lea(scratch1, Operand(result, JSArray::kSize));
+ __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
+
+ // Initialize the FixedArray and fill it with holes. FixedArray length is not
+ // stored as a smi.
+ // result: JSObject
+ // scratch1: elements array
+ // scratch2: start of next object
+ __ Move(FieldOperand(scratch1, JSObject::kMapOffset),
+ Factory::fixed_array_map());
+ __ movq(FieldOperand(scratch1, Array::kLengthOffset),
+ Immediate(initial_capacity));
+
+ // Fill the FixedArray with the hole value. Inline the code if short.
+ // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
+ static const int kLoopUnfoldLimit = 4;
+ ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
+ __ Move(scratch3, Factory::the_hole_value());
+ if (initial_capacity <= kLoopUnfoldLimit) {
+ // Use a scratch register here to have only one reloc info when unfolding
+ // the loop.
+ for (int i = 0; i < initial_capacity; i++) {
+ __ movq(FieldOperand(scratch1,
+ FixedArray::kHeaderSize + i * kPointerSize),
+ scratch3);
+ }
+ } else {
+ Label loop, entry;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(Operand(scratch1, 0), scratch3);
+ __ addq(scratch1, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(scratch1, scratch2);
+ __ j(below, &loop);
+ }
+}
+
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register and beginning and end of the FixedArray elements
+// storage is put into registers elements_array and elements_array_end (see
+// below for when that is not the case). If the parameter fill_with_holes is
+// below for when that is not the case). If the parameter fill_with_hole is
+// true, the allocated elements backing store is filled with the hole values;
+// otherwise it is left uninitialized. When the backing store is filled, the
+static void AllocateJSArray(MacroAssembler* masm,
+ Register array_function, // Array function.
+ Register array_size, // As a smi.
+ Register result,
+ Register elements_array,
+ Register elements_array_end,
+ Register scratch,
+ bool fill_with_hole,
+ Label* gc_required) {
+ Label not_empty, allocated;
+
+ // Load the initial map from the array function.
+ __ movq(elements_array,
+ FieldOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check whether an empty sized array is requested.
+ __ testq(array_size, array_size);
+ __ j(not_zero, &not_empty);
+
+  // If an empty array is requested, allocate a small elements array anyway. This
+ // keeps the code below free of special casing for the empty array.
+ int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size,
+ result,
+ elements_array_end,
+ scratch,
+ gc_required,
+ TAG_OBJECT);
+ __ jmp(&allocated);
+
+ // Allocate the JSArray object together with space for a FixedArray with the
+ // requested elements.
+ __ bind(&not_empty);
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
+ times_half_pointer_size, // array_size is a smi.
+ array_size,
+ result,
+ elements_array_end,
+ scratch,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // elements_array: initial map
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ __ bind(&allocated);
+ __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
+ __ Move(elements_array, Factory::empty_fixed_array());
+ __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
+ // Field JSArray::kElementsOffset is initialized later.
+ __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ __ lea(elements_array, Operand(result, JSArray::kSize));
+ __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
+
+ // Initialize the fixed array. FixedArray length is not stored as a smi.
+ // result: JSObject
+ // elements_array: elements array
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ ASSERT(kSmiTag == 0);
+ __ SmiToInteger64(array_size, array_size);
+ __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
+ Factory::fixed_array_map());
+ Label not_empty_2, fill_array;
+ __ testq(array_size, array_size);
+ __ j(not_zero, &not_empty_2);
+ // Length of the FixedArray is the number of pre-allocated elements even
+ // though the actual JSArray has length 0.
+ __ movq(FieldOperand(elements_array, Array::kLengthOffset),
+ Immediate(kPreallocatedArrayElements));
+ __ jmp(&fill_array);
+ __ bind(&not_empty_2);
+ // For non-empty JSArrays the length of the FixedArray and the JSArray is the
+ // same.
+ __ movq(FieldOperand(elements_array, Array::kLengthOffset), array_size);
+
+ // Fill the allocated FixedArray with the hole value if requested.
+ // result: JSObject
+ // elements_array: elements array
+ // elements_array_end: start of next object
+ __ bind(&fill_array);
+ if (fill_with_hole) {
+ Label loop, entry;
+ __ Move(scratch, Factory::the_hole_value());
+ __ lea(elements_array, Operand(elements_array,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(Operand(elements_array, 0), scratch);
+ __ addq(elements_array, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(elements_array, elements_array_end);
+ __ j(below, &loop);
+ }
+}
+
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the runtime is called. This
+// function assumes the following state:
+// rdi: constructor (built-in Array function)
+// rax: argc
+// rsp[0]: return address
+// rsp[8]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in rdi needs to be preserved for
+// entering the generic code. In both cases argc in rax needs to be preserved.
+// Both registers are preserved by this code so no need to differentiate between
+// a construct call and a normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+ Label *call_generic_code) {
+ Label argc_one_or_more, argc_two_or_more;
+
+ // Check for array construction with zero arguments.
+ __ testq(rax, rax);
+ __ j(not_zero, &argc_one_or_more);
+
+ // Handle construction of an empty array.
+ AllocateEmptyJSArray(masm,
+ rdi,
+ rbx,
+ rcx,
+ rdx,
+ r8,
+ kPreallocatedArrayElements,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+ __ movq(rax, rbx);
+ __ ret(kPointerSize);
+
+  // Check for one argument. Bail out if the argument is not a smi or if it is
+ // negative.
+ __ bind(&argc_one_or_more);
+ __ cmpq(rax, Immediate(1));
+ __ j(not_equal, &argc_two_or_more);
+ __ movq(rdx, Operand(rsp, kPointerSize)); // Get the argument from the stack.
+ Condition not_positive_smi = __ CheckNotPositiveSmi(rdx);
+ __ j(not_positive_smi, call_generic_code);
+
+ // Handle construction of an empty array of a certain size. Bail out if size
+  // is too large to actually allocate an elements array.
+ __ JumpIfSmiGreaterEqualsConstant(rdx,
+ JSObject::kInitialMaxFastElementArray,
+ call_generic_code);
+
+ // rax: argc
+ // rdx: array_size (smi)
+ // rdi: constructor
+ // esp[0]: return address
+ // esp[8]: argument
+ AllocateJSArray(masm,
+ rdi,
+ rdx,
+ rbx,
+ rcx,
+ r8,
+ r9,
+ true,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+ __ movq(rax, rbx);
+ __ ret(2 * kPointerSize);
+
+ // Handle construction of an array from a list of arguments.
+ __ bind(&argc_two_or_more);
+ __ movq(rdx, rax);
+  __ Integer32ToSmi(rdx, rdx);  // Convert argc to a smi.
+ // rax: argc
+ // rdx: array_size (smi)
+ // rdi: constructor
+ // esp[0] : return address
+ // esp[8] : last argument
+ AllocateJSArray(masm,
+ rdi,
+ rdx,
+ rbx,
+ rcx,
+ r8,
+ r9,
+ false,
+ call_generic_code);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+
+ // rax: argc
+ // rbx: JSArray
+ // rcx: elements_array
+ // r8: elements_array_end (untagged)
+ // esp[0]: return address
+ // esp[8]: last argument
+
+ // Location of the last argument
+ __ lea(r9, Operand(rsp, kPointerSize));
+
+  // Location of the first array element (parameter fill_with_hole to
+  // AllocateJSArray is false, so the FixedArray is returned in rcx).
+ __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // rax: argc
+ // rbx: JSArray
+ // rdx: location of the first array element
+ // r9: location of the last argument
+ // esp[0]: return address
+ // esp[8]: last argument
+ Label loop, entry;
+ __ movq(rcx, rax);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
+ __ movq(Operand(rdx, 0), kScratchRegister);
+ __ addq(rdx, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ decq(rcx);
+ __ j(greater_equal, &loop);
+
+ // Remove caller arguments from the stack and return.
+ // rax: argc
+ // rbx: JSArray
+ // esp[0]: return address
+ // esp[8]: last argument
+ __ pop(rcx);
+ __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ __ push(rcx);
+ __ movq(rax, rbx);
+ __ ret(0);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -----------------------------------
+ Label generic_array_code;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, rdi);
+
+ if (FLAG_debug_code) {
+    // Initial map for the builtin Array function should be a map.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The smi check below catches both a NULL pointer and a Smi.
+ ASSERT(kSmiTag == 0);
+ Condition not_smi = __ CheckNotSmi(rbx);
+ __ Assert(not_smi, "Unexpected initial map for Array function");
+ __ CmpObjectType(rbx, MAP_TYPE, rcx);
+ __ Assert(equal, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code in case the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
+ Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
+ Handle<Code> array_code(code);
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rdi : constructor
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -----------------------------------
+ Label generic_constructor;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the builtin Array function which
+    // always has a map.
+ GenerateLoadArrayFunction(masm, rbx);
+ __ cmpq(rdi, rbx);
+ __ Assert(equal, "Unexpected Array function");
+ // Initial map for the builtin Array function should be a map.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The smi check below catches both a NULL pointer and a Smi.
+ ASSERT(kSmiTag == 0);
+ Condition not_smi = __ CheckNotSmi(rbx);
+ __ Assert(not_smi, "Unexpected initial map for Array function");
+ __ CmpObjectType(rbx, MAP_TYPE, rcx);
+ __ Assert(equal, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Handle<Code> generic_construct_stub(code);
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+}
+
+
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax: number of arguments
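A summary sketch (not part of the patch) of how the new x64 Array builtins above dispatch the three call shapes, following the comments in ArrayNativeCode:

    // new Array()           -> AllocateEmptyJSArray: length 0, elements backing
    //                          store preallocated with kPreallocatedArrayElements
    //                          holes.
    // new Array(n)          -> AllocateJSArray filled with holes; bails out to the
    //                          generic code if n is not a positive smi or is at
    //                          least JSObject::kInitialMaxFastElementArray.
    // new Array(a, b, ...)  -> AllocateJSArray left uninitialized, then the
    //                          arguments are copied into the elements array.
    // Each native-code success path bumps Counters::array_function_native; the
    // generic fallback presumably accounts for array_function_runtime (not shown
    // in this diff).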
@@ -463,8 +888,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
Label non_function_call;
// Check that function is not a smi.
- __ testl(rdi, Immediate(kSmiTagMask));
- __ j(zero, &non_function_call);
+ __ JumpIfSmi(rdi, &non_function_call);
// Check that function is a JSFunction.
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &non_function_call);
@@ -492,7 +916,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ EnterConstructFrame();
// Store a smi-tagged arguments count on the stack.
- __ shl(rax, Immediate(kSmiTagSize));
+ __ Integer32ToSmi(rax, rax);
__ push(rax);
// Push the function to invoke on the stack.
@@ -517,8 +941,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// rdi: constructor
__ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(zero, &rt_call);
+ ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(rax, &rt_call);
// rdi: constructor
// rax: initial map (if proven valid below)
__ CmpObjectType(rax, MAP_TYPE, rbx);
@@ -536,12 +960,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
__ shl(rdi, Immediate(kPointerSizeLog2));
// rdi: size of new object
- __ AllocateObjectInNewSpace(rdi,
- rbx,
- rdi,
- no_reg,
- &rt_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(rdi,
+ rbx,
+ rdi,
+ no_reg,
+ &rt_call,
+ NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields.
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
@@ -596,14 +1020,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// rbx: JSObject
// rdi: start of next object (will be start of FixedArray)
// rdx: number of elements in properties array
- __ AllocateObjectInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- rdx,
- rdi,
- rax,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
+ __ AllocateInNewSpace(FixedArray::kHeaderSize,
+ times_pointer_size,
+ rdx,
+ rdi,
+ rax,
+ no_reg,
+ &undo_allocation,
+ RESULT_CONTAINS_TOP);
// Initialize the FixedArray.
// rbx: JSObject
@@ -668,7 +1092,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Retrieve smi-tagged arguments count from the stack.
__ movq(rax, Operand(rsp, 0));
- __ shr(rax, Immediate(kSmiTagSize));
+ __ SmiToInteger32(rax, rax);
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
@@ -701,8 +1125,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// on page 74.
Label use_receiver, exit;
// If the result is a smi, it is *not* an object in the ECMA sense.
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(zero, &use_receiver);
+ __ JumpIfSmi(rax, &use_receiver);
// If the type of the result (stored in its map) is less than
// FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
@@ -721,8 +1144,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ // TODO(smi): Find a way to abstract indexing by a smi.
__ pop(rcx);
- __ lea(rsp, Operand(rsp, rbx, times_4, 1 * kPointerSize)); // 1 ~ receiver
+ // 1 * kPointerSize is offset of receiver.
+ __ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
__ push(rcx);
__ IncrementCounter(&Counters::constructed_objects, 1);
__ ret(0);
diff --git a/V8Binding/v8/src/x64/cfg-x64.cc b/V8Binding/v8/src/x64/cfg-x64.cc
deleted file mode 100644
index 0b71d8e..0000000
--- a/V8Binding/v8/src/x64/cfg-x64.cc
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cfg.h"
-#include "codegen-inl.h"
-#include "codegen-x64.h"
-#include "debug.h"
-#include "macro-assembler-x64.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void InstructionBlock::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- {
- Comment cmt(masm, "[ InstructionBlock");
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- // If the location of the current instruction is a temp, then the
- // instruction cannot be in tail position in the block. Allocate the
- // temp based on peeking ahead to the next instruction.
- Instruction* instr = instructions_[i];
- Location* loc = instr->location();
- if (loc->is_temporary()) {
- instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
- }
- instructions_[i]->Compile(masm);
- }
- }
- successor_->Compile(masm);
-}
-
-
-void EntryNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- Label deferred_enter, deferred_exit;
- {
- Comment cmnt(masm, "[ EntryNode");
- __ push(rbp);
- __ movq(rbp, rsp);
- __ push(rsi);
- __ push(rdi);
- int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
- if (count > 0) {
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < count; i++) {
- __ push(kScratchRegister);
- }
- }
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- if (FLAG_check_stack) {
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ movq(kScratchRegister, stack_limit);
- __ cmpq(rsp, Operand(kScratchRegister, 0));
- __ j(below, &deferred_enter);
- __ bind(&deferred_exit);
- }
- }
- successor_->Compile(masm);
- if (FLAG_check_stack) {
- Comment cmnt(masm, "[ Deferred Stack Check");
- __ bind(&deferred_enter);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ jmp(&deferred_exit);
- }
-}
-
-
-void ExitNode::Compile(MacroAssembler* masm) {
- ASSERT(!is_marked());
- is_marked_ = true;
- Comment cmnt(masm, "[ ExitNode");
- if (FLAG_trace) {
- __ push(rax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- __ RecordJSReturn();
- __ movq(rsp, rbp);
- __ pop(rbp);
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- __ ret((count + 1) * kPointerSize);
- // Add padding that will be overwritten by a debugger breakpoint.
- // "movq rsp, rbp; pop rbp" has length 4. "ret k" has length 3.
- const int kPadding = Debug::kX64JSReturnSequenceLength - 4 - 3;
- for (int i = 0; i < kPadding; ++i) {
- __ int3();
- }
-}
-
-
-void PropLoadInstr::Compile(MacroAssembler* masm) {
- // The key should not be on the stack---if it is a compiler-generated
- // temporary it is in the accumulator.
- ASSERT(!key()->is_on_stack());
-
- Comment cmnt(masm, "[ Load from Property");
- // If the key is known at compile-time we may be able to use a load IC.
- bool is_keyed_load = true;
- if (key()->is_constant()) {
- // Still use the keyed load IC if the key can be parsed as an integer so
- // we will get into the case that handles [] on string objects.
- Handle<Object> key_val = Constant::cast(key())->handle();
- uint32_t ignored;
- if (key_val->IsSymbol() &&
- !String::cast(*key_val)->AsArrayIndex(&ignored)) {
- is_keyed_load = false;
- }
- }
-
- if (!object()->is_on_stack()) object()->Push(masm);
- // A test rax instruction after the call indicates to the IC code that it
- // was inlined. Ensure there is not one after the call below.
- if (is_keyed_load) {
- key()->Push(masm);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ pop(rbx); // Discard key.
- } else {
- key()->Get(masm, rcx);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- }
- __ pop(rbx); // Discard receiver.
- location()->Set(masm, rax);
-}
-
-
-void BinaryOpInstr::Compile(MacroAssembler* masm) {
- // The right-hand value should not be on the stack---if it is a
- // compiler-generated temporary it is in the accumulator.
- ASSERT(!right()->is_on_stack());
-
- Comment cmnt(masm, "[ BinaryOpInstr");
- // We can overwrite one of the operands if it is a temporary.
- OverwriteMode mode = NO_OVERWRITE;
- if (left()->is_temporary()) {
- mode = OVERWRITE_LEFT;
- } else if (right()->is_temporary()) {
- mode = OVERWRITE_RIGHT;
- }
-
- // Push both operands and call the specialized stub.
- if (!left()->is_on_stack()) left()->Push(masm);
- right()->Push(masm);
- GenericBinaryOpStub stub(op(), mode, SMI_CODE_IN_STUB);
- __ CallStub(&stub);
- location()->Set(masm, rax);
-}
-
-
-void ReturnInstr::Compile(MacroAssembler* masm) {
- // The location should be 'Effect'. As a side effect, move the value to
- // the accumulator.
- Comment cmnt(masm, "[ ReturnInstr");
- value()->Get(masm, rax);
-}
-
-
-void Constant::Get(MacroAssembler* masm, Register reg) {
- __ Move(reg, handle_);
-}
-
-
-void Constant::Push(MacroAssembler* masm) {
- __ Push(handle_);
-}
-
-
-static Operand ToOperand(SlotLocation* loc) {
- switch (loc->type()) {
- case Slot::PARAMETER: {
- int count = CfgGlobals::current()->fun()->scope()->num_parameters();
- return Operand(rbp, (1 + count - loc->index()) * kPointerSize);
- }
- case Slot::LOCAL: {
- const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
- return Operand(rbp, kOffset - loc->index() * kPointerSize);
- }
- default:
- UNREACHABLE();
- return Operand(rax, 0);
- }
-}
-
-
-void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ Move(ToOperand(loc), handle_);
-}
-
-
-void SlotLocation::Get(MacroAssembler* masm, Register reg) {
- __ movq(reg, ToOperand(this));
-}
-
-
-void SlotLocation::Set(MacroAssembler* masm, Register reg) {
- __ movq(ToOperand(this), reg);
-}
-
-
-void SlotLocation::Push(MacroAssembler* masm) {
- __ push(ToOperand(this));
-}
-
-
-void SlotLocation::Move(MacroAssembler* masm, Value* value) {
- // We dispatch to the value because in some cases (temp or constant) we
- // can use special instruction sequences.
- value->MoveToSlot(masm, this);
-}
-
-
-void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- __ movq(kScratchRegister, ToOperand(this));
- __ movq(ToOperand(loc), kScratchRegister);
-}
-
-
-void TempLocation::Get(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(rax)) __ movq(reg, rax);
- break;
- case STACK:
- __ pop(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Set(MacroAssembler* masm, Register reg) {
- switch (where_) {
- case ACCUMULATOR:
- if (!reg.is(rax)) __ movq(rax, reg);
- break;
- case STACK:
- __ push(reg);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Push(MacroAssembler* masm) {
- switch (where_) {
- case ACCUMULATOR:
- __ push(rax);
- break;
- case STACK:
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::Move(MacroAssembler* masm, Value* value) {
- switch (where_) {
- case ACCUMULATOR:
- value->Get(masm, rax);
- break;
- case STACK:
- value->Push(masm);
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
- switch (where_) {
- case ACCUMULATOR:
- __ movq(ToOperand(loc), rax);
- break;
- case STACK:
- __ pop(ToOperand(loc));
- break;
- case NOT_ALLOCATED:
- UNREACHABLE();
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/V8Binding/v8/src/x64/codegen-x64.cc b/V8Binding/v8/src/x64/codegen-x64.cc
index d7e15aa..8e6dbef 100644
--- a/V8Binding/v8/src/x64/codegen-x64.cc
+++ b/V8Binding/v8/src/x64/codegen-x64.cc
@@ -509,6 +509,7 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
// receiver.
frame_->Exit();
masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
+#ifdef ENABLE_DEBUGGER_SUPPORT
// Add padding that will be overwritten by a debugger breakpoint.
// frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
// with length 7 (3 + 1 + 3).
@@ -516,12 +517,12 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
for (int i = 0; i < kPadding; ++i) {
masm_->int3();
}
- DeleteFrame();
-
// Check that the size of the code used for returning matches what is
// expected by the debugger.
ASSERT_EQ(Debug::kX64JSReturnSequenceLength,
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+#endif
+ DeleteFrame();
}
@@ -720,11 +721,12 @@ void CodeGenerator::CallApplyLazy(Property* apply,
frame_->SyncRange(0, frame_->element_count() - 1);
// Check that the receiver really is a JavaScript object.
- { frame_->PushElementAt(0);
+ {
+ frame_->PushElementAt(0);
Result receiver = frame_->Pop();
receiver.ToRegister();
- __ testl(receiver.reg(), Immediate(kSmiTagMask));
- build_args.Branch(zero);
+ Condition is_smi = masm_->CheckSmi(receiver.reg());
+ build_args.Branch(is_smi);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
@@ -736,11 +738,12 @@ void CodeGenerator::CallApplyLazy(Property* apply,
}
// Verify that we're invoking Function.prototype.apply.
- { frame_->PushElementAt(1);
+ {
+ frame_->PushElementAt(1);
Result apply = frame_->Pop();
apply.ToRegister();
- __ testl(apply.reg(), Immediate(kSmiTagMask));
- build_args.Branch(zero);
+ Condition is_smi = masm_->CheckSmi(apply.reg());
+ build_args.Branch(is_smi);
Result tmp = allocator_->Allocate();
__ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
build_args.Branch(not_equal);
@@ -755,8 +758,8 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// Get the function receiver from the stack. Check that it
// really is a function.
__ movq(rdi, Operand(rsp, 2 * kPointerSize));
- __ testl(rdi, Immediate(kSmiTagMask));
- build_args.Branch(zero);
+ Condition is_smi = masm_->CheckSmi(rdi);
+ build_args.Branch(is_smi);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
build_args.Branch(not_equal);
@@ -780,7 +783,7 @@ void CodeGenerator::CallApplyLazy(Property* apply,
__ bind(&adapted);
static const uint32_t kArgumentsLimit = 1 * KB;
__ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ shrl(rax, Immediate(kSmiTagSize));
+ __ SmiToInteger32(rax, rax);
__ movq(rcx, rax);
__ cmpq(rax, Immediate(kArgumentsLimit));
build_args.Branch(above);
@@ -910,7 +913,6 @@ void CodeGenerator::VisitBlock(Block* node) {
void CodeGenerator::VisitDeclaration(Declaration* node) {
Comment cmnt(masm_, "[ Declaration");
- CodeForStatementPosition(node);
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
Slot* slot = var->slot();
@@ -1657,8 +1659,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Check if enumerable is already a JSObject
// rax: value to be iterated over
- __ testl(rax, Immediate(kSmiTagMask));
- primitive.Branch(zero);
+ Condition is_smi = masm_->CheckSmi(rax);
+ primitive.Branch(is_smi);
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
jsobject.Branch(above_equal);
@@ -1695,8 +1697,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(rax); // <- slot 3
frame_->EmitPush(rdx); // <- slot 2
- __ movsxlq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
- __ shl(rax, Immediate(kSmiTagSize));
+ __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
+ __ Integer32ToSmi(rax, rax);
frame_->EmitPush(rax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
entry.Jump();
@@ -1707,8 +1709,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(rax); // <- slot 2
// Push the length of the array and the initial index onto the stack.
- __ movsxlq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- __ shl(rax, Immediate(kSmiTagSize));
+ __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
+ __ Integer32ToSmi(rax, rax);
frame_->EmitPush(rax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
@@ -1725,9 +1727,9 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Get the i'th entry of the array.
__ movq(rdx, frame_->ElementAt(2));
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- // Multiplier is times_4 since rax is already a Smi.
- __ movq(rbx, FieldOperand(rdx, rax, times_4, FixedArray::kHeaderSize));
+ SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
+ __ movq(rbx,
+ FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
// Get the expected map from the stack or a zero map in the
// permanent slow case rax: current iteration count rbx: i'th entry
@@ -2589,7 +2591,6 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
void CodeGenerator::VisitAssignment(Assignment* node) {
Comment cmnt(masm_, "[ Assignment");
- CodeForStatementPosition(node);
{ Reference target(this, node->target());
if (target.is_illegal()) {
@@ -2671,8 +2672,6 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
void CodeGenerator::VisitThrow(Throw* node) {
Comment cmnt(masm_, "[ Throw");
- CodeForStatementPosition(node);
-
Load(node->exception());
Result result = frame_->CallRuntime(Runtime::kThrow, 1);
frame_->Push(&result);
@@ -2691,8 +2690,6 @@ void CodeGenerator::VisitCall(Call* node) {
ZoneList<Expression*>* args = node->arguments();
- CodeForStatementPosition(node);
-
// Check if the function is a variable or a property.
Expression* function = node->expression();
Variable* var = function->AsVariableProxy()->AsVariable();
@@ -2707,7 +2704,64 @@ void CodeGenerator::VisitCall(Call* node) {
// is resolved in cache misses (this also holds for megamorphic calls).
// ------------------------------------------------------------------------
- if (var != NULL && !var->is_this() && var->is_global()) {
+ if (var != NULL && var->is_possibly_eval()) {
+ // ----------------------------------
+ // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
+ // ----------------------------------
+
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to
+ // resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+
+ // Prepare the stack for the call to the resolved function.
+ Load(function);
+
+ // Allocate a frame slot for the receiver.
+ frame_->Push(Factory::undefined_value());
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
+
+ // Prepare the stack for the call to ResolvePossiblyDirectEval.
+ frame_->PushElementAt(arg_count + 1);
+ if (arg_count > 0) {
+ frame_->PushElementAt(arg_count);
+ } else {
+ frame_->Push(Factory::undefined_value());
+ }
+
+ // Resolve the call.
+ Result result =
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+ // Touch up the stack with the right values for the function and the
+ // receiver. Use a scratch register to avoid destroying the result.
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ __ movq(scratch.reg(),
+ FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
+ frame_->SetElementAt(arg_count + 1, &scratch);
+
+ // We can reuse the result register now.
+ frame_->Spill(result.reg());
+ __ movq(result.reg(),
+ FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
+ frame_->SetElementAt(arg_count, &result);
+
+ // Call the function.
+ CodeForSourcePosition(node->position());
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
+ result = frame_->CallStub(&call_function, arg_count + 1);
+
+ // Restore the context and overwrite the function on the stack with
+ // the result.
+ frame_->RestoreContextRegister();
+ frame_->SetElementAt(0, &result);
+
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is global
// ----------------------------------
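A worked sketch (not part of the patch) of the stack set up by the eval branch above, for a call eval(src) with arg_count == 1 (top of stack last):

    function            <- Load(function)
    undefined           <- reserved receiver slot
    src                 <- the single argument
    function (copy)     <- frame_->PushElementAt(arg_count + 1): 1st runtime arg
    src (copy)          <- frame_->PushElementAt(arg_count):     2nd runtime arg

    %ResolvePossiblyDirectEval consumes the two copies and returns a two-element
    FixedArray; element 0 overwrites the function slot (SetElementAt(arg_count + 1))
    and element 1 overwrites the undefined receiver slot (SetElementAt(arg_count))
    before CallFunctionStub is invoked with arg_count + 1 stack values.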
@@ -2734,6 +2788,7 @@ void CodeGenerator::VisitCall(Call* node) {
frame_->RestoreContextRegister();
// Replace the function on the stack with the result.
frame_->SetElementAt(0, &result);
+
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// ----------------------------------
@@ -2760,6 +2815,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Call the function.
CallWithArguments(args, node->position());
+
} else if (property != NULL) {
// Check if the key is a literal string.
Literal* literal = property->key()->AsLiteral();
@@ -2825,6 +2881,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Call the function.
CallWithArguments(args, node->position());
}
+
} else {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is not global
@@ -2842,70 +2899,8 @@ void CodeGenerator::VisitCall(Call* node) {
}
-void CodeGenerator::VisitCallEval(CallEval* node) {
- Comment cmnt(masm_, "[ CallEval");
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
- // the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
-
- ZoneList<Expression*>* args = node->arguments();
- Expression* function = node->expression();
-
- CodeForStatementPosition(node);
-
- // Prepare the stack for the call to the resolved function.
- Load(function);
-
- // Allocate a frame slot for the receiver.
- frame_->Push(Factory::undefined_value());
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Prepare the stack for the call to ResolvePossiblyDirectEval.
- frame_->PushElementAt(arg_count + 1);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(Factory::undefined_value());
- }
-
- // Resolve the call.
- Result result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
-
- // Touch up the stack with the right values for the function and the
- // receiver. Use a scratch register to avoid destroying the result.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ movq(scratch.reg(),
- FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
- frame_->SetElementAt(arg_count + 1, &scratch);
-
- // We can reuse the result register now.
- frame_->Spill(result.reg());
- __ movq(result.reg(),
- FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
- frame_->SetElementAt(arg_count, &result);
-
- // Call the function.
- CodeForSourcePosition(node->position());
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
- result = frame_->CallStub(&call_function, arg_count + 1);
-
- // Restore the context and overwrite the function on the stack with
- // the result.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &result);
-}
-
-
void CodeGenerator::VisitCallNew(CallNew* node) {
Comment cmnt(masm_, "[ CallNew");
- CodeForStatementPosition(node);
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
@@ -3093,8 +3088,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
JumpTarget continue_label;
Result operand = frame_->Pop();
operand.ToRegister();
- __ testl(operand.reg(), Immediate(kSmiTagMask));
- smi_label.Branch(zero, &operand);
+
+ Condition is_smi = masm_->CheckSmi(operand.reg());
+ smi_label.Branch(is_smi, &operand);
frame_->Push(&operand); // undo popping of TOS
Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
@@ -3103,9 +3099,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
smi_label.Bind(&answer);
answer.ToRegister();
frame_->Spill(answer.reg());
- __ not_(answer.reg());
- // Remove inverted smi-tag. The mask is sign-extended to 64 bits.
- __ xor_(answer.reg(), Immediate(kSmiTagMask));
+ __ SmiNot(answer.reg(), answer.reg());
continue_label.Bind(&answer);
frame_->Push(&answer);
break;
@@ -3116,9 +3110,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
JumpTarget continue_label;
Result operand = frame_->Pop();
operand.ToRegister();
- __ testl(operand.reg(), Immediate(kSmiTagMask));
- continue_label.Branch(zero, &operand, taken);
-
+ Condition is_smi = masm_->CheckSmi(operand.reg());
+ continue_label.Branch(is_smi, &operand);
frame_->Push(&operand);
Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
CALL_FUNCTION, 1);
@@ -3264,8 +3257,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
}
// Smi test.
deferred->Branch(overflow);
- __ testl(kScratchRegister, Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ __ JumpIfNotSmi(kScratchRegister, deferred->entry_label());
__ movq(new_value.reg(), kScratchRegister);
deferred->BindExit();
@@ -3470,8 +3462,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
answer.ToRegister();
if (check->Equals(Heap::number_symbol())) {
- __ testl(answer.reg(), Immediate(kSmiTagMask));
- destination()->true_target()->Branch(zero);
+ Condition is_smi = masm_->CheckSmi(answer.reg());
+ destination()->true_target()->Branch(is_smi);
frame_->Spill(answer.reg());
__ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
__ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
@@ -3479,8 +3471,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
destination()->Split(equal);
} else if (check->Equals(Heap::string_symbol())) {
- __ testl(answer.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
+ Condition is_smi = masm_->CheckSmi(answer.reg());
+ destination()->false_target()->Branch(is_smi);
// It can be an undetectable string object.
__ movq(kScratchRegister,
@@ -3503,8 +3495,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
__ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
destination()->true_target()->Branch(equal);
- __ testl(answer.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
+ Condition is_smi = masm_->CheckSmi(answer.reg());
+ destination()->false_target()->Branch(is_smi);
// It can be an undetectable object.
__ movq(kScratchRegister,
@@ -3515,16 +3507,16 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
destination()->Split(not_zero);
} else if (check->Equals(Heap::function_symbol())) {
- __ testl(answer.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
+ Condition is_smi = masm_->CheckSmi(answer.reg());
+ destination()->false_target()->Branch(is_smi);
frame_->Spill(answer.reg());
__ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
answer.Unuse();
destination()->Split(equal);
} else if (check->Equals(Heap::object_symbol())) {
- __ testl(answer.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(zero);
+ Condition is_smi = masm_->CheckSmi(answer.reg());
+ destination()->false_target()->Branch(is_smi);
__ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
destination()->true_target()->Branch(equal);
@@ -3623,8 +3615,8 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
Result value = frame_->Pop();
value.ToRegister();
ASSERT(value.is_valid());
- __ testl(value.reg(), Immediate(kSmiTagMask));
- destination()->false_target()->Branch(equal);
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ destination()->false_target()->Branch(is_smi);
// It is a heap object - get map.
// Check if the object is a JS array or not.
__ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
@@ -3727,17 +3719,13 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
// push.
// If the receiver is a smi trigger the slow case.
- ASSERT(kSmiTag == 0);
- __ testl(object.reg(), Immediate(kSmiTagMask));
- __ j(zero, &slow_case);
+ __ JumpIfSmi(object.reg(), &slow_case);
// If the index is negative or non-smi trigger the slow case.
- ASSERT(kSmiTag == 0);
- __ testl(index.reg(),
- Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
- __ j(not_zero, &slow_case);
+ __ JumpIfNotPositiveSmi(index.reg(), &slow_case);
+
// Untag the index.
- __ sarl(index.reg(), Immediate(kSmiTagSize));
+ __ SmiToInteger32(index.reg(), index.reg());
__ bind(&try_again_with_new_string);
// Fetch the instance type of the receiver into rcx.
@@ -3790,8 +3778,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
times_1,
SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
- ASSERT(kSmiTag == 0);
- __ shl(temp.reg(), Immediate(kSmiTagSize));
+ __ Integer32ToSmi(temp.reg(), temp.reg());
__ jmp(&end);
// Handle non-flat strings.
@@ -3832,10 +3819,9 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
Result value = frame_->Pop();
value.ToRegister();
ASSERT(value.is_valid());
- __ testl(value.reg(),
- Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
+ Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
value.Unuse();
- destination()->Split(zero);
+ destination()->Split(positive_smi);
}
@@ -3845,9 +3831,9 @@ void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
Result value = frame_->Pop();
value.ToRegister();
ASSERT(value.is_valid());
- __ testl(value.reg(), Immediate(kSmiTagMask));
+ Condition is_smi = masm_->CheckSmi(value.reg());
value.Unuse();
- destination()->Split(zero);
+ destination()->Split(is_smi);
}
@@ -3891,7 +3877,9 @@ void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
- ASSERT(kSmiTag == 0); // RBP value is aligned, so it should look like Smi.
+ // RBP value is aligned, so it should be tagged as a smi (without necessarily
+ // being padded as a smi).
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
Result rbp_as_smi = allocator_->Allocate();
ASSERT(rbp_as_smi.is_valid());
__ movq(rbp_as_smi.reg(), rbp);
@@ -4002,8 +3990,8 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
frame_->Spill(obj.reg());
// If the object is a smi, we return null.
- __ testl(obj.reg(), Immediate(kSmiTagMask));
- null.Branch(zero);
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ null.Branch(is_smi);
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
@@ -4064,8 +4052,8 @@ void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
object.ToRegister();
// if (object->IsSmi()) return value.
- __ testl(object.reg(), Immediate(kSmiTagMask));
- leave.Branch(zero, &value);
+ Condition is_smi = masm_->CheckSmi(object.reg());
+ leave.Branch(is_smi, &value);
// It is a heap object - get its map.
Result scratch = allocator_->Allocate();
@@ -4105,8 +4093,8 @@ void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
object.ToRegister();
ASSERT(object.is_valid());
// if (object->IsSmi()) return object.
- __ testl(object.reg(), Immediate(kSmiTagMask));
- leave.Branch(zero);
+ Condition is_smi = masm_->CheckSmi(object.reg());
+ leave.Branch(is_smi);
// It is a heap object - get map.
Result temp = allocator()->Allocate();
ASSERT(temp.is_valid());
@@ -4274,11 +4262,10 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
dest->false_target()->Branch(equal);
// Smi => false iff zero.
- ASSERT(kSmiTag == 0);
- __ testl(value.reg(), value.reg());
- dest->false_target()->Branch(zero);
- __ testl(value.reg(), Immediate(kSmiTagMask));
- dest->true_target()->Branch(zero);
+ Condition equals = masm_->CheckSmiEqualsConstant(value.reg(), 0);
+ dest->false_target()->Branch(equals);
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ dest->true_target()->Branch(is_smi);
// Call the stub for all other cases.
frame_->Push(&value); // Undo the Pop() from above.
@@ -4940,8 +4927,9 @@ void CodeGenerator::Comparison(Condition cc,
JumpTarget is_smi;
Register left_reg = left_side.reg();
Handle<Object> right_val = right_side.handle();
- __ testl(left_side.reg(), Immediate(kSmiTagMask));
- is_smi.Branch(zero, taken);
+
+ Condition left_is_smi = masm_->CheckSmi(left_side.reg());
+ is_smi.Branch(left_is_smi);
// Setup and call the compare stub.
CompareStub stub(cc, strict);
@@ -4982,8 +4970,8 @@ void CodeGenerator::Comparison(Condition cc,
dest->true_target()->Branch(equal);
__ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
dest->true_target()->Branch(equal);
- __ testl(operand.reg(), Immediate(kSmiTagMask));
- dest->false_target()->Branch(equal);
+ Condition is_smi = masm_->CheckSmi(operand.reg());
+ dest->false_target()->Branch(is_smi);
// It can be an undetectable object.
// Use a scratch register in preference to spilling operand.reg().
@@ -5023,10 +5011,8 @@ void CodeGenerator::Comparison(Condition cc,
Register left_reg = left_side.reg();
Register right_reg = right_side.reg();
- __ movq(kScratchRegister, left_reg);
- __ or_(kScratchRegister, right_reg);
- __ testl(kScratchRegister, Immediate(kSmiTagMask));
- is_smi.Branch(zero, taken);
+ Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
+ is_smi.Branch(both_smi);
// When non-smi, call out to the compare stub.
CompareStub stub(cc, strict);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
@@ -5317,15 +5303,11 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
smi_value,
overwrite_mode);
}
- __ testl(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- // A smi currently fits in a 32-bit Immediate.
- __ addl(operand->reg(), Immediate(smi_value));
- Label add_success;
- __ j(no_overflow, &add_success);
- __ subl(operand->reg(), Immediate(smi_value));
- deferred->Jump();
- __ bind(&add_success);
+ __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ __ SmiAddConstant(operand->reg(),
+ operand->reg(),
+ int_value,
+ deferred->entry_label());
deferred->BindExit();
frame_->Push(operand);
break;
@@ -5342,15 +5324,12 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
smi_value,
overwrite_mode);
- __ testl(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
// A smi currently fits in a 32-bit Immediate.
- __ subl(operand->reg(), Immediate(smi_value));
- Label add_success;
- __ j(no_overflow, &add_success);
- __ addl(operand->reg(), Immediate(smi_value));
- deferred->Jump();
- __ bind(&add_success);
+ __ SmiSubConstant(operand->reg(),
+ operand->reg(),
+ int_value,
+ deferred->entry_label());
deferred->BindExit();
frame_->Push(operand);
}
@@ -5374,12 +5353,10 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->reg(),
smi_value,
overwrite_mode);
- __ testl(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- if (shift_value > 0) {
- __ sarl(operand->reg(), Immediate(shift_value));
- __ and_(operand->reg(), Immediate(~kSmiTagMask));
- }
+ __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ __ SmiShiftArithmeticRightConstant(operand->reg(),
+ operand->reg(),
+ shift_value);
deferred->BindExit();
frame_->Push(operand);
}
@@ -5403,21 +5380,13 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->reg(),
smi_value,
overwrite_mode);
- __ testl(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- __ movl(answer.reg(), operand->reg());
- __ sarl(answer.reg(), Immediate(kSmiTagSize));
- __ shrl(answer.reg(), Immediate(shift_value));
- // A negative Smi shifted right two is in the positive Smi range.
- if (shift_value < 2) {
- __ testl(answer.reg(), Immediate(0xc0000000));
- deferred->Branch(not_zero);
- }
- operand->Unuse();
- ASSERT(kSmiTag == 0);
- ASSERT(kSmiTagSize == 1);
- __ addl(answer.reg(), answer.reg());
+ __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ __ SmiShiftLogicalRightConstant(answer.reg(),
+ operand->reg(),
+ shift_value,
+ deferred->entry_label());
deferred->BindExit();
+ operand->Unuse();
frame_->Push(&answer);
}
break;
@@ -5441,8 +5410,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->reg(),
smi_value,
overwrite_mode);
- __ testl(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
deferred->BindExit();
frame_->Push(operand);
} else {
@@ -5455,18 +5423,11 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->reg(),
smi_value,
overwrite_mode);
- __ testl(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
- __ movl(answer.reg(), operand->reg());
- ASSERT(kSmiTag == 0); // adjust code if not the case
- // We do no shifts, only the Smi conversion, if shift_value is 1.
- if (shift_value > 1) {
- __ shll(answer.reg(), Immediate(shift_value - 1));
- }
- // Convert int result to Smi, checking that it is in int range.
- ASSERT(kSmiTagSize == 1); // adjust code if not the case
- __ addl(answer.reg(), answer.reg());
- deferred->Branch(overflow);
+ __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+ __ SmiShiftLeftConstant(answer.reg(),
+ operand->reg(),
+ shift_value,
+ deferred->entry_label());
deferred->BindExit();
operand->Unuse();
frame_->Push(&answer);
@@ -5490,18 +5451,17 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->reg(),
smi_value,
overwrite_mode);
- __ testl(operand->reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
if (op == Token::BIT_AND) {
- __ and_(operand->reg(), Immediate(smi_value));
+ __ SmiAndConstant(operand->reg(), operand->reg(), int_value);
} else if (op == Token::BIT_XOR) {
if (int_value != 0) {
- __ xor_(operand->reg(), Immediate(smi_value));
+ __ SmiXorConstant(operand->reg(), operand->reg(), int_value);
}
} else {
ASSERT(op == Token::BIT_OR);
if (int_value != 0) {
- __ or_(operand->reg(), Immediate(smi_value));
+ __ SmiOrConstant(operand->reg(), operand->reg(), int_value);
}
}
deferred->BindExit();
@@ -5522,14 +5482,12 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
smi_value,
overwrite_mode);
// Check for negative or non-Smi left hand side.
- __ testl(operand->reg(),
- Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000)));
- deferred->Branch(not_zero);
+ __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
if (int_value < 0) int_value = -int_value;
if (int_value == 1) {
__ movl(operand->reg(), Immediate(Smi::FromInt(0)));
} else {
- __ and_(operand->reg(), Immediate((int_value << kSmiTagSize) - 1));
+ __ SmiAndConstant(operand->reg(), operand->reg(), int_value - 1);
}
deferred->BindExit();
frame_->Push(operand);
@@ -5631,67 +5589,17 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
left->reg(),
right->reg(),
overwrite_mode);
- if (left->reg().is(right->reg())) {
- __ testl(left->reg(), Immediate(kSmiTagMask));
- } else {
- // Use the quotient register as a scratch for the tag check.
- if (!left_is_in_rax) __ movq(rax, left->reg());
- left_is_in_rax = false; // About to destroy the value in rax.
- __ or_(rax, right->reg());
- ASSERT(kSmiTag == 0); // Adjust test if not the case.
- __ testl(rax, Immediate(kSmiTagMask));
- }
- deferred->Branch(not_zero);
-
- // All operations on the smi values are on 32-bit registers, which are
- // zero-extended into 64-bits by all 32-bit operations.
- if (!left_is_in_rax) __ movl(rax, left->reg());
- // Sign extend eax into edx:eax.
- __ cdq();
- // Check for 0 divisor.
- __ testl(right->reg(), right->reg());
- deferred->Branch(zero);
- // Divide rdx:rax by the right operand.
- __ idivl(right->reg());
-
- // Complete the operation.
+ __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
+
if (op == Token::DIV) {
- // Check for negative zero result. If the result is zero, and the
- // divisor is negative, return a floating point negative zero.
- Label non_zero_result;
- __ testl(left->reg(), left->reg());
- __ j(not_zero, &non_zero_result);
- __ testl(right->reg(), right->reg());
- deferred->Branch(negative);
- // The frame is identical on all paths reaching this label.
- __ bind(&non_zero_result);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by
- // idiv instruction.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmpl(rax, Immediate(0x40000000));
- deferred->Branch(equal);
- // Check that the remainder is zero.
- __ testl(rdx, rdx);
- deferred->Branch(not_zero);
- // Tag the result and store it in the quotient register.
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
- __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
+ __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
deferred->BindExit();
left->Unuse();
right->Unuse();
frame_->Push(&quotient);
} else {
ASSERT(op == Token::MOD);
- // Check for a negative zero result. If the result is zero, and the
- // dividend is negative, return a floating point negative zero.
- Label non_zero_result;
- __ testl(rdx, rdx);
- __ j(not_zero, &non_zero_result);
- __ testl(left->reg(), left->reg());
- deferred->Branch(negative);
- // The frame is identical on all paths reaching this label.
- __ bind(&non_zero_result);
+ __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
deferred->BindExit();
left->Unuse();
right->Unuse();
@@ -5730,59 +5638,30 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
overwrite_mode);
__ movq(answer.reg(), left->reg());
__ or_(answer.reg(), rcx);
- __ testl(answer.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ __ JumpIfNotSmi(answer.reg(), deferred->entry_label());
- // Untag both operands.
- __ movl(answer.reg(), left->reg());
- __ sarl(answer.reg(), Immediate(kSmiTagSize));
- __ sarl(rcx, Immediate(kSmiTagSize));
// Perform the operation.
switch (op) {
case Token::SAR:
- __ sarl(answer.reg());
- // No checks of result necessary
+ __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
break;
case Token::SHR: {
- Label result_ok;
- __ shrl(answer.reg());
- // Check that the *unsigned* result fits in a smi. Neither of
- // the two high-order bits can be set:
- // * 0x80000000: high bit would be lost when smi tagging.
- // * 0x40000000: this number would convert to negative when smi
- // tagging.
- // These two cases can only happen with shifts by 0 or 1 when
- // handed a valid smi. If the answer cannot be represented by a
- // smi, restore the left and right arguments, and jump to slow
- // case. The low bit of the left argument may be lost, but only
- // in a case where it is dropped anyway.
- __ testl(answer.reg(), Immediate(0xc0000000));
- __ j(zero, &result_ok);
- ASSERT(kSmiTag == 0);
- __ shl(rcx, Immediate(kSmiTagSize));
- deferred->Jump();
- __ bind(&result_ok);
+ __ SmiShiftLogicalRight(answer.reg(),
+ left->reg(),
+ rcx,
+ deferred->entry_label());
break;
}
case Token::SHL: {
- Label result_ok;
- __ shl(answer.reg());
- // Check that the *signed* result fits in a smi.
- __ cmpl(answer.reg(), Immediate(0xc0000000));
- __ j(positive, &result_ok);
- ASSERT(kSmiTag == 0);
- __ shl(rcx, Immediate(kSmiTagSize));
- deferred->Jump();
- __ bind(&result_ok);
+ __ SmiShiftLeft(answer.reg(),
+ left->reg(),
+ rcx,
+ deferred->entry_label());
break;
}
default:
UNREACHABLE();
}
- // Smi-tag the result in answer.
- ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
- __ lea(answer.reg(),
- Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
deferred->BindExit();
left->Unuse();
right->Unuse();
@@ -5806,63 +5685,41 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
left->reg(),
right->reg(),
overwrite_mode);
- if (left->reg().is(right->reg())) {
- __ testl(left->reg(), Immediate(kSmiTagMask));
- } else {
- __ movq(answer.reg(), left->reg());
- __ or_(answer.reg(), right->reg());
- ASSERT(kSmiTag == 0); // Adjust test if not the case.
- __ testl(answer.reg(), Immediate(kSmiTagMask));
- }
- deferred->Branch(not_zero);
- __ movq(answer.reg(), left->reg());
+ __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
+
switch (op) {
case Token::ADD:
- __ addl(answer.reg(), right->reg());
- deferred->Branch(overflow);
+ __ SmiAdd(answer.reg(),
+ left->reg(),
+ right->reg(),
+ deferred->entry_label());
break;
case Token::SUB:
- __ subl(answer.reg(), right->reg());
- deferred->Branch(overflow);
+ __ SmiSub(answer.reg(),
+ left->reg(),
+ right->reg(),
+ deferred->entry_label());
break;
case Token::MUL: {
- // If the smi tag is 0 we can just leave the tag on one operand.
- ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // Remove smi tag from the left operand (but keep sign).
- // Left-hand operand has been copied into answer.
- __ sarl(answer.reg(), Immediate(kSmiTagSize));
- // Do multiplication of smis, leaving result in answer.
- __ imull(answer.reg(), right->reg());
- // Go slow on overflows.
- deferred->Branch(overflow);
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case. The frame is unchanged
- // in this block, so local control flow can use a Label rather
- // than a JumpTarget.
- Label non_zero_result;
- __ testl(answer.reg(), answer.reg());
- __ j(not_zero, &non_zero_result);
- __ movq(answer.reg(), left->reg());
- __ or_(answer.reg(), right->reg());
- deferred->Branch(negative);
- __ xor_(answer.reg(), answer.reg()); // Positive 0 is correct.
- __ bind(&non_zero_result);
+ __ SmiMul(answer.reg(),
+ left->reg(),
+ right->reg(),
+ deferred->entry_label());
break;
}
case Token::BIT_OR:
- __ or_(answer.reg(), right->reg());
+ __ SmiOr(answer.reg(), left->reg(), right->reg());
break;
case Token::BIT_AND:
- __ and_(answer.reg(), right->reg());
+ __ SmiAnd(answer.reg(), left->reg(), right->reg());
break;
case Token::BIT_XOR:
- ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- __ xor_(answer.reg(), right->reg());
+ __ SmiXor(answer.reg(), left->reg(), right->reg());
break;
default:
@@ -5973,8 +5830,7 @@ void Reference::GetValue(TypeofState typeof_state) {
GetName());
// Check that the receiver is a heap object.
- __ testl(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
+ __ JumpIfSmi(receiver.reg(), deferred->entry_label());
__ bind(deferred->patch_site());
// This is the map check instruction that will be patched (so we can't
@@ -6046,8 +5902,7 @@ void Reference::GetValue(TypeofState typeof_state) {
// is not a load from the global context) and that it has the
// expected map.
if (!is_global) {
- __ testl(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
+ __ JumpIfSmi(receiver.reg(), deferred->entry_label());
}
// Initially, use an invalid map. The map is patched in the IC
@@ -6062,9 +5917,7 @@ void Reference::GetValue(TypeofState typeof_state) {
deferred->Branch(not_equal);
// Check that the key is a non-negative smi.
- __ testl(key.reg(),
- Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000u)));
- deferred->Branch(not_zero);
+ __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
// Get the elements array from the receiver and check that it
// is not a dictionary.
@@ -6076,8 +5929,7 @@ void Reference::GetValue(TypeofState typeof_state) {
// Shift the key to get the actual index value and check that
// it is within bounds.
- __ movl(index.reg(), key.reg());
- __ shrl(index.reg(), Immediate(kSmiTagSize));
+ __ SmiToInteger32(index.reg(), key.reg());
__ cmpl(index.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
@@ -6228,20 +6080,16 @@ void Reference::SetValue(InitState init_state) {
// Check that the value is a smi if it is not a constant.
// We can skip the write barrier for smis and constants.
if (!value_is_constant) {
- __ testl(value.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ __ JumpIfNotSmi(value.reg(), deferred->entry_label());
}
// Check that the key is a non-negative smi.
- __ testl(key.reg(),
- Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
- deferred->Branch(not_zero);
+ __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
// Ensure that the smi is zero-extended. This is not guaranteed.
__ movl(key.reg(), key.reg());
// Check that the receiver is not a smi.
- __ testl(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
+ __ JumpIfSmi(receiver.reg(), deferred->entry_label());
// Check that the receiver is a JSArray.
__ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
@@ -6272,11 +6120,11 @@ void Reference::SetValue(InitState init_state) {
deferred->Branch(not_equal);
// Store the value.
- ASSERT_EQ(1, kSmiTagSize);
- ASSERT_EQ(0, kSmiTag);
- __ movq(Operand(tmp.reg(),
- key.reg(),
- times_half_pointer_size,
+ SmiIndex index =
+ masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
+ __ movq(Operand(tmp.reg(),
+ index.reg,
+ index.scale,
FixedArray::kHeaderSize - kHeapObjectTag),
value.reg());
__ IncrementCounter(&Counters::keyed_store_inline, 1);
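// The keyed store above now builds its memory operand through SmiToIndex,
// which picks a register and scale so that reg * scale equals the untagged
// key shifted left by the requested amount.  Minimal sketch of the underlying
// arithmetic, assuming the 31-bit smi encoding used here (kSmiTag == 0,
// kSmiTagSize == 1); names below are illustrative, not V8's.
#include <cassert>
#include <cstdint>

// A tagged smi is value * 2, so a byte offset of value << shift can reuse the
// tag's factor of two and scale the tagged key by 1 << (shift - 1) instead of
// untagging first (the old code did exactly this with times_half_pointer_size).
uint64_t SmiKeyToByteOffset(uint32_t tagged_key, int shift) {
  assert(shift >= 1);
  return static_cast<uint64_t>(tagged_key) << (shift - 1);
}

int main() {
  const uint32_t key = 5u << 1;                   // smi-tagged 5
  assert(SmiKeyToByteOffset(key, 3) == 5u * 8u);  // index into 8-byte elements
  return 0;
}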
@@ -6457,15 +6305,14 @@ void UnarySubStub::Generate(MacroAssembler* masm) {
Label try_float;
Label special;
// Check whether the value is a smi.
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(not_zero, &try_float);
+ __ JumpIfNotSmi(rax, &try_float);
// Enter runtime system if the value of the smi is zero
// to make sure that we switch between 0 and -0.
// Also enter it if the value of the smi is Smi::kMinValue
__ testl(rax, Immediate(0x7FFFFFFE));
__ j(zero, &special);
- __ neg(rax);
+ __ negl(rax);
__ jmp(&done);
__ bind(&special);
@@ -6567,23 +6414,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// be equal if the other is a HeapNumber. If so, use the slow case.
{
Label not_smis;
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ movq(rcx, Immediate(kSmiTagMask));
- __ and_(rcx, rax);
- __ testq(rcx, rdx);
- __ j(not_zero, &not_smis);
- // One operand is a smi.
-
- // Check whether the non-smi is a heap number.
- ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
- // rcx still holds rax & kSmiTag, which is either zero or one.
- __ decq(rcx); // If rax is a smi, all 1s, else all 0s.
- __ movq(rbx, rdx);
- __ xor_(rbx, rax);
- __ and_(rbx, rcx); // rbx holds either 0 or rax ^ rdx.
- __ xor_(rbx, rax);
- // if rax was smi, rbx is now rdx, else rax.
+ __ SelectNonSmi(rbx, rax, rdx, &not_smis);
// Check if the non-smi operand is a heap number.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
@@ -6712,8 +6543,7 @@ void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
Label* label,
Register object,
Register scratch) {
- __ testl(object, Immediate(kSmiTagMask));
- __ j(zero, label);
+ __ JumpIfSmi(object, label);
__ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
__ movzxbq(scratch,
FieldOperand(scratch, Map::kInstanceTypeOffset));
@@ -6757,8 +6587,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Get the object - go slow case if it's a smi.
Label slow;
__ movq(rax, Operand(rsp, 2 * kPointerSize));
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(zero, &slow);
+ __ JumpIfSmi(rax, &slow);
// Check that the left hand is a JS object. Leave its map in rax.
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
@@ -6771,8 +6600,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ TryGetFunctionPrototype(rdx, rbx, &slow);
// Check that the function prototype is a JS object.
- __ testl(rbx, Immediate(kSmiTagMask));
- __ j(zero, &slow);
+ __ JumpIfSmi(rbx, &slow);
__ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
__ j(below, &slow);
__ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
@@ -6824,12 +6652,14 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ movq(Operand(rsp, 1 * kPointerSize), rcx);
- __ lea(rdx, Operand(rdx, rcx, times_4, kDisplacement));
+ SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
+ __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
__ movq(Operand(rsp, 2 * kPointerSize), rdx);
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3);
+ Runtime::Function* f = Runtime::FunctionForId(Runtime::kNewArgumentsFast);
+ __ TailCallRuntime(ExternalReference(f), 3, f->result_size);
}
@@ -6843,8 +6673,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Check that the key is a smi.
Label slow;
- __ testl(rdx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow);
+ __ JumpIfNotSmi(rdx, &slow);
// Check if the calling frame is an arguments adaptor frame.
Label adaptor;
@@ -6860,12 +6689,10 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
- // Shifting code depends on SmiEncoding being equivalent to left shift:
- // we multiply by four to get pointer alignment.
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ lea(rbx, Operand(rbp, rax, times_4, 0));
- __ neg(rdx);
- __ movq(rax, Operand(rbx, rdx, times_4, kDisplacement));
+ SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+ __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
+ index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
+ __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
__ Ret();
// Arguments adaptor case: Check index against actual arguments
@@ -6877,12 +6704,10 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
- // Shifting code depends on SmiEncoding being equivalent to left shift:
- // we multiply by four to get pointer alignment.
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ lea(rbx, Operand(rbx, rcx, times_4, 0));
- __ neg(rdx);
- __ movq(rax, Operand(rbx, rdx, times_4, kDisplacement));
+ index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
+ __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
+ index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
+ __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
__ Ret();
// Slow-case: Handle non-smi or out-of-bounds access to arguments
@@ -6891,7 +6716,9 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ pop(rbx); // Return address.
__ push(rdx);
__ push(rbx);
- __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1);
+ Runtime::Function* f =
+ Runtime::FunctionForId(Runtime::kGetArgumentsProperty);
+ __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
}
@@ -6915,6 +6742,23 @@ void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
}
+int CEntryStub::MinorKey() {
+ ASSERT(result_size_ <= 2);
+#ifdef _WIN64
+ // Simple results are returned in rax (using the default code).
+ // Complex results must be written to the address passed as the first argument.
+ // Use even numbers for minor keys, reserving the odd numbers for
+ // CEntryDebugBreakStub.
+ return (result_size_ < 2) ? 0 : result_size_ * 2;
+#else
+ // Single results are returned in rax (both AMD64 and Win64 calling
+ // conventions); a struct of two pointers is returned in rax+rdx by default
+ // (AMD64 calling convention only).
+ return 0;
+#endif
+}
+
+
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// Check that stack should contain next handler, frame pointer, state and
// return address in that order.
@@ -6986,8 +6830,18 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
__ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
__ movq(Operand(rsp, 5 * kPointerSize), r15); // argv.
- // Pass a pointer to the Arguments object as the first argument.
- __ lea(rcx, Operand(rsp, 4 * kPointerSize));
+ if (result_size_ < 2) {
+ // Pass a pointer to the Arguments object as the first argument.
+ // Return result in single register (rax).
+ __ lea(rcx, Operand(rsp, 4 * kPointerSize));
+ } else {
+ ASSERT_EQ(2, result_size_);
+ // Pass a pointer to the result location as the first argument.
+ __ lea(rcx, Operand(rsp, 6 * kPointerSize));
+ // Pass a pointer to the Arguments object as the second argument.
+ __ lea(rdx, Operand(rsp, 4 * kPointerSize));
+ }
+
#else // ! defined(_WIN64)
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
__ movq(rdi, r14); // argc.
@@ -7010,7 +6864,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(zero, &failure_returned);
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(frame_type);
+ __ LeaveExitFrame(frame_type, result_size_);
__ ret(0);
// Handling of failure.
@@ -7109,8 +6963,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
// Check that the function really is a JavaScript function.
- __ testl(rdi, Immediate(kSmiTagMask));
- __ j(zero, &slow);
+ __ JumpIfSmi(rdi, &slow);
// Goto slow case if we do not have a function.
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &slow);
@@ -7146,7 +6999,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
StackFrame::EXIT;
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(frame_type);
+ __ EnterExitFrame(frame_type, result_size_);
// rax: Holds the context at this point, but should not be used.
// On entry to code generated by GenerateCore, it must hold
@@ -7333,7 +7186,8 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
__ push(rax);
// Do tail-call to runtime routine.
- __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1);
+ Runtime::Function* f = Runtime::FunctionForId(Runtime::kStackGuard);
+ __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
}
@@ -7342,12 +7196,12 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
Register scratch,
Register result) {
// Allocate heap number in new space.
- __ AllocateObjectInNewSpace(HeapNumber::kSize,
- result,
- scratch,
- no_reg,
- need_gc,
- TAG_OBJECT);
+ __ AllocateInNewSpace(HeapNumber::kSize,
+ result,
+ scratch,
+ no_reg,
+ need_gc,
+ TAG_OBJECT);
// Set the map and tag the result.
__ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
@@ -7359,13 +7213,12 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
Label load_smi, done;
- __ testl(number, Immediate(kSmiTagMask));
- __ j(zero, &load_smi);
+ __ JumpIfSmi(number, &load_smi);
__ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi);
- __ sarl(number, Immediate(kSmiTagSize));
+ __ SmiToInteger32(number, number);
__ push(number);
__ fild_s(Operand(rsp, 0));
__ pop(number);
@@ -7379,13 +7232,12 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
XMMRegister dst) {
Label load_smi, done;
- __ testl(src, Immediate(kSmiTagMask));
- __ j(zero, &load_smi);
+ __ JumpIfSmi(src, &load_smi);
__ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi);
- __ sarl(src, Immediate(kSmiTagSize));
+ __ SmiToInteger32(src, src);
__ cvtlsi2sd(dst, src);
__ bind(&done);
@@ -7414,26 +7266,24 @@ void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm,
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
Label load_smi_1, load_smi_2, done_load_1, done;
__ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
- __ testl(kScratchRegister, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_1);
+ __ JumpIfSmi(kScratchRegister, &load_smi_1);
__ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
__ bind(&done_load_1);
__ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
- __ testl(kScratchRegister, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_2);
+ __ JumpIfSmi(kScratchRegister, &load_smi_2);
__ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi_1);
- __ sarl(kScratchRegister, Immediate(kSmiTagSize));
+ __ SmiToInteger32(kScratchRegister, kScratchRegister);
__ push(kScratchRegister);
__ fild_s(Operand(rsp, 0));
__ pop(kScratchRegister);
__ jmp(&done_load_1);
__ bind(&load_smi_2);
- __ sarl(kScratchRegister, Immediate(kSmiTagSize));
+ __ SmiToInteger32(kScratchRegister, kScratchRegister);
__ push(kScratchRegister);
__ fild_s(Operand(rsp, 0));
__ pop(kScratchRegister);
@@ -7446,29 +7296,23 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
Register lhs,
Register rhs) {
Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
- __ testl(lhs, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_lhs);
+ __ JumpIfSmi(lhs, &load_smi_lhs);
__ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
__ bind(&done_load_lhs);
- __ testl(rhs, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_rhs);
+ __ JumpIfSmi(rhs, &load_smi_rhs);
__ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi_lhs);
- ASSERT(kSmiTagSize == 1);
- ASSERT(kSmiTag == 0);
- __ movsxlq(kScratchRegister, lhs);
- __ sar(kScratchRegister, Immediate(kSmiTagSize));
+ __ SmiToInteger64(kScratchRegister, lhs);
__ push(kScratchRegister);
__ fild_d(Operand(rsp, 0));
__ pop(kScratchRegister);
__ jmp(&done_load_lhs);
__ bind(&load_smi_rhs);
- __ movsxlq(kScratchRegister, rhs);
- __ sar(kScratchRegister, Immediate(kSmiTagSize));
+ __ SmiToInteger64(kScratchRegister, rhs);
__ push(kScratchRegister);
__ fild_d(Operand(rsp, 0));
__ pop(kScratchRegister);
@@ -7482,14 +7326,12 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label test_other, done;
// Test if both operands are numbers (heap_numbers or smis).
// If not, jump to label non_float.
- __ testl(rdx, Immediate(kSmiTagMask));
- __ j(zero, &test_other); // argument in rdx is OK
+ __ JumpIfSmi(rdx, &test_other); // argument in rdx is OK
__ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
__ j(not_equal, non_float); // The argument in rdx is not a number.
__ bind(&test_other);
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(zero, &done); // argument in rax is OK
+ __ JumpIfSmi(rax, &done); // argument in rax is OK
__ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
__ j(not_equal, non_float); // The argument in rax is not a number.
@@ -7520,88 +7362,41 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// leave result in register rax.
// Smi check both operands.
- __ movq(rcx, rbx);
- __ or_(rcx, rax); // The value in ecx is used for negative zero test later.
- __ testl(rcx, Immediate(kSmiTagMask));
- __ j(not_zero, slow);
+ __ JumpIfNotBothSmi(rax, rbx, slow);
switch (op_) {
case Token::ADD: {
- __ addl(rax, rbx);
- __ j(overflow, slow); // The slow case rereads operands from the stack.
+ __ SmiAdd(rax, rax, rbx, slow);
break;
}
case Token::SUB: {
- __ subl(rax, rbx);
- __ j(overflow, slow); // The slow case rereads operands from the stack.
+ __ SmiSub(rax, rax, rbx, slow);
break;
}
case Token::MUL:
- // If the smi tag is 0 we can just leave the tag on one operand.
- ASSERT(kSmiTag == 0); // adjust code below if not the case
- // Remove tag from one of the operands (but keep sign).
- __ sarl(rax, Immediate(kSmiTagSize));
- // Do multiplication.
- __ imull(rax, rbx); // multiplication of smis; result in eax
- // Go slow on overflows.
- __ j(overflow, slow);
- // Check for negative zero result.
- __ NegativeZeroTest(rax, rcx, slow); // ecx (not rcx) holds x | y.
+ __ SmiMul(rax, rax, rbx, slow);
break;
case Token::DIV:
- // Sign extend eax into edx:eax.
- __ cdq();
- // Check for 0 divisor.
- __ testl(rbx, rbx);
- __ j(zero, slow);
- // Divide edx:eax by ebx (where edx:eax is equivalent to the smi in eax).
- __ idivl(rbx);
- // Check that the remainder is zero.
- __ testl(rdx, rdx);
- __ j(not_zero, slow);
- // Check for the corner case of dividing the most negative smi
- // by -1. We cannot use the overflow flag, since it is not set
- // by idiv instruction.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- // TODO(X64): TODO(Smi): Smi implementation dependent constant.
- // Value is Smi::fromInt(-(1<<31)) / Smi::fromInt(-1)
- __ cmpl(rax, Immediate(0x40000000));
- __ j(equal, slow);
- // Check for negative zero result.
- __ NegativeZeroTest(rax, rcx, slow); // ecx (not rcx) holds x | y.
- // Tag the result and store it in register rax.
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
- __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
+ __ SmiDiv(rax, rax, rbx, slow);
break;
case Token::MOD:
- // Sign extend eax into edx:eax
- __ cdq();
- // Check for 0 divisor.
- __ testl(rbx, rbx);
- __ j(zero, slow);
- // Divide edx:eax by ebx.
- __ idivl(rbx);
- // Check for negative zero result.
- __ NegativeZeroTest(rdx, rcx, slow); // ecx (not rcx) holds x | y.
- // Move remainder to register rax.
- __ movl(rax, rdx);
+ __ SmiMod(rax, rax, rbx, slow);
break;
case Token::BIT_OR:
- __ or_(rax, rbx);
+ __ SmiOr(rax, rax, rbx);
break;
case Token::BIT_AND:
- __ and_(rax, rbx);
+ __ SmiAnd(rax, rax, rbx);
break;
case Token::BIT_XOR:
- ASSERT_EQ(0, kSmiTag);
- __ xor_(rax, rbx);
+ __ SmiXor(rax, rax, rbx);
break;
case Token::SHL:
@@ -7609,41 +7404,20 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
case Token::SAR:
// Move the second operand into register ecx.
__ movl(rcx, rbx);
- // Remove tags from operands (but keep sign).
- __ sarl(rax, Immediate(kSmiTagSize));
- __ sarl(rcx, Immediate(kSmiTagSize));
// Perform the operation.
switch (op_) {
case Token::SAR:
- __ sarl(rax);
- // No checks of result necessary
+ __ SmiShiftArithmeticRight(rax, rax, rbx);
break;
case Token::SHR:
- __ shrl(rax); // rcx is implicit shift register
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
- // Smi tagging these two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi.
- __ testl(rax, Immediate(0xc0000000));
- __ j(not_zero, slow);
+ __ SmiShiftLogicalRight(rax, rax, rbx, slow);
break;
case Token::SHL:
- __ shll(rax);
- // Check that the *signed* result fits in a smi.
- // It does, if the 30th and 31st bits are equal, since then
- // shifting the SmiTag in at the bottom doesn't change the sign.
- ASSERT(kSmiTagSize == 1);
- __ cmpl(rax, Immediate(0xc0000000));
- __ j(sign, slow);
+ __ SmiShiftLeft(rax, rax, rbx, slow);
break;
default:
UNREACHABLE();
}
- // Tag the result and store it in register eax.
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
- __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
break;
default:
@@ -7691,8 +7465,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case OVERWRITE_RIGHT:
// If the argument in rax is already an object, we skip the
// allocation of a heap number.
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation);
+ __ JumpIfNotSmi(rax, &skip_allocation);
// Fall through!
case NO_OVERWRITE:
FloatingPointHelper::AllocateHeapNumber(masm,
@@ -7798,8 +7571,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ j(negative, &non_smi_result);
}
// Tag smi result and return.
- ASSERT(kSmiTagSize == 1); // adjust code if not the case
- __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
+ __ Integer32ToSmi(rax, rax);
__ ret(2 * kPointerSize);
// All ops except SHR return a signed int32 that we load in a HeapNumber.
@@ -7814,8 +7586,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// allocation of a heap number.
__ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
1 * kPointerSize : 2 * kPointerSize));
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation);
+ __ JumpIfNotSmi(rax, &skip_allocation);
// Fall through!
case NO_OVERWRITE:
FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
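// The CEntryStub and TailCallRuntime changes in this file thread a result_size
// through to the exit-frame code because the two calling conventions disagree
// on how a two-pointer result comes back: the System V AMD64 ABI returns it in
// rax:rdx, while the Win64 ABI makes the caller pass a hidden pointer to the
// result slot as the first argument (hence the extra lea into rcx when
// result_size_ == 2).  Rough, self-contained illustration; the struct name is
// made up and not V8's.
#include <cstdio>

struct TwoPointers {   // the shape of a "result_size == 2" return value
  void* first;
  void* second;
};

// On System V AMD64 this returns in rax:rdx; on Win64 the compiler rewrites it
// to take a hidden result pointer as the first parameter, shifting the visible
// arguments over by one register.
TwoPointers MakeTwoPointers(void* a, void* b) { return TwoPointers{a, b}; }

int main() {
  int x = 1, y = 2;
  TwoPointers p = MakeTwoPointers(&x, &y);
  std::printf("%p %p\n", p.first, p.second);
  return 0;
}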
diff --git a/V8Binding/v8/src/x64/codegen-x64.h b/V8Binding/v8/src/x64/codegen-x64.h
index 2ae8145..87db3a9 100644
--- a/V8Binding/v8/src/x64/codegen-x64.h
+++ b/V8Binding/v8/src/x64/codegen-x64.h
@@ -553,7 +553,7 @@ class CodeGenerator: public AstVisitor {
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(AstNode* node);
+ void CodeForStatementPosition(Statement* node);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
diff --git a/V8Binding/v8/src/x64/debug-x64.cc b/V8Binding/v8/src/x64/debug-x64.cc
index f2bb62b..10092c5 100644
--- a/V8Binding/v8/src/x64/debug-x64.cc
+++ b/V8Binding/v8/src/x64/debug-x64.cc
@@ -160,18 +160,6 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateReturnDebugBreakEntry(MacroAssembler* masm) {
- // OK to clobber rbx as we are returning from a JS function through the code
- // generated by CodeGenerator::GenerateReturnSequence()
- ExternalReference debug_break_return =
- ExternalReference(Debug_Address::DebugBreakReturn());
- __ movq(rbx, debug_break_return);
- __ movq(rbx, Operand(rbx, 0));
- __ addq(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(rbx);
-}
-
-
void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Register state for IC store call (from ic-x64.cc).
// ----------- S t a t e -------------
@@ -207,7 +195,7 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Debug::kX64JSReturnSequenceLength >= Debug::kX64CallInstructionLength);
- rinfo()->PatchCodeWithCall(Debug::debug_break_return_entry()->entry(),
+ rinfo()->PatchCodeWithCall(Debug::debug_break_return()->entry(),
Debug::kX64JSReturnSequenceLength - Debug::kX64CallInstructionLength);
}
diff --git a/V8Binding/v8/src/x64/ic-x64.cc b/V8Binding/v8/src/x64/ic-x64.cc
index e2f7c30..8209091 100644
--- a/V8Binding/v8/src/x64/ic-x64.cc
+++ b/V8Binding/v8/src/x64/ic-x64.cc
@@ -95,7 +95,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
__ movq(r2, FieldOperand(r0, kCapacityOffset));
- __ shrl(r2, Immediate(kSmiTagSize)); // convert smi to int
+ __ SmiToInteger32(r2, r2);
__ decl(r2);
// Generate an unrolled loop that performs a few probes before
@@ -132,7 +132,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
__ bind(&done);
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ testl(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
- Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
+ Immediate(Smi::FromInt(PropertyDetails::TypeField::mask())));
__ j(not_zero, miss_label);
// Get the value at the masked, scaled index.
@@ -148,8 +148,7 @@ static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
Register value) {
Label done;
// Check if the value is a Smi.
- __ testl(value, Immediate(kSmiTagMask));
- __ j(zero, &done);
+ __ JumpIfSmi(value, &done);
// Check if the object has been loaded.
__ movq(kScratchRegister, FieldOperand(value, JSFunction::kMapOffset));
__ testb(FieldOperand(kScratchRegister, Map::kBitField2Offset),
@@ -167,7 +166,7 @@ static bool PatchInlinedMapCheck(Address address, Object* map) {
// Arguments are address of start of call sequence that called
// the IC,
Address test_instruction_address =
- address + Assembler::kPatchReturnSequenceLength;
+ address + Assembler::kCallTargetAddressOffset;
// The keyed load has a fast inlined case if the IC call instruction
// is immediately followed by a test instruction.
if (*test_instruction_address != kTestEaxByte) return false;
@@ -236,7 +235,7 @@ void KeyedLoadIC::Generate(MacroAssembler* masm,
__ push(rbx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(f, 2);
+ __ TailCallRuntime(f, 2, 1);
}
@@ -258,15 +257,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
- Label slow, fast, check_string, index_int, index_string;
+ Label slow, check_string, index_int, index_string, check_pixel_array;
// Load name and receiver.
__ movq(rax, Operand(rsp, kPointerSize));
__ movq(rcx, Operand(rsp, 2 * kPointerSize));
// Check that the object isn't a smi.
- __ testl(rcx, Immediate(kSmiTagMask));
- __ j(zero, &slow);
+ __ JumpIfSmi(rcx, &slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
@@ -283,18 +281,42 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ j(not_zero, &slow);
// Check that the key is a smi.
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(not_zero, &check_string);
- __ sarl(rax, Immediate(kSmiTagSize));
+ __ JumpIfNotSmi(rax, &check_string);
+ __ SmiToInteger32(rax, rax);
// Get the elements array of the object.
__ bind(&index_int);
__ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
- __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
- __ j(not_equal, &slow);
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &check_pixel_array);
// Check that the key (index) is within bounds.
__ cmpl(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(below, &fast); // Unsigned comparison rejects negative indices.
+ __ j(above_equal, &slow); // Unsigned comparison rejects negative indices.
+ // Fast case: Do the load.
+ __ movq(rax, Operand(rcx, rax, times_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ j(equal, &slow);
+ __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+ __ ret(0);
+
+ // Check whether the elements array is a pixel array.
+ // rax: untagged index
+ // rcx: elements array
+ __ bind(&check_pixel_array);
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ Heap::kPixelArrayMapRootIndex);
+ __ j(not_equal, &slow);
+ __ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
+ __ j(above_equal, &slow);
+ __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
+ __ movb(rax, Operand(rcx, rax, times_1, 0));
+ __ Integer32ToSmi(rax, rax);
+ __ ret(0);
+
// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
@@ -335,16 +357,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ and_(rax, Immediate((1 << String::kShortLengthShift) - 1));
__ shrl(rax, Immediate(String::kLongLengthShift));
__ jmp(&index_int);
- // Fast case: Do the load.
- __ bind(&fast);
- __ movq(rax, Operand(rcx, rax, times_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ j(equal, &slow);
- __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
- __ ret(0);
}
@@ -373,7 +385,7 @@ void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
__ push(rcx); // return address
// Do tail-call to runtime routine.
- __ TailCallRuntime(f, 3);
+ __ TailCallRuntime(f, 3, 1);
}
@@ -394,7 +406,7 @@ void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
// Do tail-call to runtime routine.
__ TailCallRuntime(
- ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);
+ ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
}
@@ -405,13 +417,12 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// -- rsp[8] : key
// -- rsp[16] : receiver
// -----------------------------------
- Label slow, fast, array, extra;
+ Label slow, fast, array, extra, check_pixel_array;
// Get the receiver from the stack.
__ movq(rdx, Operand(rsp, 2 * kPointerSize)); // 2 ~ return address, key
// Check that the object isn't a smi.
- __ testl(rdx, Immediate(kSmiTagMask));
- __ j(zero, &slow);
+ __ JumpIfSmi(rdx, &slow);
// Get the map from the receiver.
__ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
@@ -422,8 +433,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Get the key from the stack.
__ movq(rbx, Operand(rsp, 1 * kPointerSize)); // 1 ~ return address
// Check that the key is a smi.
- __ testl(rbx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow);
+ __ JumpIfNotSmi(rbx, &slow);
// If it is a smi, make sure it is zero-extended, so it can be
// used as an index in a memory operand.
__ movl(rbx, rbx); // Clear the high bits of rbx.
@@ -440,18 +450,17 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// rbx: index (as a smi), zero-extended.
__ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
- __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
- __ j(not_equal, &slow);
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &check_pixel_array);
// Untag the key (for checking against untagged length in the fixed array).
- __ movl(rdx, rbx);
- __ sarl(rdx, Immediate(kSmiTagSize));
+ __ SmiToInteger32(rdx, rbx);
__ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset));
// rax: value
// rcx: FixedArray
// rbx: index (as a smi)
__ j(below, &fast);
-
// Slow case: Push extra copies of the arguments (3).
__ bind(&slow);
__ pop(rcx);
@@ -460,8 +469,39 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ push(rax);
__ push(rcx);
// Do tail-call to runtime routine.
- __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3);
+ __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+ // Check whether the elements array is a pixel array.
+ // rax: value
+ // rcx: elements array
+ // rbx: index (as a smi), zero-extended.
+ __ bind(&check_pixel_array);
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ Heap::kPixelArrayMapRootIndex);
+ __ j(not_equal, &slow);
+ // Check that the value is a smi. If a conversion is needed, call into the
+ // runtime to convert and clamp.
+ __ JumpIfNotSmi(rax, &slow);
+ __ SmiToInteger32(rbx, rbx);
+ __ cmpl(rbx, FieldOperand(rcx, PixelArray::kLengthOffset));
+ __ j(above_equal, &slow);
+ __ movq(rdx, rax); // Save the value.
+ __ SmiToInteger32(rax, rax);
+ { // Clamp the value to [0..255].
+ Label done, is_negative;
+ __ testl(rax, Immediate(0xFFFFFF00));
+ __ j(zero, &done);
+ __ j(negative, &is_negative);
+ __ movl(rax, Immediate(255));
+ __ jmp(&done);
+ __ bind(&is_negative);
+ __ xorl(rax, rax); // Clear rax.
+ __ bind(&done);
+ }
+ __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
+ __ movb(Operand(rcx, rbx, times_1, 0), rax);
+ __ movq(rax, rdx); // Return the original value.
+ __ ret(0);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -473,16 +513,15 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// rbx: index (as a smi)
// flags: compare (rbx, rdx.length())
__ j(not_equal, &slow); // do not leave holes in the array
- __ sarl(rbx, Immediate(kSmiTagSize)); // untag
+ __ SmiToInteger64(rbx, rbx);
__ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
- // Restore tag and increment.
- __ lea(rbx, Operand(rbx, rbx, times_1, 1 << kSmiTagSize));
+ // Increment and restore smi-tag.
+ __ Integer64AddToSmi(rbx, rbx, 1);
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
- __ subl(rbx, Immediate(1 << kSmiTagSize)); // decrement rbx again
+ __ SmiSubConstant(rbx, rbx, 1, NULL);
__ jmp(&fast);
-
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode; if it is the
// length is always a smi.
@@ -499,7 +538,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ cmpl(rbx, FieldOperand(rdx, JSArray::kLengthOffset));
__ j(above_equal, &extra);
-
// Fast case: Do the store.
__ bind(&fast);
// rax: value
@@ -532,7 +570,7 @@ void CallIC::Generate(MacroAssembler* masm,
__ push(rbx);
// Call the entry.
- CEntryStub stub;
+ CEntryStub stub(1);
__ movq(rax, Immediate(2));
__ movq(rbx, f);
__ CallStub(&stub);
@@ -544,8 +582,7 @@ void CallIC::Generate(MacroAssembler* masm,
// Check if the receiver is a global object of some sort.
Label invoke, global;
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver
- __ testl(rdx, Immediate(kSmiTagMask));
- __ j(zero, &invoke);
+ __ JumpIfSmi(rdx, &invoke);
__ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
__ j(equal, &global);
__ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
@@ -594,8 +631,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// to probe.
//
// Check for number.
- __ testl(rdx, Immediate(kSmiTagMask));
- __ j(zero, &number);
+ __ JumpIfSmi(rdx, &number);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rbx);
__ j(not_equal, &non_number);
__ bind(&number);
@@ -640,8 +676,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,
// Move the result to register rdi and check that it isn't a smi.
__ movq(rdi, rdx);
- __ testl(rdx, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(rdx, miss);
// Check that the value is a JavaScript function.
__ CmpObjectType(rdx, JS_FUNCTION_TYPE, rdx);
@@ -683,8 +718,7 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ movq(rcx, Operand(rsp, (argc + 2) * kPointerSize));
// Check that the receiver isn't a smi.
- __ testl(rdx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(rdx, &miss);
// Check that the receiver is a valid JS object.
// Because there are so many map checks and type checks, do not
@@ -763,7 +797,7 @@ void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
__ push(rbx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(f, 2);
+ __ TailCallRuntime(f, 2, 1);
}
@@ -844,8 +878,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ movq(rax, Operand(rsp, kPointerSize));
// Check that the receiver isn't a smi.
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(rax, &miss);
// Check that the receiver is a valid JS object.
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
@@ -902,7 +935,7 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// The address of the instruction following the call.
Address test_instruction_address =
- address + Assembler::kPatchReturnSequenceLength;
+ address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a test eax, nothing
// was inlined.
if (*test_instruction_address != kTestEaxByte) return false;
@@ -940,7 +973,7 @@ void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
__ push(rbx); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(f, 3);
+ __ TailCallRuntime(f, 3, 1);
}
void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
@@ -959,7 +992,7 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
// Perform tail call to the entry.
__ TailCallRuntime(
- ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);
+ ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
diff --git a/V8Binding/v8/src/x64/macro-assembler-x64.cc b/V8Binding/v8/src/x64/macro-assembler-x64.cc
index a42f628..38ada92 100644
--- a/V8Binding/v8/src/x64/macro-assembler-x64.cc
+++ b/V8Binding/v8/src/x64/macro-assembler-x64.cc
@@ -63,6 +63,13 @@ void MacroAssembler::CompareRoot(Register with,
}
+void MacroAssembler::CompareRoot(Operand with,
+ Heap::RootListIndex index) {
+ LoadRoot(kScratchRegister, index);
+ cmpq(with, kScratchRegister);
+}
+
+
static void RecordWriteHelper(MacroAssembler* masm,
Register object,
Register addr,
@@ -318,7 +325,8 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
- int num_arguments) {
+ int num_arguments,
+ int result_size) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : argument num_arguments - 1
@@ -331,14 +339,15 @@ void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
// should remove this need and make the runtime routine entry code
// smarter.
movq(rax, Immediate(num_arguments));
- JumpToBuiltin(ext);
+ JumpToRuntime(ext, result_size);
}
-void MacroAssembler::JumpToBuiltin(const ExternalReference& ext) {
+void MacroAssembler::JumpToRuntime(const ExternalReference& ext,
+ int result_size) {
// Set the entry point and jump to the C entry runtime stub.
movq(rbx, ext);
- CEntryStub ces;
+ CEntryStub ces(result_size);
movq(kScratchRegister, ces.GetCode(), RelocInfo::CODE_TARGET);
jmp(kScratchRegister);
}
@@ -410,6 +419,757 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
}
+// ----------------------------------------------------------------------------
+// Smi tagging, untagging and tag detection.
+
+
+void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
+ ASSERT_EQ(1, kSmiTagSize);
+ ASSERT_EQ(0, kSmiTag);
+#ifdef DEBUG
+ cmpq(src, Immediate(0xC0000000u));
+ Check(positive, "Smi conversion overflow");
+#endif
+ if (dst.is(src)) {
+ addl(dst, src);
+ } else {
+ lea(dst, Operand(src, src, times_1, 0));
+ }
+}
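// With kSmiTag == 0 and kSmiTagSize == 1, tagging simply doubles the value,
// leaving the low (tag) bit clear. A hedged C++ sketch of the mapping the two
// code paths above compute (assumes the value fits in the smi range):
#include <cstdint>
static inline int32_t TagAsSmi(int32_t value) {
  return value * 2;   // addl(dst, src) when dst == src, lea(dst, [src + src]) otherwise
}
static inline int32_t UntagSmi(int32_t tagged) {
  return tagged / 2;  // the arithmetic right shift used by SmiToInteger32
}
// Example: TagAsSmi(5) == 10 (binary 1010); UntagSmi(10) == 5.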
+
+
+void MacroAssembler::Integer32ToSmi(Register dst,
+ Register src,
+ Label* on_overflow) {
+ ASSERT_EQ(1, kSmiTagSize);
+ ASSERT_EQ(0, kSmiTag);
+ if (!dst.is(src)) {
+ movl(dst, src);
+ }
+ addl(dst, src);
+ j(overflow, on_overflow);
+}
+
+
+void MacroAssembler::Integer64AddToSmi(Register dst,
+ Register src,
+ int constant) {
+#ifdef DEBUG
+ movl(kScratchRegister, src);
+ addl(kScratchRegister, Immediate(constant));
+ Check(no_overflow, "Add-and-smi-convert overflow");
+ Condition valid = CheckInteger32ValidSmiValue(kScratchRegister);
+ Check(valid, "Add-and-smi-convert overflow");
+#endif
+ lea(dst, Operand(src, src, times_1, constant << kSmiTagSize));
+}
+
+
+void MacroAssembler::SmiToInteger32(Register dst, Register src) {
+ ASSERT_EQ(1, kSmiTagSize);
+ ASSERT_EQ(0, kSmiTag);
+ if (!dst.is(src)) {
+ movl(dst, src);
+ }
+ sarl(dst, Immediate(kSmiTagSize));
+}
+
+
+void MacroAssembler::SmiToInteger64(Register dst, Register src) {
+ ASSERT_EQ(1, kSmiTagSize);
+ ASSERT_EQ(0, kSmiTag);
+ movsxlq(dst, src);
+ sar(dst, Immediate(kSmiTagSize));
+}
+
+
+void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
+ Register src,
+ int power) {
+ ASSERT(power >= 0);
+ ASSERT(power < 64);
+ if (power == 0) {
+ SmiToInteger64(dst, src);
+ return;
+ }
+ movsxlq(dst, src);
+ shl(dst, Immediate(power - 1));
+}
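// A tagged smi already equals 2 * value, so shifting the sign-extended tagged
// word left by (power - 1) produces value * 2^power without a separate
// untagging step. Illustrative sketch under the same assumptions (power >= 1;
// the power == 0 case is handled above by SmiToInteger64):
#include <cstdint>
static inline int64_t PositiveSmiTimesPowerOfTwo(int32_t tagged_smi, int power) {
  int64_t doubled_value = static_cast<int64_t>(tagged_smi);  // == 2 * value
  return doubled_value << (power - 1);                       // == value * 2^power
}
// Example: the smi 3 is stored as 6; with power == 3, 6 << 2 == 24 == 3 * 8.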
+
+void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
+ ASSERT_EQ(0, kSmiTag);
+ testl(src, Immediate(kSmiTagMask));
+ j(zero, on_smi);
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
+ Condition not_smi = CheckNotSmi(src);
+ j(not_smi, on_not_smi);
+}
+
+
+void MacroAssembler::JumpIfNotPositiveSmi(Register src,
+ Label* on_not_positive_smi) {
+ Condition not_positive_smi = CheckNotPositiveSmi(src);
+ j(not_positive_smi, on_not_positive_smi);
+}
+
+
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+ int constant,
+ Label* on_equals) {
+ if (Smi::IsValid(constant)) {
+ Condition are_equal = CheckSmiEqualsConstant(src, constant);
+ j(are_equal, on_equals);
+ }
+}
+
+
+void MacroAssembler::JumpIfSmiGreaterEqualsConstant(Register src,
+ int constant,
+ Label* on_greater_equals) {
+ if (Smi::IsValid(constant)) {
+ Condition are_greater_equal = CheckSmiGreaterEqualsConstant(src, constant);
+ j(are_greater_equal, on_greater_equals);
+ } else if (constant < Smi::kMinValue) {
+ jmp(on_greater_equals);
+ }
+}
+
+
+void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
+ Condition is_valid = CheckInteger32ValidSmiValue(src);
+ j(ReverseCondition(is_valid), on_invalid);
+}
+
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register src1,
+ Register src2,
+ Label* on_not_both_smi) {
+ Condition not_both_smi = CheckNotBothSmi(src1, src2);
+ j(not_both_smi, on_not_both_smi);
+}
+
+Condition MacroAssembler::CheckSmi(Register src) {
+ testb(src, Immediate(kSmiTagMask));
+ return zero;
+}
+
+
+Condition MacroAssembler::CheckNotSmi(Register src) {
+ ASSERT_EQ(0, kSmiTag);
+ testb(src, Immediate(kSmiTagMask));
+ return not_zero;
+}
+
+
+Condition MacroAssembler::CheckPositiveSmi(Register src) {
+ ASSERT_EQ(0, kSmiTag);
+ testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
+ return zero;
+}
+
+
+Condition MacroAssembler::CheckNotPositiveSmi(Register src) {
+ ASSERT_EQ(0, kSmiTag);
+ testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
+ return not_zero;
+}
+
+
+Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
+ if (first.is(second)) {
+ return CheckSmi(first);
+ }
+ movl(kScratchRegister, first);
+ orl(kScratchRegister, second);
+ return CheckSmi(kScratchRegister);
+}
+
+
+Condition MacroAssembler::CheckNotBothSmi(Register first, Register second) {
+ ASSERT_EQ(0, kSmiTag);
+ if (first.is(second)) {
+ return CheckNotSmi(first);
+ }
+ movl(kScratchRegister, first);
+ or_(kScratchRegister, second);
+ return CheckNotSmi(kScratchRegister);
+}
+
+
+Condition MacroAssembler::CheckIsMinSmi(Register src) {
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ cmpl(src, Immediate(0x40000000));
+ return equal;
+}
+
+Condition MacroAssembler::CheckSmiEqualsConstant(Register src, int constant) {
+ if (constant == 0) {
+ testl(src, src);
+ return zero;
+ }
+ if (Smi::IsValid(constant)) {
+ cmpl(src, Immediate(Smi::FromInt(constant)));
+ return zero;
+ }
+ // Can't be equal.
+ UNREACHABLE();
+ return no_condition;
+}
+
+
+Condition MacroAssembler::CheckSmiGreaterEqualsConstant(Register src,
+ int constant) {
+ if (constant == 0) {
+ testl(src, Immediate(static_cast<uint32_t>(0x80000000u)));
+ return positive;
+ }
+ if (Smi::IsValid(constant)) {
+ cmpl(src, Immediate(Smi::FromInt(constant)));
+ return greater_equal;
+ }
+ // Constants outside the smi range should not reach this check.
+ UNREACHABLE();
+ return no_condition;
+}
+
+
+Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
+ // A 32-bit integer value can be converted to a smi if it is in the
+ // range [-2^30 .. 2^30-1]. Equivalently, bits 30 and 31 of its 32-bit
+ // representation are equal.
+ cmpl(src, Immediate(0xC0000000u));
+ return positive;
+}
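// The single compare against 0xC0000000 works because valid smi values lie in
// [-2^30 .. 2^30-1]: their 32-bit patterns are 0x00000000..0x3FFFFFFF and
// 0xC0000000..0xFFFFFFFF, i.e. bits 30 and 31 agree. Subtracting 0xC0000000
// leaves the sign flag clear exactly for those patterns. Hedged sketch:
#include <cstdint>
static inline bool IsValidSmiValue(int32_t value) {
  uint32_t bits = static_cast<uint32_t>(value);
  return static_cast<int32_t>(bits - 0xC0000000u) >= 0;  // the "positive" condition
}
// IsValidSmiValue(0x3FFFFFFF) and IsValidSmiValue(-0x40000000) hold;
// IsValidSmiValue(0x40000000) does not.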
+
+
+void MacroAssembler::SmiNeg(Register dst,
+ Register src,
+ Label* on_not_smi_result) {
+ if (!dst.is(src)) {
+ movl(dst, src);
+ }
+ negl(dst);
+ testl(dst, Immediate(0x7fffffff));
+ // If the result is zero or 0x80000000, negation failed to create a smi.
+ j(equal, on_not_smi_result);
+}
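// Negation fails for exactly two tagged inputs: 0 (JavaScript -0 must be a
// heap number, not a smi) and the minimum smi, whose negation overflows the
// smi range. Both leave the low 31 bits of the negated word zero, which is
// what the testl against 0x7fffffff detects. Illustrative sketch:
#include <cstdint>
static inline bool SmiNegationStaysSmi(uint32_t tagged_smi) {
  uint32_t negated = 0u - tagged_smi;
  return (negated & 0x7fffffffu) != 0;  // false only for 0 and 0x80000000
}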
+
+
+void MacroAssembler::SmiAdd(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result) {
+ ASSERT(!dst.is(src2));
+ if (!dst.is(src1)) {
+ movl(dst, src1);
+ }
+ addl(dst, src2);
+ if (!dst.is(src1)) {
+ j(overflow, on_not_smi_result);
+ } else {
+ Label smi_result;
+ j(no_overflow, &smi_result);
+ // Restore src1.
+ subl(src1, src2);
+ jmp(on_not_smi_result);
+ bind(&smi_result);
+ }
+}
+
+
+
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result) {
+ ASSERT(!dst.is(src2));
+ if (!dst.is(src1)) {
+ movl(dst, src1);
+ }
+ subl(dst, src2);
+ if (!dst.is(src1)) {
+ j(overflow, on_not_smi_result);
+ } else {
+ Label smi_result;
+ j(no_overflow, &smi_result);
+ // Restore src1.
+ addl(src1, src2);
+ jmp(on_not_smi_result);
+ bind(&smi_result);
+ }
+}
+
+
+void MacroAssembler::SmiMul(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result) {
+ ASSERT(!dst.is(src2));
+
+ if (dst.is(src1)) {
+ movq(kScratchRegister, src1);
+ }
+ SmiToInteger32(dst, src1);
+
+ imull(dst, src2);
+ j(overflow, on_not_smi_result);
+
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case. The frame is unchanged
+ // in this block, so local control flow can use a Label rather
+ // than a JumpTarget.
+ Label non_zero_result;
+ testl(dst, dst);
+ j(not_zero, &non_zero_result);
+
+ // Test whether either operand is negative (the other must be zero).
+ orl(kScratchRegister, src2);
+ j(negative, on_not_smi_result);
+ bind(&non_zero_result);
+}
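// The post-multiply test covers the JavaScript -0 case: when the product is
// zero but one operand was negative (e.g. -4 * 0), the correct result is the
// heap number -0, not the smi 0, so the code falls back to the slow path.
// Hedged sketch of the extra check:
#include <cstdint>
static inline bool ZeroProductNeedsSlowPath(int32_t product, int32_t a, int32_t b) {
  return product == 0 && (a | b) < 0;  // sign of the or-ed operands, as above
}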
+
+
+void MacroAssembler::SmiTryAddConstant(Register dst,
+ Register src,
+ int32_t constant,
+ Label* on_not_smi_result) {
+ // Does not assume that src is a smi.
+ ASSERT_EQ(1, kSmiTagMask);
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT(Smi::IsValid(constant));
+
+ Register tmp = (src.is(dst) ? kScratchRegister : dst);
+ movl(tmp, src);
+ addl(tmp, Immediate(Smi::FromInt(constant)));
+ if (tmp.is(kScratchRegister)) {
+ j(overflow, on_not_smi_result);
+ testl(tmp, Immediate(kSmiTagMask));
+ j(not_zero, on_not_smi_result);
+ movl(dst, tmp);
+ } else {
+ movl(kScratchRegister, Immediate(kSmiTagMask));
+ cmovl(overflow, dst, kScratchRegister);
+ testl(dst, kScratchRegister);
+ j(not_zero, on_not_smi_result);
+ }
+}
+
+
+void MacroAssembler::SmiAddConstant(Register dst,
+ Register src,
+ int32_t constant,
+ Label* on_not_smi_result) {
+ ASSERT(Smi::IsValid(constant));
+ if (on_not_smi_result == NULL) {
+ if (dst.is(src)) {
+ movl(dst, src);
+ } else {
+ lea(dst, Operand(src, constant << kSmiTagSize));
+ }
+ } else {
+ if (!dst.is(src)) {
+ movl(dst, src);
+ }
+ addl(dst, Immediate(Smi::FromInt(constant)));
+ if (!dst.is(src)) {
+ j(overflow, on_not_smi_result);
+ } else {
+ Label result_ok;
+ j(no_overflow, &result_ok);
+ subl(dst, Immediate(Smi::FromInt(constant)));
+ jmp(on_not_smi_result);
+ bind(&result_ok);
+ }
+ }
+}
+
+
+void MacroAssembler::SmiSubConstant(Register dst,
+ Register src,
+ int32_t constant,
+ Label* on_not_smi_result) {
+ ASSERT(Smi::IsValid(constant));
+ Smi* smi_value = Smi::FromInt(constant);
+ if (dst.is(src)) {
+ // Optimistic subtract - may change value of dst register,
+ // if it has garbage bits in the higher half, but will not change
+ // the value as a tagged smi.
+ subl(dst, Immediate(smi_value));
+ if (on_not_smi_result != NULL) {
+ Label add_success;
+ j(no_overflow, &add_success);
+ addl(dst, Immediate(smi_value));
+ jmp(on_not_smi_result);
+ bind(&add_success);
+ }
+ } else {
+ UNIMPLEMENTED(); // Not used yet.
+ }
+}
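// "Optimistic" here means the 32-bit subtract is issued before overflow is
// known: it clears any garbage in the upper half of the 64-bit register,
// which changes the raw register contents but not the value read as a tagged
// smi, and the addl on the overflow path restores the original smi before
// jumping to the slow case.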
+
+
+void MacroAssembler::SmiDiv(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result) {
+ ASSERT(!src2.is(rax));
+ ASSERT(!src2.is(rdx));
+ ASSERT(!src1.is(rdx));
+
+ // Check for 0 divisor (result is +/-Infinity).
+ Label positive_divisor;
+ testl(src2, src2);
+ j(zero, on_not_smi_result);
+ j(positive, &positive_divisor);
+ // Check for negative zero result. If the dividend is zero, and the
+ // divisor is negative, return a floating point negative zero.
+ testl(src1, src1);
+ j(zero, on_not_smi_result);
+ bind(&positive_divisor);
+
+ // Sign extend src1 into edx:eax.
+ if (!src1.is(rax)) {
+ movl(rax, src1);
+ }
+ cdq();
+
+ idivl(src2);
+ // Check for the corner case of dividing the most negative smi by
+ // -1. We cannot use the overflow flag, since it is not set by
+ // the idiv instruction.
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ cmpl(rax, Immediate(0x40000000));
+ j(equal, on_not_smi_result);
+ // Check that the remainder is zero.
+ testl(rdx, rdx);
+ j(not_zero, on_not_smi_result);
+ // Tag the result and store it in the destination register.
+ Integer32ToSmi(dst, rax);
+}
+
+
+void MacroAssembler::SmiMod(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!src2.is(rax));
+ ASSERT(!src2.is(rdx));
+ ASSERT(!src1.is(rdx));
+
+ testl(src2, src2);
+ j(zero, on_not_smi_result);
+
+ if (src1.is(rax)) {
+ // Must remember the value to see if a zero result should
+ // be a negative zero.
+ movl(kScratchRegister, rax);
+ } else {
+ movl(rax, src1);
+ }
+ // Sign extend eax into edx:eax.
+ cdq();
+ idivl(src2);
+ // Check for a negative zero result. If the result is zero, and the
+ // dividend is negative, return a floating point negative zero.
+ Label non_zero_result;
+ testl(rdx, rdx);
+ j(not_zero, &non_zero_result);
+ if (src1.is(rax)) {
+ testl(kScratchRegister, kScratchRegister);
+ } else {
+ testl(src1, src1);
+ }
+ j(negative, on_not_smi_result);
+ bind(&non_zero_result);
+ if (!dst.is(rdx)) {
+ movl(dst, rdx);
+ }
+}
+
+
+void MacroAssembler::SmiNot(Register dst, Register src) {
+ if (dst.is(src)) {
+ not_(dst);
+ // Remove inverted smi-tag. The mask is sign-extended to 64 bits.
+ xor_(src, Immediate(kSmiTagMask));
+ } else {
+ ASSERT_EQ(0, kSmiTag);
+ lea(dst, Operand(src, kSmiTagMask));
+ not_(dst);
+ }
+}
+
+
+void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
+ if (!dst.is(src1)) {
+ movl(dst, src1);
+ }
+ and_(dst, src2);
+}
+
+
+void MacroAssembler::SmiAndConstant(Register dst, Register src, int constant) {
+ ASSERT(Smi::IsValid(constant));
+ if (!dst.is(src)) {
+ movl(dst, src);
+ }
+ and_(dst, Immediate(Smi::FromInt(constant)));
+}
+
+
+void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
+ if (!dst.is(src1)) {
+ movl(dst, src1);
+ }
+ or_(dst, src2);
+}
+
+
+void MacroAssembler::SmiOrConstant(Register dst, Register src, int constant) {
+ ASSERT(Smi::IsValid(constant));
+ if (!dst.is(src)) {
+ movl(dst, src);
+ }
+ or_(dst, Immediate(Smi::FromInt(constant)));
+}
+
+void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
+ if (!dst.is(src1)) {
+ movl(dst, src1);
+ }
+ xor_(dst, src2);
+}
+
+
+void MacroAssembler::SmiXorConstant(Register dst, Register src, int constant) {
+ ASSERT(Smi::IsValid(constant));
+ if (!dst.is(src)) {
+ movl(dst, src);
+ }
+ xor_(dst, Immediate(Smi::FromInt(constant)));
+}
+
+
+
+void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
+ Register src,
+ int shift_value) {
+ if (shift_value > 0) {
+ if (dst.is(src)) {
+ sarl(dst, Immediate(shift_value));
+ and_(dst, Immediate(~kSmiTagMask));
+ } else {
+ UNIMPLEMENTED(); // Not used.
+ }
+ }
+}
+
+
+void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
+ Register src,
+ int shift_value,
+ Label* on_not_smi_result) {
+ // Logical right shift interprets its result as an *unsigned* number.
+ if (dst.is(src)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ movl(dst, src);
+ // Untag the smi.
+ sarl(dst, Immediate(kSmiTagSize));
+ if (shift_value < 2) {
+ // A negative Smi shifted right by two or more is in the positive Smi
+ // range, but shifted by only zero or one it never is.
+ j(negative, on_not_smi_result);
+ }
+ if (shift_value > 0) {
+ // Do the right shift on the integer value.
+ shrl(dst, Immediate(shift_value));
+ }
+ // Re-tag the result.
+ addl(dst, dst);
+ }
+}
+
+
+void MacroAssembler::SmiShiftLeftConstant(Register dst,
+ Register src,
+ int shift_value,
+ Label* on_not_smi_result) {
+ if (dst.is(src)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ movl(dst, src);
+ if (shift_value > 0) {
+ // Treat dst as an untagged integer value equal to two times the
+ // smi value of src, i.e., already shifted left by one.
+ if (shift_value > 1) {
+ shll(dst, Immediate(shift_value - 1));
+ }
+ // Convert int result to Smi, checking that it is in smi range.
+ ASSERT(kSmiTagSize == 1); // adjust code if not the case
+ Integer32ToSmi(dst, dst, on_not_smi_result);
+ }
+ }
+}
+
+
+void MacroAssembler::SmiShiftLeft(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result) {
+ ASSERT(!dst.is(rcx));
+ Label result_ok;
+ // Untag both operands.
+ SmiToInteger32(dst, src1);
+ SmiToInteger32(rcx, src2);
+ shll(dst);
+ // Check that the *signed* result fits in a smi.
+ Condition is_valid = CheckInteger32ValidSmiValue(dst);
+ j(is_valid, &result_ok);
+ // Restore the relevant bits of the source registers
+ // and call the slow version.
+ if (dst.is(src1)) {
+ shrl(dst);
+ Integer32ToSmi(dst, dst);
+ }
+ Integer32ToSmi(rcx, rcx);
+ jmp(on_not_smi_result);
+ bind(&result_ok);
+ Integer32ToSmi(dst, dst);
+}
+
+
+void MacroAssembler::SmiShiftLogicalRight(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result) {
+ ASSERT(!dst.is(rcx));
+ Label result_ok;
+ // Untag both operands.
+ SmiToInteger32(dst, src1);
+ SmiToInteger32(rcx, src2);
+
+ shrl(dst);
+ // Check that the *unsigned* result fits in a smi.
+ // I.e., that it is a valid positive smi value. The positive smi
+ // values are 0..0x3fffffff, i.e., neither of the top-most two
+ // bits can be set.
+ //
+ // These two cases can only happen with shifts by 0 or 1 when
+ // handed a valid smi. If the answer cannot be represented by a
+ // smi, restore the left and right arguments, and jump to slow
+ // case. The low bit of the left argument may be lost, but only
+ // in a case where it is dropped anyway.
+ testl(dst, Immediate(0xc0000000));
+ j(zero, &result_ok);
+ if (dst.is(src1)) {
+ shll(dst);
+ Integer32ToSmi(dst, dst);
+ }
+ Integer32ToSmi(rcx, rcx);
+ jmp(on_not_smi_result);
+ bind(&result_ok);
+ // Smi-tag the result.
+ Integer32ToSmi(dst, dst);
+}
+
+
+void MacroAssembler::SmiShiftArithmeticRight(Register dst,
+ Register src1,
+ Register src2) {
+ ASSERT(!dst.is(rcx));
+ // Untag both operands.
+ SmiToInteger32(dst, src1);
+ SmiToInteger32(rcx, src2);
+ // Shift as integer.
+ sarl(dst);
+ // Retag result.
+ Integer32ToSmi(dst, dst);
+}
+
+
+void MacroAssembler::SelectNonSmi(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smis) {
+ ASSERT(!dst.is(src1));
+ ASSERT(!dst.is(src2));
+ // The operands must not both be smis.
+#ifdef DEBUG
+ Condition not_both_smis = CheckNotBothSmi(src1, src2);
+ Check(not_both_smis, "Both registers were smis.");
+#endif
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ movq(kScratchRegister, Immediate(kSmiTagMask));
+ and_(kScratchRegister, src1);
+ testl(kScratchRegister, src2);
+ j(not_zero, on_not_smis);
+ // One operand is a smi.
+
+ ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+ // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
+ subq(kScratchRegister, Immediate(1));
+ // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
+ movq(dst, src1);
+ xor_(dst, src2);
+ and_(dst, kScratchRegister);
+ // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
+ xor_(dst, src1);
+ // If src1 is a smi, dst is src2, else it is src1, i.e., a non-smi.
+}
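// Branch-free selection: (src1 & kSmiTagMask) - 1 is all ones when src1 is a
// smi and all zeros otherwise, so the xor/and/xor sequence yields src2 in the
// first case and src1 in the second. Hedged 64-bit sketch of the same trick:
#include <cstdint>
static inline uint64_t SelectNonSmiWord(uint64_t src1, uint64_t src2) {
  uint64_t mask = (src1 & 1) - 1;        // ~0 if src1 is a smi, 0 otherwise
  return src1 ^ ((src1 ^ src2) & mask);  // src2 if src1 is a smi, else src1
}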
+
+
+SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
+ ASSERT(is_uint6(shift));
+ if (shift == 0) { // times_1.
+ SmiToInteger32(dst, src);
+ return SmiIndex(dst, times_1);
+ }
+ if (shift <= 4) { // Multipliers of 2 to 16 are handled using ScaleFactor.
+ // We expect that all smis are actually zero-padded. If this holds after
+ // checking, this line can be omitted.
+ movl(dst, src); // Ensure that the smi is zero-padded.
+ return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
+ }
+ // Shift by shift-kSmiTagSize.
+ movl(dst, src); // Ensure that the smi is zero-padded.
+ shl(dst, Immediate(shift - kSmiTagSize));
+ return SmiIndex(dst, times_1);
+}
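// The tagged smi already carries a factor of two, so for shifts of 1..4 the
// multiplier 2^shift is obtained by scaling the *tagged* word with the
// hardware ScaleFactor 2^(shift-1); only larger shifts need an explicit shift
// instruction. Hedged sketch of the index contribution being computed:
#include <cstdint>
static inline int64_t ScaledIndexContribution(int32_t tagged_smi, int shift) {
  int64_t value = tagged_smi / 2;  // the untagged (positive) smi value
  // shift == 0: untagged value scaled by times_1.
  // 1 <= shift <= 4: tagged value (2 * value) scaled by 2^(shift - 1).
  // shift > 4: tagged value shifted left by (shift - 1).
  return value << shift;           // all three cases compute value * 2^shift
}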
+
+
+SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
+ Register src,
+ int shift) {
+ // Register src holds a positive smi.
+ ASSERT(is_uint6(shift));
+ if (shift == 0) { // times_1.
+ SmiToInteger32(dst, src);
+ neg(dst);
+ return SmiIndex(dst, times_1);
+ }
+ if (shift <= 4) { // Multipliers of 2 to 16 are handled using ScaleFactor.
+ movl(dst, src);
+ neg(dst);
+ return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
+ }
+ // Shift by shift-kSmiTagSize.
+ movl(dst, src);
+ neg(dst);
+ shl(dst, Immediate(shift - kSmiTagSize));
+ return SmiIndex(dst, times_1);
+}
+
+
+
bool MacroAssembler::IsUnsafeSmi(Smi* value) {
return false;
}
@@ -518,7 +1278,7 @@ void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
#endif
jmp(kScratchRegister);
#ifdef DEBUG
- ASSERT_EQ(kPatchReturnSequenceLength,
+ ASSERT_EQ(kCallTargetAddressOffset,
SizeOfCodeGeneratedSince(&target) + kPointerSize);
#endif
}
@@ -547,7 +1307,7 @@ void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
#endif
call(kScratchRegister);
#ifdef DEBUG
- ASSERT_EQ(kPatchReturnSequenceLength,
+ ASSERT_EQ(kCallTargetAddressOffset,
SizeOfCodeGeneratedSince(&target) + kPointerSize);
#endif
}
@@ -597,7 +1357,7 @@ void MacroAssembler::Ret() {
void MacroAssembler::FCmp() {
- fcompp();
+ fucompp();
push(rax);
fnstsw_ax();
if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
@@ -819,7 +1579,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
Bootstrapper::FixupFlagsUseCodeObject::encode(false);
Unresolved entry =
- { pc_offset() - kPatchReturnSequenceLength, flags, name };
+ { pc_offset() - kCallTargetAddressOffset, flags, name };
unresolved_.Add(entry);
}
}
@@ -971,7 +1731,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
-void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
+void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) {
ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
// Setup the frame structure on the stack.
@@ -1016,6 +1776,21 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
}
#endif
+#ifdef _WIN64
+ // Reserve space on stack for result and argument structures, if necessary.
+ int result_stack_space = (result_size < 2) ? 0 : result_size * kPointerSize;
+ // Reserve space for the Arguments object. The Windows 64-bit ABI
+ // requires us to pass this structure as a pointer to its location on
+ // the stack. The structure contains 2 values.
+ int argument_stack_space = 2 * kPointerSize;
+ // We also need backing space for 4 parameters, even though
+ // we only pass one or two parameters, and they are passed in registers.
+ int argument_mirror_space = 4 * kPointerSize;
+ int total_stack_space =
+ argument_mirror_space + argument_stack_space + result_stack_space;
+ subq(rsp, Immediate(total_stack_space));
+#endif
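// With kPointerSize == 8 this reserves the 32 bytes of register-parameter
// home ("mirror") space required by the Windows x64 calling convention,
// 16 bytes for the Arguments structure that is passed by pointer, and 16
// more bytes only when the runtime call returns a two-word result:
//   total = 4 * 8 + 2 * 8 + (result_size < 2 ? 0 : result_size * 8)
// i.e. 48 bytes for result_size == 1 and 64 bytes for result_size == 2.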
+
// Get the required frame alignment for the OS.
static const int kFrameAlignment = OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
@@ -1024,30 +1799,19 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
and_(rsp, kScratchRegister);
}
-#ifdef _WIN64
- // Reserve space for the Arguments object. The Windows 64-bit ABI
- // requires us to pass this structure as a pointer to its location on
- // the stack. The structure contains 2 pointers.
- // The structure on the stack must be 16-byte aligned.
- // We also need backing space for 4 parameters, even though
- // we only pass one parameter, and it is in a register.
- subq(rsp, Immediate(6 * kPointerSize));
- ASSERT(kFrameAlignment == 2 * kPointerSize); // Change the padding if needed.
-#endif
-
// Patch the saved entry sp.
movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
-void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+void MacroAssembler::LeaveExitFrame(StackFrame::Type type, int result_size) {
// Registers:
// r15 : argv
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
if (type == StackFrame::EXIT_DEBUG) {
- // It's okay to clobber register ebx below because we don't need
+ // It's okay to clobber register rbx below because we don't need
// the function pointer after this.
const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
@@ -1060,7 +1824,18 @@ void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
movq(rcx, Operand(rbp, 1 * kPointerSize));
movq(rbp, Operand(rbp, 0 * kPointerSize));
- // Pop the arguments and the receiver from the caller stack.
+#ifdef _WIN64
+ // If return value is on the stack, pop it to registers.
+ if (result_size > 1) {
+ ASSERT_EQ(2, result_size);
+ // Position above 4 argument mirrors and arguments object.
+ movq(rax, Operand(rsp, 6 * kPointerSize));
+ movq(rdx, Operand(rsp, 7 * kPointerSize));
+ }
+#endif
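// Slot layout set up by EnterExitFrame: slots 0..3 are the four register-
// parameter mirrors and slots 4..5 hold the Arguments structure, so a
// two-word result written by the callee is found in slots 6 and 7
// (offsets 48 and 56 from rsp).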
+
+ // Pop everything up to and including the arguments and the receiver
+ // from the caller stack.
lea(rsp, Operand(r15, 1 * kPointerSize));
// Restore current context from top and clear it in debug mode.
@@ -1284,12 +2059,12 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
}
-void MacroAssembler::AllocateObjectInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
@@ -1313,14 +2088,14 @@ void MacroAssembler::AllocateObjectInNewSpace(int object_size,
}
-void MacroAssembler::AllocateObjectInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
@@ -1344,12 +2119,12 @@ void MacroAssembler::AllocateObjectInNewSpace(int header_size,
}
-void MacroAssembler::AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
// Load address of new object into result.
LoadAllocationTopHelper(result, result_end, scratch, flags);
@@ -1389,4 +2164,23 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
}
+CodePatcher::CodePatcher(byte* address, int size)
+ : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+ // Create a new macro assembler pointing to the address of the code to patch.
+ // The size is adjusted with kGap in order for the assembler to generate size
+ // bytes of instructions without failing with buffer size constraints.
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+ // Indicate that code has changed.
+ CPU::FlushICache(address_, size_);
+
+ // Check that the code was patched as expected.
+ ASSERT(masm_.pc_ == address_ + size_);
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
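// A hypothetical usage sketch (assumes the class exposes the wrapped
// assembler through an accessor such as masm(), declared in the header
// rather than in this file):
//
//   CodePatcher patcher(address, size_in_bytes);
//   patcher.masm()->nop();  // emit exactly size_in_bytes bytes over the range
//
// The constructor checks the relocation writer position and the destructor
// flushes the instruction cache over the patched range.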
+
+
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/x64/macro-assembler-x64.h b/V8Binding/v8/src/x64/macro-assembler-x64.h
index 8fc7a9c..adc136a 100644
--- a/V8Binding/v8/src/x64/macro-assembler-x64.h
+++ b/V8Binding/v8/src/x64/macro-assembler-x64.h
@@ -41,6 +41,13 @@ static const Register kScratchRegister = r10;
// Forward declaration.
class JumpTarget;
+struct SmiIndex {
+ SmiIndex(Register index_register, ScaleFactor scale)
+ : reg(index_register),
+ scale(scale) {}
+ Register reg;
+ ScaleFactor scale;
+};
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
@@ -49,6 +56,7 @@ class MacroAssembler: public Assembler {
void LoadRoot(Register destination, Heap::RootListIndex index);
void CompareRoot(Register with, Heap::RootListIndex index);
+ void CompareRoot(Operand with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
// ---------------------------------------------------------------------------
@@ -87,15 +95,15 @@ class MacroAssembler: public Assembler {
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
// Enter specific kind of exit frame; either EXIT or
- // EXIT_DEBUG. Expects the number of arguments in register eax and
- // sets up the number of arguments in register edi and the pointer
- // to the first argument in register esi.
- void EnterExitFrame(StackFrame::Type type);
+ // EXIT_DEBUG. Expects the number of arguments in register rax and
+ // sets up the number of arguments in register rdi and the pointer
+ // to the first argument in register rsi.
+ void EnterExitFrame(StackFrame::Type type, int result_size = 1);
- // Leave the current exit frame. Expects the return value in
- // register eax:edx (untouched) and the pointer to the first
- // argument in register esi.
- void LeaveExitFrame(StackFrame::Type type);
+ // Leave the current exit frame. Expects/provides the return value in
+ // register rax:rdx (untouched) and the pointer to the first
+ // argument in register rsi.
+ void LeaveExitFrame(StackFrame::Type type, int result_size = 1);
// ---------------------------------------------------------------------------
@@ -126,6 +134,239 @@ class MacroAssembler: public Assembler {
// Store the code object for the given builtin in the target register.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+ // ---------------------------------------------------------------------------
+ // Smi tagging, untagging and operations on tagged smis.
+
+ // Conversions between tagged smi values and non-tagged integer values.
+
+ // Tag an integer value. The result must be known to be a valid smi value.
+ // Only uses the low 32 bits of the src register.
+ void Integer32ToSmi(Register dst, Register src);
+
+ // Tag an integer value if possible, or jump if the integer value cannot be
+ // represented as a smi. Only uses the low 32 bits of the src register.
+ void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
+
+ // Adds constant to src and tags the result as a smi.
+ // Result must be a valid smi.
+ void Integer64AddToSmi(Register dst, Register src, int constant);
+
+ // Convert smi to 32-bit integer. I.e., not sign extended into
+ // high 32 bits of destination.
+ void SmiToInteger32(Register dst, Register src);
+
+ // Convert smi to 64-bit integer (sign extended if necessary).
+ void SmiToInteger64(Register dst, Register src);
+
+ // Multiply a positive smi's integer value by a power of two.
+ // Provides result as 64-bit integer value.
+ void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
+ Register src,
+ int power);
+
+ // Functions performing a check on a known or potential smi. Returns
+ // a condition that is satisfied if the check is successful.
+
+ // Is the value a tagged smi.
+ Condition CheckSmi(Register src);
+
+ // Is the value not a tagged smi.
+ Condition CheckNotSmi(Register src);
+
+ // Is the value a positive tagged smi.
+ Condition CheckPositiveSmi(Register src);
+
+ // Is the value not a positive tagged smi.
+ Condition CheckNotPositiveSmi(Register src);
+
+ // Are both values tagged smis.
+ Condition CheckBothSmi(Register first, Register second);
+
+ // Is one of the values not a tagged smi.
+ Condition CheckNotBothSmi(Register first, Register second);
+
+ // Is the value the minimum smi value (since we are using
+ // two's complement numbers, negating the value is known to yield
+ // a non-smi value).
+ Condition CheckIsMinSmi(Register src);
+
+ // Check whether a tagged smi is equal to a constant.
+ Condition CheckSmiEqualsConstant(Register src, int constant);
+
+ // Check whether a tagged smi is greater than or equal to a constant.
+ Condition CheckSmiGreaterEqualsConstant(Register src, int constant);
+
+ // Checks whether a 32-bit integer value is valid for conversion
+ // to a smi.
+ Condition CheckInteger32ValidSmiValue(Register src);
+
+ // Test-and-jump functions. Typically combine a check function
+ // above with a conditional jump.
+
+ // Jump if the value cannot be represented by a smi.
+ void JumpIfNotValidSmiValue(Register src, Label* on_invalid);
+
+ // Jump to label if the value is a tagged smi.
+ void JumpIfSmi(Register src, Label* on_smi);
+
+ // Jump to label if the value is not a tagged smi.
+ void JumpIfNotSmi(Register src, Label* on_not_smi);
+
+ // Jump to label if the value is not a positive tagged smi.
+ void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);
+
+ // Jump to label if the value is a tagged smi with value equal
+ // to the constant.
+ void JumpIfSmiEqualsConstant(Register src, int constant, Label* on_equals);
+
+ // Jump to label if the value is a tagged smi with value greater than or equal
+ // to the constant.
+ void JumpIfSmiGreaterEqualsConstant(Register src,
+ int constant,
+ Label* on_equals);
+
+ // Jump if either or both register are not smi values.
+ void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
+
+ // Operations on tagged smi values.
+
+ // Smis represent a subset of integers. The subset is always equivalent to
+ // a two's complement interpretation of a fixed number of bits.
+
+ // Optimistically adds an integer constant to a supposed smi.
+ // If the src is not a smi, or the result is not a smi, jump to
+ // the label.
+ void SmiTryAddConstant(Register dst,
+ Register src,
+ int32_t constant,
+ Label* on_not_smi_result);
+
+ // Add an integer constant to a tagged smi, giving a tagged smi as result,
+ // or jumping to a label if the result cannot be represented by a smi.
+ // If the label is NULL, no testing on the result is done.
+ void SmiAddConstant(Register dst,
+ Register src,
+ int32_t constant,
+ Label* on_not_smi_result);
+
+ // Subtract an integer constant from a tagged smi, giving a tagged smi as
+ // result, or jumping to a label if the result cannot be represented by a smi.
+ // If the label is NULL, no testing on the result is done.
+ void SmiSubConstant(Register dst,
+ Register src,
+ int32_t constant,
+ Label* on_not_smi_result);
+
+ // Negating a smi can give a negative zero or too large positive value.
+ void SmiNeg(Register dst,
+ Register src,
+ Label* on_not_smi_result);
+
+ // Adds smi values and returns the result as a smi.
+ // If dst is src1, then src1 will be destroyed, even if
+ // the operation is unsuccessful.
+ void SmiAdd(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result);
+
+ // Subtracts smi values and returns the result as a smi.
+ // If dst is src1, then src1 will be destroyed, even if
+ // the operation is unsuccessful.
+ void SmiSub(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result);
+
+ // Multiplies smi values and returns the result as a smi,
+ // if possible.
+ // If dst is src1, then src1 will be destroyed, even if
+ // the operation is unsuccessful.
+ void SmiMul(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result);
+
+ // Divides one smi by another and returns the quotient.
+ // Clobbers rax and rdx registers.
+ void SmiDiv(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result);
+
+ // Divides one smi by another and returns the remainder.
+ // Clobbers rax and rdx registers.
+ void SmiMod(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result);
+
+ // Bitwise operations.
+ void SmiNot(Register dst, Register src);
+ void SmiAnd(Register dst, Register src1, Register src2);
+ void SmiOr(Register dst, Register src1, Register src2);
+ void SmiXor(Register dst, Register src1, Register src2);
+ void SmiAndConstant(Register dst, Register src1, int constant);
+ void SmiOrConstant(Register dst, Register src1, int constant);
+ void SmiXorConstant(Register dst, Register src1, int constant);
+
+ void SmiShiftLeftConstant(Register dst,
+ Register src,
+ int shift_value,
+ Label* on_not_smi_result);
+ void SmiShiftLogicalRightConstant(Register dst,
+ Register src,
+ int shift_value,
+ Label* on_not_smi_result);
+ void SmiShiftArithmeticRightConstant(Register dst,
+ Register src,
+ int shift_value);
+
+ // Shifts a smi value to the left, and returns the result if that is a smi.
+ // Uses and clobbers rcx, so dst may not be rcx.
+ void SmiShiftLeft(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result);
+ // Shifts a smi value to the right, shifting in zero bits at the top, and
+ // returns the unsigned interpretation of the result if that is a smi.
+ // Uses and clobbers rcx, so dst may not be rcx.
+ void SmiShiftLogicalRight(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result);
+ // Shifts a smi value to the right, sign extending the top, and
+ // returns the signed interpretation of the result. That will always
+ // be a valid smi value, since it's numerically smaller than the
+ // original.
+ // Uses and clobbers rcx, so dst may not be rcx.
+ void SmiShiftArithmeticRight(Register dst,
+ Register src1,
+ Register src2);
+
+ // Specialized operations
+
+ // Select the non-smi register of two registers where exactly one is a
+ // smi. If neither are smis, jump to the failure label.
+ void SelectNonSmi(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smis);
+
+ // Converts, if necessary, a smi to a combination of number and
+ // multiplier to be used as a scaled index.
+ // The src register contains a *positive* smi value. The shift is the
+ // power of two to multiply the index value by (e.g.
+ // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
+ // The returned index register may be either src or dst, depending
+ // on what is most efficient. If src and dst are different registers,
+ // src is always unchanged.
+ SmiIndex SmiToIndex(Register dst, Register src, int shift);
+
+ // Converts a positive smi to a negative index.
+ SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
+
// ---------------------------------------------------------------------------
// Macro instructions
@@ -218,30 +459,30 @@ class MacroAssembler: public Assembler {
// and result_end have not yet been tagged as heap objects. If
// result_contains_top_on_entry is true the content of result is known to be
// the allocation top on entry (could be result_end from a previous call to
- // AllocateObjectInNewSpace). If result_contains_top_on_entry is true scratch
+ // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
- void AllocateObjectInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateObjectInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateObjectInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
+ void AllocateInNewSpace(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void AllocateInNewSpace(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void AllocateInNewSpace(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. Make sure that no pointers are left to the
@@ -296,12 +537,14 @@ class MacroAssembler: public Assembler {
void CallRuntime(Runtime::FunctionId id, int num_arguments);
// Tail call of a runtime routine (jump).
- // Like JumpToBuiltin, but also takes care of passing the number
+ // Like JumpToRuntime, but also takes care of passing the number
// of arguments.
- void TailCallRuntime(const ExternalReference& ext, int num_arguments);
+ void TailCallRuntime(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
- // Jump to the builtin routine.
- void JumpToBuiltin(const ExternalReference& ext);
+ // Jump to a runtime routine.
+ void JumpToRuntime(const ExternalReference& ext, int result_size);
// ---------------------------------------------------------------------------
@@ -361,8 +604,16 @@ class MacroAssembler: public Assembler {
Label* done,
InvokeFlag flag);
- // Get the code for the given builtin. Returns if able to resolve
- // the function in the 'resolved' flag.
+ // Prepares for a call or jump to a builtin by doing two things:
+ // 1. Emits code that fetches the builtin's function object from the context
+ // at runtime, and puts it in the register rdi.
+ // 2. Fetches the builtin's code object, and returns it in a handle, at
+ // compile time, so that later code can emit instructions to jump or call
+ // the builtin directly. If the code object has not yet been created, it
+ // returns the builtin code object for IllegalFunction, and sets the
+ // output parameter "resolved" to false. Code that uses the return value
+ // should then add the address and the builtin name to the list of fixups
+ // called unresolved_, which is fixed up by the bootstrapper.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
// Activation support.
diff --git a/V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc b/V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc
index 963d80e..5d17a2d 100644
--- a/V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -612,7 +612,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// MSVC passes arguments in rcx, rdx, r8, r9, with backing stack slots.
// Store register parameters in pre-allocated stack slots,
__ movq(Operand(rbp, kInputString), rcx);
- __ movzxlq(Operand(rbp, kStartIndex), rdx); // Passed as int in eax.
+ __ movq(Operand(rbp, kStartIndex), rdx); // Passed as int32 in edx.
__ movq(Operand(rbp, kInputStart), r8);
__ movq(Operand(rbp, kInputEnd), r9);
// Callee-save on Win64.
diff --git a/V8Binding/v8/src/x64/simulator-x64.h b/V8Binding/v8/src/x64/simulator-x64.h
index 184c166..998c909 100644
--- a/V8Binding/v8/src/x64/simulator-x64.h
+++ b/V8Binding/v8/src/x64/simulator-x64.h
@@ -28,6 +28,7 @@
#ifndef V8_X64_SIMULATOR_X64_H_
#define V8_X64_SIMULATOR_X64_H_
+#include "allocation.h"
// Since there is no simulator for the ia32 architecture the only thing we can
// do is to call the entry directly.
@@ -35,15 +36,15 @@
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4);
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-// NOTE: The check for overflow is not safe as there is no guarantee that the
-// running thread has its stack in all memory up to address 0x00000000.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
- (reinterpret_cast<uintptr_t>(this) >= limit ? \
- reinterpret_cast<uintptr_t>(this) - limit : 0)
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on x64 uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ return c_limit;
+ }
+};
// Call the generated regexp code directly. The entry function pointer should
// expect seven int/pointer sized arguments and return an int.
diff --git a/V8Binding/v8/src/x64/stub-cache-x64.cc b/V8Binding/v8/src/x64/stub-cache-x64.cc
index 1443b87..0994230 100644
--- a/V8Binding/v8/src/x64/stub-cache-x64.cc
+++ b/V8Binding/v8/src/x64/stub-cache-x64.cc
@@ -47,17 +47,19 @@ static void ProbeTable(MacroAssembler* masm,
StubCache::Table table,
Register name,
Register offset) {
+ // The offset register must hold a *positive* smi.
ExternalReference key_offset(SCTableReference::keyReference(table));
Label miss;
__ movq(kScratchRegister, key_offset);
+ SmiIndex index = masm->SmiToIndex(offset, offset, kPointerSizeLog2);
// Check that the key in the entry matches the name.
- __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
+ __ cmpl(name, Operand(kScratchRegister, index.reg, index.scale, 0));
__ j(not_equal, &miss);
// Get the code entry from the cache.
// Use key_offset + kPointerSize, rather than loading value_offset.
__ movq(kScratchRegister,
- Operand(kScratchRegister, offset, times_4, kPointerSize));
+ Operand(kScratchRegister, index.reg, index.scale, kPointerSize));
// Check that the flags match what we're looking for.
__ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
__ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
@@ -163,8 +165,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ASSERT(!scratch.is(name));
// Check that the receiver isn't a smi.
- __ testl(receiver, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
__ movl(scratch, FieldOperand(name, String::kLengthOffset));
@@ -204,8 +205,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Register scratch,
Label* miss_label) {
// Check that the object isn't a smi.
- __ testl(receiver_reg, Immediate(kSmiTagMask));
- __ j(zero, miss_label);
+ __ JumpIfSmi(receiver_reg, miss_label);
// Check that the map of the object hasn't changed.
__ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
@@ -275,8 +275,7 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
Register scratch,
Label* miss_label) {
// Check that the receiver isn't a smi.
- __ testl(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss_label);
+ __ JumpIfSmi(receiver, miss_label);
// Check that the object is a JS array.
__ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
@@ -296,8 +295,7 @@ static void GenerateStringCheck(MacroAssembler* masm,
Label* smi,
Label* non_string_object) {
// Check that the object isn't a smi.
- __ testl(receiver, Immediate(kSmiTagMask));
- __ j(zero, smi);
+ __ JumpIfSmi(receiver, smi);
// Check that the object is a string.
__ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
@@ -325,7 +323,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
// rcx is also the receiver.
__ lea(rcx, Operand(scratch, String::kLongLengthShift));
__ shr(rax); // rcx is implicit shift register.
- __ shl(rax, Immediate(kSmiTagSize));
+ __ Integer32ToSmi(rax, rax);
__ ret(0);
// Check if the object is a JSValue wrapper.
@@ -354,7 +352,7 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
__ movq(rax, Immediate(5));
__ movq(rbx, ref);
- CEntryStub stub;
+ CEntryStub stub(1);
__ CallStub(&stub);
}
@@ -489,7 +487,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallRuntime(ref, 5);
+ __ TailCallRuntime(ref, 5, 1);
__ bind(&cleanup);
__ pop(scratch1);
@@ -511,7 +509,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
- __ TailCallRuntime(ref, 5);
+ __ TailCallRuntime(ref, 5, 1);
}
private:
@@ -535,8 +533,7 @@ static void CompileLoadInterceptor(Compiler* compiler,
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
- __ testl(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
@@ -661,7 +658,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ movq(rax, Immediate(5));
__ movq(rbx, ref);
- CEntryStub stub;
+ CEntryStub stub(1);
__ CallStub(&stub);
__ LeaveInternalFrame();
@@ -701,8 +698,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
- __ testl(rdx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(rdx, &miss);
}
// Make sure that it's okay not to patch the on stack receiver
@@ -738,8 +734,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case NUMBER_CHECK: {
Label fast;
// Check that the object is a smi or a heap number.
- __ testl(rdx, Immediate(kSmiTagMask));
- __ j(zero, &fast);
+ __ JumpIfSmi(rdx, &fast);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
__ j(not_equal, &miss);
__ bind(&fast);
@@ -830,8 +825,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
- __ testl(rdx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(rdx, &miss);
// Do the right check and compute the holder register.
Register reg =
@@ -841,8 +835,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
// Check that the function really is a function.
- __ testl(rdi, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(rdi, &miss);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx);
__ j(not_equal, &miss);
@@ -899,8 +892,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Check that the function really is a function.
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(rax, &miss);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
__ j(not_equal, &miss);
@@ -952,8 +944,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (object != holder) {
- __ testl(rdx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(rdx, &miss);
}
// Check that the maps haven't changed.
@@ -1112,8 +1103,7 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// object which can only happen for contextual loads. In this case,
// the receiver cannot be a smi.
if (object != holder) {
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(rax, &miss);
}
// Check that the maps haven't changed.
@@ -1335,8 +1325,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
__ movq(rbx, Operand(rsp, 1 * kPointerSize));
// Check that the object isn't a smi.
- __ testl(rbx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(rbx, &miss);
// Check that the map of the object hasn't changed.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
@@ -1362,7 +1351,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
- __ TailCallRuntime(store_callback_property, 4);
+ __ TailCallRuntime(store_callback_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
@@ -1424,8 +1413,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
__ movq(rbx, Operand(rsp, 1 * kPointerSize));
// Check that the object isn't a smi.
- __ testl(rbx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(rbx, &miss);
// Check that the map of the object hasn't changed.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
@@ -1450,7 +1438,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
- __ TailCallRuntime(store_ic_property, 3);
+ __ TailCallRuntime(store_ic_property, 3, 1);
// Handle store cache miss.
__ bind(&miss);
@@ -1631,8 +1619,7 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ testl(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
@@ -1652,7 +1639,7 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallRuntime(load_callback_property, 5);
+ __ TailCallRuntime(load_callback_property, 5, 1);
}
@@ -1701,8 +1688,7 @@ void StubCompiler::GenerateLoadField(JSObject* object,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ testl(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(receiver, miss);
// Check the prototype chain.
Register reg =
@@ -1724,8 +1710,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ testl(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
@@ -1766,8 +1751,8 @@ Object* ConstructStubCompiler::CompileConstructStub(
// Load the initial map and verify that it is in fact a map.
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ testq(rbx, Immediate(kSmiTagMask));
- __ j(zero, &generic_stub_call);
+ ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(rbx, &generic_stub_call);
__ CmpObjectType(rbx, MAP_TYPE, rcx);
__ j(not_equal, &generic_stub_call);
@@ -1784,12 +1769,12 @@ Object* ConstructStubCompiler::CompileConstructStub(
// rbx: initial map
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ shl(rcx, Immediate(kPointerSizeLog2));
- __ AllocateObjectInNewSpace(rcx,
- rdx,
- rcx,
- no_reg,
- &generic_stub_call,
- NO_ALLOCATION_FLAGS);
+ __ AllocateInNewSpace(rcx,
+ rdx,
+ rcx,
+ no_reg,
+ &generic_stub_call,
+ NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields and add the heap tag.
// rbx: initial map
diff --git a/V8Binding/v8/src/x64/virtual-frame-x64.cc b/V8Binding/v8/src/x64/virtual-frame-x64.cc
index c2866a7..655f4c6 100644
--- a/V8Binding/v8/src/x64/virtual-frame-x64.cc
+++ b/V8Binding/v8/src/x64/virtual-frame-x64.cc
@@ -65,8 +65,8 @@ void VirtualFrame::Enter() {
#ifdef DEBUG
// Verify that rdi contains a JS function. The following code
// relies on rax being available for use.
- __ testl(rdi, Immediate(kSmiTagMask));
- __ Check(not_zero,
+ Condition not_smi = masm()->CheckNotSmi(rdi);
+ __ Check(not_smi,
"VirtualFrame::Enter - rdi is not a function (smi check).");
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
__ Check(equal,
diff --git a/V8Binding/v8/src/zone-inl.h b/V8Binding/v8/src/zone-inl.h
index b3141a4..121ba19 100644
--- a/V8Binding/v8/src/zone-inl.h
+++ b/V8Binding/v8/src/zone-inl.h
@@ -276,12 +276,19 @@ void ZoneSplayTree<C>::Splay(const Key& key) {
}
-template <typename Node, class Callback>
-static void DoForEach(Node* node, Callback* callback) {
- if (node == NULL) return;
- DoForEach<Node, Callback>(node->left(), callback);
- callback->Call(node->key(), node->value());
- DoForEach<Node, Callback>(node->right(), callback);
+template <typename Config> template <class Callback>
+void ZoneSplayTree<Config>::ForEach(Callback* callback) {
+ // Pre-allocate some space for tiny trees.
+ ZoneList<Node*> nodes_to_visit(10);
+ nodes_to_visit.Add(root_);
+ int pos = 0;
+ while (pos < nodes_to_visit.length()) {
+ Node* node = nodes_to_visit[pos++];
+ if (node == NULL) continue;
+ callback->Call(node->key(), node->value());
+ nodes_to_visit.Add(node->left());
+ nodes_to_visit.Add(node->right());
+ }
}
diff --git a/V8Binding/v8/src/zone.h b/V8Binding/v8/src/zone.h
index cdbab32..4e4f1d7 100644
--- a/V8Binding/v8/src/zone.h
+++ b/V8Binding/v8/src/zone.h
@@ -204,10 +204,6 @@ class ZoneScope BASE_EMBEDDED {
};
-template <typename Node, class Callback>
-static void DoForEach(Node* node, Callback* callback);
-
-
// A zone splay tree. The config type parameter encapsulates the
// different configurations of a concrete splay tree:
//
@@ -297,9 +293,7 @@ class ZoneSplayTree : public ZoneObject {
};
template <class Callback>
- void ForEach(Callback* c) {
- DoForEach<typename ZoneSplayTree<Config>::Node, Callback>(root_, c);
- }
+ void ForEach(Callback* callback);
private:
Node* root_;
diff --git a/V8Binding/v8/test/cctest/SConscript b/V8Binding/v8/test/cctest/SConscript
index fc4e01a..9103403 100644
--- a/V8Binding/v8/test/cctest/SConscript
+++ b/V8Binding/v8/test/cctest/SConscript
@@ -45,6 +45,7 @@ SOURCES = {
'test-func-name-inference.cc',
'test-hashmap.cc',
'test-heap.cc',
+ 'test-heap-profiler.cc',
'test-list.cc',
'test-lock.cc',
'test-log.cc',
diff --git a/V8Binding/v8/test/cctest/cctest.status b/V8Binding/v8/test/cctest/cctest.status
index 68aabb5..8fff769 100644
--- a/V8Binding/v8/test/cctest/cctest.status
+++ b/V8Binding/v8/test/cctest/cctest.status
@@ -36,8 +36,6 @@ test-api/ApplyInterruption: PASS || TIMEOUT
[ $arch == arm ]
-test-debug: SKIP
-
# BUG(113): Test seems flaky on ARM.
test-spaces/LargeObjectSpace: PASS || FAIL
diff --git a/V8Binding/v8/test/cctest/test-alloc.cc b/V8Binding/v8/test/cctest/test-alloc.cc
index 9996eeb..1235b13 100644
--- a/V8Binding/v8/test/cctest/test-alloc.cc
+++ b/V8Binding/v8/test/cctest/test-alloc.cc
@@ -43,7 +43,14 @@ static Object* AllocateAfterFailures() {
NewSpace* new_space = Heap::new_space();
static const int kNewSpaceFillerSize = ByteArray::SizeFor(0);
while (new_space->Available() > kNewSpaceFillerSize) {
+ int available_before = new_space->Available();
CHECK(!Heap::AllocateByteArray(0)->IsFailure());
+ if (available_before == new_space->Available()) {
+ // It seems that we are avoiding new space allocations when
+ // allocation is forced, so no need to fill up new space
+ // in order to make the test harder.
+ break;
+ }
}
CHECK(!Heap::AllocateByteArray(100)->IsFailure());
CHECK(!Heap::AllocateFixedArray(100, NOT_TENURED)->IsFailure());
@@ -144,3 +151,65 @@ TEST(StressJS) {
CHECK_EQ(42, result->Int32Value());
env->Exit();
}
+
+
+// CodeRange test.
+// Tests memory management in a CodeRange by allocating and freeing blocks,
+// using a pseudorandom generator to choose block sizes geometrically
+// distributed between 2 * Page::kPageSize and 2^5 + 1 * Page::kPageSize.
+// Ensure that the freed chunks are collected and reused by allocating (in
+// total) more than the size of the CodeRange.
+
+// This pseudorandom generator does not need to be particularly good.
+// Use the lower half of the V8::Random() generator.
+unsigned int Pseudorandom() {
+ static uint32_t lo = 2345;
+ lo = 18273 * (lo & 0xFFFF) + (lo >> 16); // Provably not 0.
+ return lo & 0xFFFF;
+}
+
+
+// Plain old data class. Represents a block of allocated memory.
+class Block {
+ public:
+ Block(void* base_arg, int size_arg)
+ : base(base_arg), size(size_arg) {}
+
+ void *base;
+ int size;
+};
+
+
+TEST(CodeRange) {
+ const int code_range_size = 16*MB;
+ CodeRange::Setup(code_range_size);
+ int current_allocated = 0;
+ int total_allocated = 0;
+ List<Block> blocks(1000);
+
+ while (total_allocated < 5 * code_range_size) {
+ if (current_allocated < code_range_size / 10) {
+ // Allocate a block.
+ // Geometrically distributed sizes, greater than Page::kPageSize.
+ size_t requested = (Page::kPageSize << (Pseudorandom() % 6)) +
+ Pseudorandom() % 5000 + 1;
+ size_t allocated = 0;
+ void* base = CodeRange::AllocateRawMemory(requested, &allocated);
+ blocks.Add(Block(base, allocated));
+ current_allocated += allocated;
+ total_allocated += allocated;
+ } else {
+ // Free a block.
+ int index = Pseudorandom() % blocks.length();
+ CodeRange::FreeRawMemory(blocks[index].base, blocks[index].size);
+ current_allocated -= blocks[index].size;
+ if (index < blocks.length() - 1) {
+ blocks[index] = blocks.RemoveLast();
+ } else {
+ blocks.RemoveLast();
+ }
+ }
+ }
+
+ CodeRange::TearDown();
+}
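When the CodeRange test frees a block it keeps the blocks list dense by moving the last entry into the freed slot, trading element order for O(1) removal. The same swap-with-last idiom in standalone form, with std::vector standing in for v8::internal::List purely for illustration:

    #include <cassert>
    #include <vector>

    // Remove items[index] in O(1) by overwriting it with the last element,
    // exactly as the CodeRange test does with its List<Block>.
    template <typename T>
    void SwapRemove(std::vector<T>* items, size_t index) {
      assert(index < items->size());
      if (index < items->size() - 1) {
        (*items)[index] = items->back();  // Last element takes the freed slot.
      }
      items->pop_back();                  // Drop the now-duplicated tail.
    }

    int main() {
      std::vector<int> sizes;
      sizes.push_back(4096);
      sizes.push_back(8192);
      sizes.push_back(16384);
      sizes.push_back(32768);

      SwapRemove(&sizes, 1);              // Order is not preserved...
      assert(sizes.size() == 3);
      assert(sizes[1] == 32768);          // ...the old tail now sits at index 1.

      SwapRemove(&sizes, 2);              // Removing the last element just pops.
      assert(sizes.size() == 2);
      return 0;
    }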
diff --git a/V8Binding/v8/test/cctest/test-api.cc b/V8Binding/v8/test/cctest/test-api.cc
index 80f91d3..f430cbd 100644
--- a/V8Binding/v8/test/cctest/test-api.cc
+++ b/V8Binding/v8/test/cctest/test-api.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2007-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,6 +31,7 @@
#include "api.h"
#include "compilation-cache.h"
+#include "execution.h"
#include "snapshot.h"
#include "platform.h"
#include "top.h"
@@ -7729,6 +7730,42 @@ THREADED_TEST(PixelArray) {
CHECK_EQ(1503, result->Int32Value());
result = CompileRun("pixels[1]");
CHECK_EQ(1, result->Int32Value());
+
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i] = pixels[i] = -i;"
+ "}"
+ "sum;");
+ CHECK_EQ(-28, result->Int32Value());
+
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i] = pixels[i] = 0;"
+ "}"
+ "sum;");
+ CHECK_EQ(0, result->Int32Value());
+
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i] = pixels[i] = 255;"
+ "}"
+ "sum;");
+ CHECK_EQ(8 * 255, result->Int32Value());
+
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i] = pixels[i] = 256 + i;"
+ "}"
+ "sum;");
+ CHECK_EQ(2076, result->Int32Value());
+
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i] = pixels[i] = i;"
+ "}"
+ "sum;");
+ CHECK_EQ(28, result->Int32Value());
+
result = CompileRun("var sum = 0;"
"for (var i = 0; i < 8; i++) {"
" sum += pixels[i];"
@@ -7839,6 +7876,9 @@ THREADED_TEST(PixelArray) {
CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());
CHECK_EQ(23, v8::Object::Cast(*result)->Get(v8_str("1"))->Int32Value());
+ result = CompileRun("pixels[1] = 23;");
+ CHECK_EQ(23, result->Int32Value());
+
free(pixel_data);
}
@@ -7879,3 +7919,77 @@ THREADED_TEST(IdleNotification) {
for (int i = 0; i < 100; i++) v8::V8::IdleNotification(true);
for (int i = 0; i < 100; i++) v8::V8::IdleNotification(false);
}
+
+
+static uint32_t* stack_limit;
+
+static v8::Handle<Value> GetStackLimitCallback(const v8::Arguments& args) {
+ stack_limit = reinterpret_cast<uint32_t*>(i::StackGuard::climit());
+ return v8::Undefined();
+}
+
+
+// Uses the address of a local variable to determine the stack top now.
+// Given a size, returns an address that is that far from the current
+// top of stack.
+static uint32_t* ComputeStackLimit(uint32_t size) {
+ uint32_t* answer = &size - (size / sizeof(size));
+ // If the size is very large and the stack is very near the bottom of
+ // memory then the calculation above may wrap around and give an address
+ // that is above the (downwards-growing) stack. In that case we return
+ // a very low address.
+ if (answer > &size) return reinterpret_cast<uint32_t*>(sizeof(size));
+ return answer;
+}
+
+
+TEST(SetResourceConstraints) {
+ static const int K = 1024;
+ uint32_t* set_limit = ComputeStackLimit(128 * K);
+
+ // Set stack limit.
+ v8::ResourceConstraints constraints;
+ constraints.set_stack_limit(set_limit);
+ CHECK(v8::SetResourceConstraints(&constraints));
+
+ // Execute a script.
+ v8::HandleScope scope;
+ LocalContext env;
+ Local<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(GetStackLimitCallback);
+ Local<Function> fun = fun_templ->GetFunction();
+ env->Global()->Set(v8_str("get_stack_limit"), fun);
+ CompileRun("get_stack_limit();");
+
+ CHECK(stack_limit == set_limit);
+}
+
+
+TEST(SetResourceConstraintsInThread) {
+ uint32_t* set_limit;
+ {
+ v8::Locker locker;
+ static const int K = 1024;
+ set_limit = ComputeStackLimit(128 * K);
+
+ // Set stack limit.
+ v8::ResourceConstraints constraints;
+ constraints.set_stack_limit(set_limit);
+ CHECK(v8::SetResourceConstraints(&constraints));
+
+ // Execute a script.
+ v8::HandleScope scope;
+ LocalContext env;
+ Local<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(GetStackLimitCallback);
+ Local<Function> fun = fun_templ->GetFunction();
+ env->Global()->Set(v8_str("get_stack_limit"), fun);
+ CompileRun("get_stack_limit();");
+
+ CHECK(stack_limit == set_limit);
+ }
+ {
+ v8::Locker locker;
+ CHECK(stack_limit == set_limit);
+ }
+}
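A detail worth spelling out in ComputeStackLimit above: the expression &size - (size / sizeof(size)) does pointer arithmetic on a uint32_t*, so subtracting size / 4 elements moves the pointer roughly size bytes down the (downward-growing) stack. A small self-contained check of that scaling, using a local array instead of the real stack top (illustrative only):

    #include <cassert>
    #include <stdint.h>
    #include <stddef.h>

    int main() {
      uint32_t buffer[64] = {0};
      uint32_t* top = &buffer[64];      // Stand-in for the current stack top.
      uint32_t size = 128;              // How many bytes we want to step down.

      // Same expression shape as ComputeStackLimit: pointer arithmetic scales
      // by sizeof(uint32_t), so size / sizeof(size) elements == size bytes.
      uint32_t* limit = top - (size / sizeof(size));

      ptrdiff_t byte_distance =
          reinterpret_cast<char*>(top) - reinterpret_cast<char*>(limit);
      assert(byte_distance == 128);
      assert(limit == &buffer[32]);     // 32 uint32_t slots == 128 bytes.
      return 0;
    }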
diff --git a/V8Binding/v8/test/cctest/test-assembler-arm.cc b/V8Binding/v8/test/cctest/test-assembler-arm.cc
index 34f1639..f6e4d04 100644
--- a/V8Binding/v8/test/cctest/test-assembler-arm.cc
+++ b/V8Binding/v8/test/cctest/test-assembler-arm.cc
@@ -37,9 +37,9 @@ using namespace v8::internal;
// Define these function prototypes to match JSEntryFunction in execution.cc.
-typedef int (*F1)(int x, int p1, int p2, int p3, int p4);
-typedef int (*F2)(int x, int y, int p2, int p3, int p4);
-typedef int (*F3)(void* p, int p1, int p2, int p3, int p4);
+typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
+typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
+typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
static v8::Persistent<v8::Context> env;
diff --git a/V8Binding/v8/test/cctest/test-conversions.cc b/V8Binding/v8/test/cctest/test-conversions.cc
index 6c0b9a6..35ab46f 100644
--- a/V8Binding/v8/test/cctest/test-conversions.cc
+++ b/V8Binding/v8/test/cctest/test-conversions.cc
@@ -91,13 +91,15 @@ TEST(NonStrDecimalLiteral) {
CHECK_EQ(0.0, StringToDouble(" ", NO_FLAGS));
}
+class OneBit1: public BitField<uint32_t, 0, 1> {};
+class OneBit2: public BitField<uint32_t, 7, 1> {};
+class EightBit1: public BitField<uint32_t, 0, 8> {};
+class EightBit2: public BitField<uint32_t, 13, 8> {};
TEST(BitField) {
uint32_t x;
// One bit bit field can hold values 0 and 1.
- class OneBit1: public BitField<uint32_t, 0, 1> {};
- class OneBit2: public BitField<uint32_t, 7, 1> {};
CHECK(!OneBit1::is_valid(static_cast<uint32_t>(-1)));
CHECK(!OneBit2::is_valid(static_cast<uint32_t>(-1)));
for (int i = 0; i < 2; i++) {
@@ -113,8 +115,6 @@ TEST(BitField) {
CHECK(!OneBit2::is_valid(2));
// Eight bit bit field can hold values from 0 to 255.
- class EightBit1: public BitField<uint32_t, 0, 8> {};
- class EightBit2: public BitField<uint32_t, 13, 8> {};
CHECK(!EightBit1::is_valid(static_cast<uint32_t>(-1)));
CHECK(!EightBit2::is_valid(static_cast<uint32_t>(-1)));
for (int i = 0; i < 256; i++) {
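The BitField helper classes used by TEST(BitField) are moved from inside the test body to namespace scope here. For readers without the header handy, a stripped-down stand-in showing the is_valid/encode/decode behaviour the test exercises — a simplified sketch, not V8's actual BitField implementation:

    #include <cassert>
    #include <stdint.h>

    // Simplified bit field: an unsigned value of |size| bits stored at bit
    // offset |shift| within a uint32_t word.
    template <class T, int shift, int size>
    class BitField {
     public:
      // True if |value| fits in |size| bits.
      static bool is_valid(T value) {
        return (static_cast<uint32_t>(value) & ~((1U << size) - 1)) == 0;
      }
      // Place |value| into the field's bit range.
      static uint32_t encode(T value) {
        assert(is_valid(value));
        return static_cast<uint32_t>(value) << shift;
      }
      // Extract the field's value from a full word.
      static T decode(uint32_t word) {
        return static_cast<T>((word >> shift) & ((1U << size) - 1));
      }
    };

    class OneBit : public BitField<uint32_t, 7, 1> {};
    class EightBit : public BitField<uint32_t, 13, 8> {};

    int main() {
      assert(!OneBit::is_valid(2));                    // One bit holds 0 or 1.
      assert(OneBit::decode(OneBit::encode(1)) == 1);
      assert(!EightBit::is_valid(256));                // Eight bits hold 0..255.
      for (uint32_t i = 0; i < 256; i++) {
        assert(EightBit::decode(EightBit::encode(i)) == i);
      }
      return 0;
    }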
diff --git a/V8Binding/v8/test/cctest/test-debug.cc b/V8Binding/v8/test/cctest/test-debug.cc
index 0cae26c..1da363c 100644
--- a/V8Binding/v8/test/cctest/test-debug.cc
+++ b/V8Binding/v8/test/cctest/test-debug.cc
@@ -2301,13 +2301,8 @@ TEST(DebugStepLinearMixedICs) {
break_point_hit_count = 0;
foo->Call(env->Global(), 0, NULL);
- // With stepping all break locations are hit. For ARM the keyed load/store
- // is not hit as they are not implemented as ICs.
-#if defined (__arm__) || defined(__thumb__)
- CHECK_EQ(6, break_point_hit_count);
-#else
+ // With stepping all break locations are hit.
CHECK_EQ(8, break_point_hit_count);
-#endif
v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded();
@@ -4101,11 +4096,11 @@ v8::Handle<v8::Function> debugger_call_with_data;
// passed it throws an exception.
static const char* debugger_call_with_closure_source =
"var x = 3;"
- "function (exec_state) {"
+ "(function (exec_state) {"
" if (exec_state.y) return x - 1;"
" exec_state.y = x;"
" return exec_state.y"
- "}";
+ "})";
v8::Handle<v8::Function> debugger_call_with_closure;
// Function to retrieve the number of JavaScript frames by calling a JavaScript
@@ -4527,6 +4522,7 @@ TEST(DebuggerAgent) {
// with the client connected.
ok = i::Debugger::StartAgent("test", kPort2);
CHECK(ok);
+ i::Debugger::WaitForAgent();
i::Socket* client = i::OS::CreateSocket();
ok = client->Connect("localhost", port2_str);
CHECK(ok);
diff --git a/V8Binding/v8/test/cctest/test-heap-profiler.cc b/V8Binding/v8/test/cctest/test-heap-profiler.cc
new file mode 100644
index 0000000..b199507
--- /dev/null
+++ b/V8Binding/v8/test/cctest/test-heap-profiler.cc
@@ -0,0 +1,396 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+//
+// Tests for heap profiler
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+#include "v8.h"
+#include "heap-profiler.h"
+#include "string-stream.h"
+#include "cctest.h"
+
+namespace i = v8::internal;
+using i::ClustersCoarser;
+using i::JSObjectsCluster;
+using i::JSObjectsRetainerTree;
+using i::JSObjectsClusterTree;
+using i::RetainerHeapProfile;
+
+
+static void CompileAndRunScript(const char *src) {
+ v8::Script::Compile(v8::String::New(src))->Run();
+}
+
+
+namespace {
+
+class ConstructorHeapProfileTestHelper : public i::ConstructorHeapProfile {
+ public:
+ ConstructorHeapProfileTestHelper()
+ : i::ConstructorHeapProfile(),
+ f_name_(i::Factory::NewStringFromAscii(i::CStrVector("F"))),
+ f_count_(0) {
+ }
+
+ void Call(const JSObjectsCluster& cluster,
+ const i::NumberAndSizeInfo& number_and_size) {
+ if (f_name_->Equals(cluster.constructor())) {
+ CHECK_EQ(f_count_, 0);
+ f_count_ = number_and_size.number();
+ CHECK_GT(f_count_, 0);
+ }
+ }
+
+ int f_count() { return f_count_; }
+
+ private:
+ i::Handle<i::String> f_name_;
+ int f_count_;
+};
+
+} // namespace
+
+
+TEST(ConstructorProfile) {
+ v8::HandleScope scope;
+ v8::Handle<v8::Context> env = v8::Context::New();
+ env->Enter();
+
+ CompileAndRunScript(
+ "function F() {} // A constructor\n"
+ "var f1 = new F();\n"
+ "var f2 = new F();\n");
+
+ ConstructorHeapProfileTestHelper cons_profile;
+ i::AssertNoAllocation no_alloc;
+ i::HeapIterator iterator;
+ while (iterator.has_next()) {
+ i::HeapObject* obj = iterator.next();
+ cons_profile.CollectStats(obj);
+ }
+ CHECK_EQ(0, cons_profile.f_count());
+ cons_profile.PrintStats();
+ CHECK_EQ(2, cons_profile.f_count());
+}
+
+
+static JSObjectsCluster AddHeapObjectToTree(JSObjectsRetainerTree* tree,
+ i::String* constructor,
+ int instance,
+ JSObjectsCluster* ref1 = NULL,
+ JSObjectsCluster* ref2 = NULL,
+ JSObjectsCluster* ref3 = NULL) {
+ JSObjectsCluster o(constructor, reinterpret_cast<i::Object*>(instance));
+ JSObjectsClusterTree* o_tree = new JSObjectsClusterTree();
+ JSObjectsClusterTree::Locator o_loc;
+ if (ref1 != NULL) o_tree->Insert(*ref1, &o_loc);
+ if (ref2 != NULL) o_tree->Insert(*ref2, &o_loc);
+ if (ref3 != NULL) o_tree->Insert(*ref3, &o_loc);
+ JSObjectsRetainerTree::Locator loc;
+ tree->Insert(o, &loc);
+ loc.set_value(o_tree);
+ return o;
+}
+
+
+static void AddSelfReferenceToTree(JSObjectsRetainerTree* tree,
+ JSObjectsCluster* self_ref) {
+ JSObjectsRetainerTree::Locator loc;
+ CHECK(tree->Find(*self_ref, &loc));
+ JSObjectsClusterTree::Locator o_loc;
+ CHECK_NE(NULL, loc.value());
+ loc.value()->Insert(*self_ref, &o_loc);
+}
+
+
+static inline void CheckEqualsHelper(const char* file, int line,
+ const char* expected_source,
+ const JSObjectsCluster& expected,
+ const char* value_source,
+ const JSObjectsCluster& value) {
+ if (JSObjectsCluster::Compare(expected, value) != 0) {
+ i::HeapStringAllocator allocator;
+ i::StringStream stream(&allocator);
+ stream.Add("# Expected: ");
+ expected.DebugPrint(&stream);
+ stream.Add("\n# Found: ");
+ value.DebugPrint(&stream);
+ V8_Fatal(file, line, "CHECK_EQ(%s, %s) failed\n%s",
+ expected_source, value_source,
+ *stream.ToCString());
+ }
+}
+
+
+static inline void CheckNonEqualsHelper(const char* file, int line,
+ const char* expected_source,
+ const JSObjectsCluster& expected,
+ const char* value_source,
+ const JSObjectsCluster& value) {
+ if (JSObjectsCluster::Compare(expected, value) == 0) {
+ i::HeapStringAllocator allocator;
+ i::StringStream stream(&allocator);
+ stream.Add("# !Expected: ");
+ expected.DebugPrint(&stream);
+ stream.Add("\n# Found: ");
+ value.DebugPrint(&stream);
+ V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n%s",
+ expected_source, value_source,
+ *stream.ToCString());
+ }
+}
+
+
+TEST(ClustersCoarserSimple) {
+ v8::HandleScope scope;
+ v8::Handle<v8::Context> env = v8::Context::New();
+ env->Enter();
+
+ i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
+
+ JSObjectsRetainerTree tree;
+ JSObjectsCluster function(i::Heap::function_class_symbol());
+ JSObjectsCluster a(*i::Factory::NewStringFromAscii(i::CStrVector("A")));
+ JSObjectsCluster b(*i::Factory::NewStringFromAscii(i::CStrVector("B")));
+
+ // o1 <- Function
+ JSObjectsCluster o1 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x100, &function);
+ // o2 <- Function
+ JSObjectsCluster o2 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x200, &function);
+ // o3 <- A, B
+ JSObjectsCluster o3 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x300, &a, &b);
+ // o4 <- B, A
+ JSObjectsCluster o4 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x400, &b, &a);
+ // o5 <- A, B, Function
+ JSObjectsCluster o5 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x500,
+ &a, &b, &function);
+
+ ClustersCoarser coarser;
+ coarser.Process(&tree);
+
+ CHECK_EQ(coarser.GetCoarseEquivalent(o1), coarser.GetCoarseEquivalent(o2));
+ CHECK_EQ(coarser.GetCoarseEquivalent(o3), coarser.GetCoarseEquivalent(o4));
+ CHECK_NE(coarser.GetCoarseEquivalent(o1), coarser.GetCoarseEquivalent(o3));
+ CHECK_EQ(JSObjectsCluster(), coarser.GetCoarseEquivalent(o5));
+}
+
+
+TEST(ClustersCoarserMultipleConstructors) {
+ v8::HandleScope scope;
+ v8::Handle<v8::Context> env = v8::Context::New();
+ env->Enter();
+
+ i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
+
+ JSObjectsRetainerTree tree;
+ JSObjectsCluster function(i::Heap::function_class_symbol());
+
+ // o1 <- Function
+ JSObjectsCluster o1 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x100, &function);
+ // a1 <- Function
+ JSObjectsCluster a1 =
+ AddHeapObjectToTree(&tree, i::Heap::Array_symbol(), 0x1000, &function);
+ // o2 <- Function
+ JSObjectsCluster o2 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x200, &function);
+ // a2 <- Function
+ JSObjectsCluster a2 =
+ AddHeapObjectToTree(&tree, i::Heap::Array_symbol(), 0x2000, &function);
+
+ ClustersCoarser coarser;
+ coarser.Process(&tree);
+
+ CHECK_EQ(coarser.GetCoarseEquivalent(o1), coarser.GetCoarseEquivalent(o2));
+ CHECK_EQ(coarser.GetCoarseEquivalent(a1), coarser.GetCoarseEquivalent(a2));
+}
+
+
+TEST(ClustersCoarserPathsTraversal) {
+ v8::HandleScope scope;
+ v8::Handle<v8::Context> env = v8::Context::New();
+ env->Enter();
+
+ i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
+
+ JSObjectsRetainerTree tree;
+
+ // On the following graph:
+ //
+ // p
+ //   <- o21 <- o11 <-
+ // q                  o
+ //   <- o22 <- o12 <-
+ // r
+ //
+ // we expect that coarser will deduce equivalences: p ~ q ~ r,
+ // o21 ~ o22, and o11 ~ o12.
+
+ JSObjectsCluster o =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x100);
+ JSObjectsCluster o11 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x110, &o);
+ JSObjectsCluster o12 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x120, &o);
+ JSObjectsCluster o21 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x210, &o11);
+ JSObjectsCluster o22 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x220, &o12);
+ JSObjectsCluster p =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x300, &o21);
+ JSObjectsCluster q =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x310, &o21, &o22);
+ JSObjectsCluster r =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x320, &o22);
+
+ ClustersCoarser coarser;
+ coarser.Process(&tree);
+
+ CHECK_EQ(JSObjectsCluster(), coarser.GetCoarseEquivalent(o));
+ CHECK_NE(JSObjectsCluster(), coarser.GetCoarseEquivalent(o11));
+ CHECK_EQ(coarser.GetCoarseEquivalent(o11), coarser.GetCoarseEquivalent(o12));
+ CHECK_EQ(coarser.GetCoarseEquivalent(o21), coarser.GetCoarseEquivalent(o22));
+ CHECK_NE(coarser.GetCoarseEquivalent(o11), coarser.GetCoarseEquivalent(o21));
+ CHECK_NE(JSObjectsCluster(), coarser.GetCoarseEquivalent(p));
+ CHECK_EQ(coarser.GetCoarseEquivalent(p), coarser.GetCoarseEquivalent(q));
+ CHECK_EQ(coarser.GetCoarseEquivalent(q), coarser.GetCoarseEquivalent(r));
+ CHECK_NE(coarser.GetCoarseEquivalent(o11), coarser.GetCoarseEquivalent(p));
+ CHECK_NE(coarser.GetCoarseEquivalent(o21), coarser.GetCoarseEquivalent(p));
+}
+
+
+TEST(ClustersCoarserSelf) {
+ v8::HandleScope scope;
+ v8::Handle<v8::Context> env = v8::Context::New();
+ env->Enter();
+
+ i::ZoneScope zn_scope(i::DELETE_ON_EXIT);
+
+ JSObjectsRetainerTree tree;
+
+ // On the following graph:
+ //
+ // p (self-referencing)
+ //           <- o1 <-
+ // q (self-referencing)   o
+ //           <- o2 <-
+ // r (self-referencing)
+ //
+ // we expect that coarser will deduce equivalences: p ~ q ~ r, o1 ~ o2;
+
+ JSObjectsCluster o =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x100);
+ JSObjectsCluster o1 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x110, &o);
+ JSObjectsCluster o2 =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x120, &o);
+ JSObjectsCluster p =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x300, &o1);
+ AddSelfReferenceToTree(&tree, &p);
+ JSObjectsCluster q =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x310, &o1, &o2);
+ AddSelfReferenceToTree(&tree, &q);
+ JSObjectsCluster r =
+ AddHeapObjectToTree(&tree, i::Heap::Object_symbol(), 0x320, &o2);
+ AddSelfReferenceToTree(&tree, &r);
+
+ ClustersCoarser coarser;
+ coarser.Process(&tree);
+
+ CHECK_EQ(JSObjectsCluster(), coarser.GetCoarseEquivalent(o));
+ CHECK_NE(JSObjectsCluster(), coarser.GetCoarseEquivalent(o1));
+ CHECK_EQ(coarser.GetCoarseEquivalent(o1), coarser.GetCoarseEquivalent(o2));
+ CHECK_NE(JSObjectsCluster(), coarser.GetCoarseEquivalent(p));
+ CHECK_EQ(coarser.GetCoarseEquivalent(p), coarser.GetCoarseEquivalent(q));
+ CHECK_EQ(coarser.GetCoarseEquivalent(q), coarser.GetCoarseEquivalent(r));
+ CHECK_NE(coarser.GetCoarseEquivalent(o1), coarser.GetCoarseEquivalent(p));
+}
+
+
+namespace {
+
+class RetainerProfilePrinter : public RetainerHeapProfile::Printer {
+ public:
+ RetainerProfilePrinter() : stream_(&allocator_), lines_(100) {}
+
+ void PrintRetainers(const JSObjectsCluster& cluster,
+ const i::StringStream& retainers) {
+ cluster.Print(&stream_);
+ stream_.Add("%s", *(retainers.ToCString()));
+ stream_.Put('\0');
+ }
+
+ const char* GetRetainers(const char* constructor) {
+ FillLines();
+ const size_t cons_len = strlen(constructor);
+ for (int i = 0; i < lines_.length(); ++i) {
+ if (strncmp(constructor, lines_[i], cons_len) == 0 &&
+ lines_[i][cons_len] == ',') {
+ return lines_[i] + cons_len + 1;
+ }
+ }
+ return NULL;
+ }
+
+ private:
+ void FillLines() {
+ if (lines_.length() > 0) return;
+ stream_.Put('\0');
+ stream_str_ = stream_.ToCString();
+ const char* pos = *stream_str_;
+ while (pos != NULL && *pos != '\0') {
+ lines_.Add(pos);
+ pos = strchr(pos, '\0');
+ if (pos != NULL) ++pos;
+ }
+ }
+
+ i::HeapStringAllocator allocator_;
+ i::StringStream stream_;
+ i::SmartPointer<const char> stream_str_;
+ i::List<const char*> lines_;
+};
+
+} // namespace
+
+
+TEST(RetainerProfile) {
+ v8::HandleScope scope;
+ v8::Handle<v8::Context> env = v8::Context::New();
+ env->Enter();
+
+ CompileAndRunScript(
+ "function A() {}\n"
+ "function B(x) { this.x = x; }\n"
+ "function C(x) { this.x1 = x; this.x2 = x; }\n"
+ "var a = new A();\n"
+ "var b1 = new B(a), b2 = new B(a);\n"
+ "var c = new C(a);");
+
+ RetainerHeapProfile ret_profile;
+ i::AssertNoAllocation no_alloc;
+ i::HeapIterator iterator;
+ while (iterator.has_next()) {
+ i::HeapObject* obj = iterator.next();
+ ret_profile.CollectStats(obj);
+ }
+ RetainerProfilePrinter printer;
+ ret_profile.DebugPrintStats(&printer);
+ const char* retainers_of_a = printer.GetRetainers("A");
+ // The order of retainers is unspecified, so we check string length, and
+ // verify each retainer separately.
+ CHECK_EQ(static_cast<int>(strlen("(global property);1,B;2,C;2")),
+ static_cast<int>(strlen(retainers_of_a)));
+ CHECK(strstr(retainers_of_a, "(global property);1") != NULL);
+ CHECK(strstr(retainers_of_a, "B;2") != NULL);
+ CHECK(strstr(retainers_of_a, "C;2") != NULL);
+ CHECK_EQ("(global property);2", printer.GetRetainers("B"));
+ CHECK_EQ("(global property);1", printer.GetRetainers("C"));
+}
+
+#endif // ENABLE_LOGGING_AND_PROFILING
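The CheckEqualsHelper/CheckNonEqualsHelper overloads near the top of the new test are what make CHECK_EQ and CHECK_NE usable on JSObjectsCluster values: the cctest macros forward the stringified expressions plus __FILE__/__LINE__ to an overloaded helper, so supporting a new type only requires adding an overload. A stripped-down sketch of that dispatch with a toy macro and value type (illustrative, not the real cctest/checks machinery):

    #include <cstdio>
    #include <cstdlib>

    // Toy value type standing in for JSObjectsCluster.
    struct Cluster {
      int id;
    };

    static void CheckEqualsHelper(const char* file, int line,
                                  const char* expected_source, int expected,
                                  const char* value_source, int value) {
      if (expected != value) {
        std::fprintf(stderr, "%s:%d: CHECK_EQ(%s, %s) failed\n",
                     file, line, expected_source, value_source);
        std::abort();
      }
    }

    // Extra overload, analogous to the JSObjectsCluster one added in the test:
    // the macro below stays the same, only overload resolution changes.
    static void CheckEqualsHelper(const char* file, int line,
                                  const char* expected_source,
                                  const Cluster& expected,
                                  const char* value_source,
                                  const Cluster& value) {
      if (expected.id != value.id) {
        std::fprintf(stderr, "%s:%d: CHECK_EQ(%s, %s) failed for clusters\n",
                     file, line, expected_source, value_source);
        std::abort();
      }
    }

    // The macro only captures source text and location; the comparison is
    // chosen by the argument types.
    #define MY_CHECK_EQ(expected, value) \
      CheckEqualsHelper(__FILE__, __LINE__, #expected, (expected), #value, (value))

    int main() {
      Cluster a = {1};
      Cluster b = {1};
      MY_CHECK_EQ(2 + 2, 4);   // Dispatches to the int overload.
      MY_CHECK_EQ(a, b);       // Dispatches to the Cluster overload.
      return 0;
    }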
diff --git a/V8Binding/v8/test/cctest/test-log.cc b/V8Binding/v8/test/cctest/test-log.cc
index dafd3aa..65ab50a 100644
--- a/V8Binding/v8/test/cctest/test-log.cc
+++ b/V8Binding/v8/test/cctest/test-log.cc
@@ -401,13 +401,6 @@ class TestSampler : public v8::internal::Sampler {
} // namespace
TEST(ProfMultipleThreads) {
- // V8 needs to be initialized before the first Locker
- // instantiation. Otherwise, Top::Initialize will reset
- // thread_id_ in ThreadTopLocal.
- v8::HandleScope scope;
- v8::Handle<v8::Context> env = v8::Context::New();
- env->Enter();
-
LoopingJsThread jsThread;
jsThread.Start();
LoopingNonJsThread nonJsThread;
diff --git a/V8Binding/v8/test/cctest/test-sockets.cc b/V8Binding/v8/test/cctest/test-sockets.cc
index a4b2285..822a23f 100644
--- a/V8Binding/v8/test/cctest/test-sockets.cc
+++ b/V8Binding/v8/test/cctest/test-sockets.cc
@@ -42,6 +42,7 @@ void SocketListenerThread::Run() {
// Create the server socket and bind it to the requested port.
server_ = OS::CreateSocket();
+ server_->SetReuseAddress(true);
CHECK(server_ != NULL);
ok = server_->Bind(port_);
CHECK(ok);
diff --git a/V8Binding/v8/test/cctest/test-strings.cc b/V8Binding/v8/test/cctest/test-strings.cc
index 127b7a2..bb9a6f9 100644
--- a/V8Binding/v8/test/cctest/test-strings.cc
+++ b/V8Binding/v8/test/cctest/test-strings.cc
@@ -48,6 +48,21 @@ static const int DEEP_DEPTH = 8 * 1024;
static const int SUPER_DEEP_DEPTH = 80 * 1024;
+class Resource: public v8::String::ExternalStringResource,
+ public ZoneObject {
+ public:
+ explicit Resource(Vector<const uc16> string): data_(string.start()) {
+ length_ = string.length();
+ }
+ virtual const uint16_t* data() const { return data_; }
+ virtual size_t length() const { return length_; }
+
+ private:
+ const uc16* data_;
+ size_t length_;
+};
+
+
static void InitializeBuildingBlocks(
Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS]) {
// A list of pointers that we don't have any interest in cleaning up.
@@ -83,19 +98,6 @@ static void InitializeBuildingBlocks(
break;
}
case 2: {
- class Resource: public v8::String::ExternalStringResource,
- public ZoneObject {
- public:
- explicit Resource(Vector<const uc16> string): data_(string.start()) {
- length_ = string.length();
- }
- virtual const uint16_t* data() const { return data_; }
- virtual size_t length() const { return length_; }
-
- private:
- const uc16* data_;
- size_t length_;
- };
uc16* buf = Zone::NewArray<uc16>(len);
for (int j = 0; j < len; j++) {
buf[j] = gen() % 65536;
diff --git a/V8Binding/v8/test/es5conform/README b/V8Binding/v8/test/es5conform/README
new file mode 100644
index 0000000..a88f4a3
--- /dev/null
+++ b/V8Binding/v8/test/es5conform/README
@@ -0,0 +1,14 @@
+This directory contains code for binding the es5conform test suite
+into the v8 test harness. To use the tests, check out the es5conform
+tests from
+
+ https://es5conform.svn.codeplex.com/svn
+
+in revision 59101 as 'data' in this directory. Using a later version
+may be possible but the tests are only known to pass (and indeed run)
+with that revision.
+
+If you do update to a newer revision you may have to change the test
+harness adapter code since it uses internal functionality from the
+harness that comes bundled with the tests. You will most likely also
+have to update the test expectation file.
diff --git a/V8Binding/v8/test/es5conform/es5conform.status b/V8Binding/v8/test/es5conform/es5conform.status
new file mode 100644
index 0000000..49cffb2
--- /dev/null
+++ b/V8Binding/v8/test/es5conform/es5conform.status
@@ -0,0 +1,68 @@
+# Copyright 2009 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+prefix es5conform
+def UNIMPLEMENTED = PASS || FAIL
+def FAIL_OK = FAIL, OKAY
+
+chapter07: UNIMPLEMENTED
+chapter08: UNIMPLEMENTED
+chapter10: UNIMPLEMENTED
+chapter11: UNIMPLEMENTED
+chapter12: UNIMPLEMENTED
+chapter13: UNIMPLEMENTED
+chapter14: UNIMPLEMENTED
+chapter15/15.1: UNIMPLEMENTED
+chapter15/15.2/15.2.3/15.2.3.1: UNIMPLEMENTED
+chapter15/15.2/15.2.3/15.2.3.2: UNIMPLEMENTED
+chapter15/15.2/15.2.3/15.2.3.3: UNIMPLEMENTED
+chapter15/15.2/15.2.3/15.2.3.4: UNIMPLEMENTED
+chapter15/15.2/15.2.3/15.2.3.5: UNIMPLEMENTED
+chapter15/15.2/15.2.3/15.2.3.6: UNIMPLEMENTED
+chapter15/15.2/15.2.3/15.2.3.7: UNIMPLEMENTED
+chapter15/15.2/15.2.3/15.2.3.8: UNIMPLEMENTED
+chapter15/15.2/15.2.3/15.2.3.9: UNIMPLEMENTED
+chapter15/15.2/15.2.3/15.2.3.10: UNIMPLEMENTED
+chapter15/15.2/15.2.3/15.2.3.11: UNIMPLEMENTED
+chapter15/15.2/15.2.3/15.2.3.12: UNIMPLEMENTED
+chapter15/15.2/15.2.3/15.2.3.13: UNIMPLEMENTED
+
+# Object.keys
+chapter15/15.2/15.2.3/15.2.3.14: PASS
+
+# We fail this because Object.keys returns numbers for element indices
+# rather than strings.
+chapter15/15.2/15.2.3/15.2.3.14/15.2.3.14-3-3: FAIL_OK
+
+chapter15/15.3: UNIMPLEMENTED
+chapter15/15.4: UNIMPLEMENTED
+chapter15/15.5: UNIMPLEMENTED
+chapter15/15.6: UNIMPLEMENTED
+chapter15/15.7: UNIMPLEMENTED
+chapter15/15.9: UNIMPLEMENTED
+chapter15/15.10: UNIMPLEMENTED
+chapter15/15.12: UNIMPLEMENTED
diff --git a/V8Binding/v8/test/es5conform/harness-adapt.js b/V8Binding/v8/test/es5conform/harness-adapt.js
new file mode 100644
index 0000000..396d4ed
--- /dev/null
+++ b/V8Binding/v8/test/es5conform/harness-adapt.js
@@ -0,0 +1,74 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var global = this;
+
+function ES5Error(ut) {
+ this.ut = ut;
+}
+
+ES5Error.prototype.toString = function () {
+ return this.ut.res;
+};
+
+// The harness uses the IE specific .description property of exceptions but
+// that's nothing we can't hack our way around.
+Error.prototype.__defineGetter__('description', function () {
+ return this.message;
+});
+
+function TestHarness() {
+ sth.call(this, global);
+ this._testResults = []
+}
+
+// Borrow sth's registerTest method.
+TestHarness.prototype.registerTest = sth.prototype.registerTest;
+
+// Drop the before/after stuff, just run the test.
+TestHarness.prototype.startTesting = function () {
+ sth.prototype.run.call(this);
+ this.report();
+};
+
+TestHarness.prototype.report = function () {
+ for (var i = 0; i < this._testResults.length; i++) {
+ var ut = this._testResults[i];
+ // We don't fail on preconditions. Yet.
+ if (ut.res == "Precondition failed")
+ continue;
+ if (ut.res != 'pass')
+ throw new ES5Error(ut);
+ }
+};
+
+TestHarness.prototype.startingTest = function (ut) {
+ this.currentTest = ut;
+ this._testResults.push(ut);
+};
+
+var ES5Harness = new TestHarness();
diff --git a/V8Binding/v8/test/es5conform/testcfg.py b/V8Binding/v8/test/es5conform/testcfg.py
new file mode 100644
index 0000000..d1f23aa
--- /dev/null
+++ b/V8Binding/v8/test/es5conform/testcfg.py
@@ -0,0 +1,108 @@
+# Copyright 2008 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import test
+import os
+from os.path import join, exists
+
+
+HARNESS_FILES = ['sth.js']
+
+
+class ES5ConformTestCase(test.TestCase):
+
+ def __init__(self, filename, path, context, root, mode, framework):
+ super(ES5ConformTestCase, self).__init__(context, path)
+ self.filename = filename
+ self.mode = mode
+ self.framework = framework
+ self.root = root
+
+ def IsNegative(self):
+ return self.filename.endswith('-n.js')
+
+ def GetLabel(self):
+ return "%s es5conform %s" % (self.mode, self.GetName())
+
+ def IsFailureOutput(self, output):
+ if output.exit_code != 0:
+ return True
+ return 'FAILED!' in output.stdout
+
+ def GetCommand(self):
+ result = [self.context.GetVm(self.mode)]
+ result += ['-e', 'var window = this']
+ result += self.framework
+ result.append(self.filename)
+ result += ['-e', 'ES5Harness.startTesting()']
+ return result
+
+ def GetName(self):
+ return self.path[-1]
+
+ def GetSource(self):
+ return open(self.filename).read()
+
+
+class ES5ConformTestConfiguration(test.TestConfiguration):
+
+ def __init__(self, context, root):
+ super(ES5ConformTestConfiguration, self).__init__(context, root)
+
+ def ListTests(self, current_path, path, mode):
+ tests = []
+ current_root = join(self.root, 'data', 'TestCases')
+ harness = []
+ harness += [join(self.root, 'data', 'SimpleTestHarness', f) for f in HARNESS_FILES]
+ harness += [join(self.root, 'harness-adapt.js')]
+ for root, dirs, files in os.walk(current_root):
+ for dotted in [x for x in dirs if x.startswith('.')]:
+ dirs.remove(dotted)
+ root_path = root[len(self.root):].split(os.path.sep)
+ root_path = current_path + [x for x in root_path if x]
+ for file in files:
+ if file.endswith('.js'):
+ full_path = root_path + [file[:-3]]
+ full_path = [x for x in full_path if not (x in ['data', 'TestCases'])]
+ if self.Contains(path, full_path):
+ test = ES5ConformTestCase(join(root, file), full_path, self.context,
+ self.root, mode, harness)
+ tests.append(test)
+ return tests
+
+ def GetBuildRequirements(self):
+ return ['sample', 'sample=shell']
+
+ def GetTestStatus(self, sections, defs):
+ status_file = join(self.root, 'es5conform.status')
+ if exists(status_file):
+ test.ReadConfigurationInto(status_file, sections, defs)
+
+
+def GetConfiguration(context, root):
+ return ES5ConformTestConfiguration(context, root)
diff --git a/V8Binding/v8/test/mjsunit/arguments-enum.js b/V8Binding/v8/test/mjsunit/arguments-enum.js
index f76240f..3aee918 100644
--- a/V8Binding/v8/test/mjsunit/arguments-enum.js
+++ b/V8Binding/v8/test/mjsunit/arguments-enum.js
@@ -42,11 +42,11 @@ function setArgumentCount() {
}
assertEquals(0, countArguments());
-assertEquals(0, countArguments(1));
-assertEquals(0, countArguments(1, 2));
-assertEquals(0, countArguments(1, 2, 3, 4, 5));
+assertEquals(1, countArguments(1));
+assertEquals(2, countArguments(1, 2));
+assertEquals(5, countArguments(1, 2, 3, 4, 5));
-assertEquals(0, setArgumentCount());
-assertEquals(0, setArgumentCount(1));
-assertEquals(0, setArgumentCount(1, 2));
-assertEquals(0, setArgumentCount(1, 2, 3, 4, 5));
+assertEquals(2, setArgumentCount());
+assertEquals(3, setArgumentCount(1));
+assertEquals(4, setArgumentCount(1, 2));
+assertEquals(7, setArgumentCount(1, 2, 3, 4, 5));
diff --git a/V8Binding/v8/test/mjsunit/array-constructor.js b/V8Binding/v8/test/mjsunit/array-constructor.js
new file mode 100644
index 0000000..063ccde
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/array-constructor.js
@@ -0,0 +1,119 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+var loop_count = 5
+
+
+for (var i = 0; i < loop_count; i++) {
+ var a = new Array();
+ var b = Array();
+ assertEquals(0, a.length);
+ assertEquals(0, b.length);
+ for (var k = 0; k < 10; k++) {
+ assertEquals('undefined', typeof a[k]);
+ assertEquals('undefined', typeof b[k]);
+ }
+}
+
+
+for (var i = 0; i < loop_count; i++) {
+ for (var j = 0; j < 100; j++) {
+ var a = new Array(j);
+ var b = Array(j);
+ assertEquals(j, a.length);
+ assertEquals(j, b.length);
+ for (var k = 0; k < j; k++) {
+ assertEquals('undefined', typeof a[k]);
+ assertEquals('undefined', typeof b[k]);
+ }
+ }
+}
+
+
+for (var i = 0; i < loop_count; i++) {
+ a = new Array(0, 1);
+ assertArrayEquals([0, 1], a);
+ a = new Array(0, 1, 2);
+ assertArrayEquals([0, 1, 2], a);
+ a = new Array(0, 1, 2, 3);
+ assertArrayEquals([0, 1, 2, 3], a);
+ a = new Array(0, 1, 2, 3, 4);
+ assertArrayEquals([0, 1, 2, 3, 4], a);
+ a = new Array(0, 1, 2, 3, 4, 5);
+ assertArrayEquals([0, 1, 2, 3, 4, 5], a);
+ a = new Array(0, 1, 2, 3, 4, 5, 6);
+ assertArrayEquals([0, 1, 2, 3, 4, 5, 6], a);
+ a = new Array(0, 1, 2, 3, 4, 5, 6, 7);
+ assertArrayEquals([0, 1, 2, 3, 4, 5, 6, 7], a);
+ a = new Array(0, 1, 2, 3, 4, 5, 6, 7, 8);
+ assertArrayEquals([0, 1, 2, 3, 4, 5, 6, 7, 8], a);
+ a = new Array(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
+ assertArrayEquals([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], a);
+}
+
+
+function innerArrayLiteral(n) {
+ var a = new Array(n);
+ for (var i = 0; i < n; i++) {
+ a[i] = i * 2 + 7;
+ }
+ return a.join();
+}
+
+
+function testConstructOfSizeSize(n) {
+ var str = innerArrayLiteral(n);
+ var a = eval('[' + str + ']');
+ var b = eval('new Array(' + str + ')')
+ var c = eval('Array(' + str + ')')
+ assertEquals(n, a.length);
+ assertArrayEquals(a, b);
+ assertArrayEquals(a, c);
+}
+
+
+for (var i = 0; i < loop_count; i++) {
+ // JSObject::kInitialMaxFastElementArray is 10000.
+ for (var j = 1000; j < 12000; j += 1000) {
+ testConstructOfSizeSize(j);
+ }
+}
+
+
+for (var i = 0; i < loop_count; i++) {
+ assertArrayEquals(['xxx'], new Array('xxx'));
+ assertArrayEquals(['xxx'], Array('xxx'));
+ assertArrayEquals([true], new Array(true));
+ assertArrayEquals([false], Array(false));
+ assertArrayEquals([{a:1}], new Array({a:1}));
+ assertArrayEquals([{b:2}], Array({b:2}));
+}
+
+
+assertThrows('new Array(3.14)');
+assertThrows('Array(2.72)');
diff --git a/V8Binding/v8/test/mjsunit/array-splice.js b/V8Binding/v8/test/mjsunit/array-splice.js
index d308ef5..0543c32 100644
--- a/V8Binding/v8/test/mjsunit/array-splice.js
+++ b/V8Binding/v8/test/mjsunit/array-splice.js
@@ -309,3 +309,6 @@ Array.prototype[1] = 1;
assertEquals(1, arr.pop());
assertEquals(0, arr.pop());
Array.prototype[1] = undefined;
+
+// Test http://code.google.com/p/chromium/issues/detail?id=21860
+Array.prototype.push.apply([], [1].splice(0, -(-1 % 5)));
diff --git a/V8Binding/v8/test/mjsunit/class-of-builtins.js b/V8Binding/v8/test/mjsunit/class-of-builtins.js
index 40c958c..59fefff 100644
--- a/V8Binding/v8/test/mjsunit/class-of-builtins.js
+++ b/V8Binding/v8/test/mjsunit/class-of-builtins.js
@@ -35,7 +35,7 @@ var funs = {
Boolean: [ Boolean ],
Number: [ Number ],
Date: [ Date ],
- RegExp: [ RegExp ],
+ RegExp: [ RegExp ],
Error: [ Error, TypeError, RangeError, SyntaxError, ReferenceError, EvalError, URIError ]
}
for (f in funs) {
diff --git a/V8Binding/v8/test/mjsunit/debug-compile-event.js b/V8Binding/v8/test/mjsunit/debug-compile-event.js
index c346f76..4804ac7 100644
--- a/V8Binding/v8/test/mjsunit/debug-compile-event.js
+++ b/V8Binding/v8/test/mjsunit/debug-compile-event.js
@@ -102,10 +102,10 @@ Debug.setListener(listener);
// Compile different sources.
compileSource('a=1');
-compileSource('function(){}');
+compileSource('(function(){})');
compileSource('eval("a=2")');
source_count++; // Using eval causes additional compilation event.
-compileSource('eval("eval(\'function(){return a;}\')")');
+compileSource('eval("eval(\'(function(){return a;})\')")');
source_count += 2; // Using eval causes additional compilation event.
compileSource('JSON.parse("{a:1,b:2}")');
source_count++; // Using JSON.parse causes additional compilation event.
diff --git a/V8Binding/v8/test/mjsunit/debug-scopes.js b/V8Binding/v8/test/mjsunit/debug-scopes.js
index 7b477e1..e87cbb7 100644
--- a/V8Binding/v8/test/mjsunit/debug-scopes.js
+++ b/V8Binding/v8/test/mjsunit/debug-scopes.js
@@ -140,6 +140,11 @@ function CheckScopeContent(content, number, exec_state) {
if (!scope.scopeObject().property('arguments').isUndefined()) {
scope_size--;
}
+ // Also ignore synthetic variable from catch block.
+ if (!scope.scopeObject().property('.catch-var').isUndefined()) {
+ scope_size--;
+ }
+
if (count != scope_size) {
print('Names found in scope:');
var names = scope.scopeObject().propertyNames();
@@ -656,5 +661,101 @@ listener_delegate = function(exec_state) {
debugger;
EndTest();
+
+BeginTest("Catch block 1");
+function catch_block_1() {
+ try {
+ throw 'Exception';
+ } catch (e) {
+ debugger;
+ }
+};
+
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({e:'Exception'}, 0, exec_state);
+}
+catch_block_1()
+EndTest();
+
+
+BeginTest("Catch block 2");
+function catch_block_2() {
+ try {
+ throw 'Exception';
+ } catch (e) {
+ with({n:10}) {
+ debugger;
+ }
+ }
+};
+
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.With,
+ debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({n:10}, 0, exec_state);
+ CheckScopeContent({e:'Exception'}, 1, exec_state);
+}
+catch_block_2()
+EndTest();
+
+
+BeginTest("Catch block 3");
+function catch_block_1() {
+ // Do eval to dynamically declare a local variable so that the context's
+ // extension slot is initialized with JSContextExtensionObject.
+ eval("var y = 78;");
+ try {
+ throw 'Exception';
+ } catch (e) {
+ debugger;
+ }
+};
+
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({e:'Exception'}, 0, exec_state);
+ CheckScopeContent({y:78}, 1, exec_state);
+}
+catch_block_1()
+EndTest();
+
+
+BeginTest("Catch block 4");
+function catch_block_2() {
+ // Do eval to dynamically declare a local variable so that the context's
+ // extension slot is initialized with JSContextExtensionObject.
+ eval("var y = 98;");
+ try {
+ throw 'Exception';
+ } catch (e) {
+ with({n:10}) {
+ debugger;
+ }
+ }
+};
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.With,
+ debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({n:10}, 0, exec_state);
+ CheckScopeContent({e:'Exception'}, 1, exec_state);
+ CheckScopeContent({y:98}, 2, exec_state);
+}
+catch_block_2()
+EndTest();
+
+
assertEquals(begin_test_count, break_count, 'one or more tests did not enter the debugger');
assertEquals(begin_test_count, end_test_count, 'one or more tests did not have its result checked');
diff --git a/V8Binding/v8/test/mjsunit/debug-stepout-recursive-function.js b/V8Binding/v8/test/mjsunit/debug-stepout-recursive-function.js
new file mode 100644
index 0000000..2f8780c
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/debug-stepout-recursive-function.js
@@ -0,0 +1,106 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null;
+var step_out_count = 1;
+
+// Simple debug event handler which counts the number of breaks hit and steps.
+var break_point_hit_count = 0;
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ break_point_hit_count++;
+ // Continue stepping until returned to bottom frame.
+ if (exec_state.frameCount() > 1) {
+ exec_state.prepareStep(Debug.StepAction.StepOut, step_out_count);
+ }
+
+ }
+ } catch(e) {
+ exception = e;
+ }
+
+};
+
+function BeginTest(name) {
+ test_name = name;
+ break_point_hit_count = 0;
+ exception = null;
+}
+
+function EndTest(expected_break_point_hit_count) {
+ assertEquals(expected_break_point_hit_count, break_point_hit_count, test_name);
+ assertNull(exception, test_name);
+ test_name = null;
+}
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+
+var shouldBreak = null;
+function fact(x) {
+ if (shouldBreak(x)) {
+ debugger;
+ }
+ if (x < 2) {
+ return 1;
+ } else {
+ return x*fact(x-1);
+ }
+}
+
+BeginTest('Test 1');
+shouldBreak = function(x) { return x == 3; };
+step_out_count = 1;
+fact(3);
+EndTest(2);
+
+BeginTest('Test 2');
+shouldBreak = function(x) { return x == 2; };
+step_out_count = 1;
+fact(3);
+EndTest(3);
+
+BeginTest('Test 3');
+shouldBreak = function(x) { return x == 1; };
+step_out_count = 2;
+fact(3);
+EndTest(2);
+
+BeginTest('Test 4');
+shouldBreak = function(x) { print(x); return x == 1 || x == 3; };
+step_out_count = 2;
+fact(3);
+EndTest(3);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/V8Binding/v8/test/mjsunit/debug-stepout-to-builtin.js b/V8Binding/v8/test/mjsunit/debug-stepout-to-builtin.js
new file mode 100644
index 0000000..486eee0
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/debug-stepout-to-builtin.js
@@ -0,0 +1,84 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null;
+var state = 1;
+var expected_source_line_text = null;
+var expected_function_name = null;
+
+// Simple debug event handler which the first time causes a 'step out' action
+// and then checks that execution is paused inside the function
+// expected_function_name.
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ if (state == 1) {
+ exec_state.prepareStep(Debug.StepAction.StepOut, 2);
+ state = 2;
+ } else if (state == 2) {
+ assertEquals(expected_function_name, event_data.func().name());
+ assertEquals(expected_source_line_text,
+ event_data.sourceLineText());
+ state = 3;
+ }
+ }
+ } catch(e) {
+ exception = e;
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+var obj = {key:10};
+
+function replacer(key, value) {
+ if (key == 'key') {
+ debugger;
+ }
+ return value;
+}
+
+// Test stepping out from a builtin call in a function without local variables.
+function testStepOutToBuiltIn() {
+ expected_function_name = 'testStepOutToBuiltIn';
+ expected_source_line_text = '} // expected line';
+ JSON.stringify(obj, replacer);
+} // expected line
+
+state = 1;
+testStepOutToBuiltIn();
+assertNull(exception);
+assertEquals(3, state);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/V8Binding/v8/test/mjsunit/function-prototype.js b/V8Binding/v8/test/mjsunit/function-prototype.js
index 371311e..c5a5487 100644
--- a/V8Binding/v8/test/mjsunit/function-prototype.js
+++ b/V8Binding/v8/test/mjsunit/function-prototype.js
@@ -90,8 +90,9 @@ assertEquals(F.prototype, GetPrototypeOf(F));
// in GetPrototypeOf and go to a monomorphic IC load instead.
assertEquals(87, GetPrototypeOf({prototype:87}));
-// Check the prototype is enumerable as specified in ECMA262, 15.3.5.2
+// Check that the prototype is not enumerable, for compatibility with
+// Safari. This is deliberately incompatible with ECMA262, 15.3.5.2.
var foo = new Function("return x");
var result = ""
for (var n in foo) result += n;
-assertEquals(result, "prototype");
+assertEquals(result, "");
diff --git a/V8Binding/v8/test/mjsunit/invalid-lhs.js b/V8Binding/v8/test/mjsunit/invalid-lhs.js
index bbd19f2..ef63add 100644
--- a/V8Binding/v8/test/mjsunit/invalid-lhs.js
+++ b/V8Binding/v8/test/mjsunit/invalid-lhs.js
@@ -25,9 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Test that we get exceptions for invalid left-hand sides. Also
-// tests that if the invalid left-hand side is a function call, the
-// exception is delayed until runtime.
+// Test that we get exceptions for invalid left-hand sides. The
+// exceptions are delayed until runtime.
// Normal assignments:
assertThrows("12 = 12");
@@ -57,12 +56,10 @@ assertDoesNotThrow("if (false) for (eval('var x') = 1;;) print(12);");
// Assignments to 'this'.
assertThrows("this = 42");
-assertThrows("function f() { this = 12; }");
-assertThrows("for (this in Array) ;");
+assertDoesNotThrow("function f() { this = 12; }");
+assertThrows("for (this in {x:3, y:4, z:5}) ;");
assertThrows("for (this = 0;;) ;");
assertThrows("this++");
assertThrows("++this");
assertThrows("this--");
assertThrows("--this");
-
-
diff --git a/V8Binding/v8/test/mjsunit/array-splice-webkit.js b/V8Binding/v8/test/mjsunit/invalid-source-element.js
index 113a56a..fb012e2 100644
--- a/V8Binding/v8/test/mjsunit/array-splice-webkit.js
+++ b/V8Binding/v8/test/mjsunit/invalid-source-element.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,36 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Simple splice tests based on webkit layout tests.
-var arr = ['a','b','c','d'];
-assertArrayEquals(['a','b','c','d'], arr);
-assertArrayEquals(['c','d'], arr.splice(2));
-assertArrayEquals(['a','b'], arr);
-assertArrayEquals(['a','b'], arr.splice(0));
-assertArrayEquals([], arr)
-
-arr = ['a','b','c','d'];
-assertEquals(undefined, arr.splice())
-assertArrayEquals(['a','b','c','d'], arr);
-assertArrayEquals(['a','b','c','d'], arr.splice(undefined))
-assertArrayEquals([], arr);
-
-arr = ['a','b','c','d'];
-assertArrayEquals(['a','b','c','d'], arr.splice(null))
-assertArrayEquals([], arr);
-
-arr = ['a','b','c','d'];
-assertArrayEquals([], arr.splice(100))
-assertArrayEquals(['a','b','c','d'], arr);
-assertArrayEquals(['d'], arr.splice(-1))
-assertArrayEquals(['a','b','c'], arr);
-
-assertArrayEquals([], arr.splice(2, undefined))
-assertArrayEquals([], arr.splice(2, null))
-assertArrayEquals([], arr.splice(2, -1))
-assertArrayEquals([], arr.splice(2, 0))
-assertArrayEquals(['a','b','c'], arr);
-assertArrayEquals(['c'], arr.splice(2, 100))
-assertArrayEquals(['a','b'], arr);
-
+// A function expression with no parentheses around it is not a valid
+// expression statement.
+assertThrows("eval('function() {}')");
diff --git a/V8Binding/v8/test/mjsunit/mirror-script.js b/V8Binding/v8/test/mjsunit/mirror-script.js
index 9b67b9b..3208f16 100644
--- a/V8Binding/v8/test/mjsunit/mirror-script.js
+++ b/V8Binding/v8/test/mjsunit/mirror-script.js
@@ -85,16 +85,16 @@ function testScriptMirror(f, file_name, file_lines, type, compilation_type,
// Test the script mirror for different functions.
testScriptMirror(function(){}, 'mirror-script.js', 100, 2, 0);
testScriptMirror(Math.sin, 'native math.js', -1, 0, 0);
-testScriptMirror(eval('function(){}'), null, 1, 2, 1, 'function(){}', 87);
-testScriptMirror(eval('function(){\n }'), null, 2, 2, 1, 'function(){\n }', 88);
+testScriptMirror(eval('(function(){})'), null, 1, 2, 1, '(function(){})', 87);
+testScriptMirror(eval('(function(){\n })'), null, 2, 2, 1, '(function(){\n })', 88);
testScriptMirror(%CompileString("({a:1,b:2})", true), null, 1, 2, 2, '({a:1,b:2})');
testScriptMirror(%CompileString("({a:1,\n b:2})", true), null, 2, 2, 2, '({a:1,\n b:2})');
// Test taking slices of source.
-var mirror = debug.MakeMirror(eval('function(){\n 1;\n}')).script();
-assertEquals('function(){\n', mirror.sourceSlice(0, 1).sourceText());
+var mirror = debug.MakeMirror(eval('(function(){\n 1;\n})')).script();
+assertEquals('(function(){\n', mirror.sourceSlice(0, 1).sourceText());
assertEquals(' 1;\n', mirror.sourceSlice(1, 2).sourceText());
-assertEquals('}', mirror.sourceSlice(2, 3).sourceText());
-assertEquals('function(){\n 1;\n', mirror.sourceSlice(0, 2).sourceText());
-assertEquals(' 1;\n}', mirror.sourceSlice(1, 3).sourceText());
-assertEquals('function(){\n 1;\n}', mirror.sourceSlice(0, 3).sourceText());
+assertEquals('})', mirror.sourceSlice(2, 3).sourceText());
+assertEquals('(function(){\n 1;\n', mirror.sourceSlice(0, 2).sourceText());
+assertEquals(' 1;\n})', mirror.sourceSlice(1, 3).sourceText());
+assertEquals('(function(){\n 1;\n})', mirror.sourceSlice(0, 3).sourceText());
diff --git a/V8Binding/v8/test/mjsunit/mjsunit.status b/V8Binding/v8/test/mjsunit/mjsunit.status
index 839329d..0b069cc 100644
--- a/V8Binding/v8/test/mjsunit/mjsunit.status
+++ b/V8Binding/v8/test/mjsunit/mjsunit.status
@@ -41,33 +41,11 @@ big-object-literal: PASS, SKIP if ($arch == arm)
# Slow tests which times out in debug mode.
try: PASS, SKIP if $mode == debug
debug-scripts-request: PASS, SKIP if $mode == debug
+array-constructor: PASS, SKIP if $mode == debug
# Flaky test that can hit compilation-time stack overflow in debug mode.
unicode-test: PASS, (PASS || FAIL) if $mode == debug
-# Bug number 1020483: Debug tests fail on ARM.
-debug-constructor: CRASH, FAIL
-debug-continue: SKIP
-debug-evaluate-recursive: CRASH || FAIL
-debug-changebreakpoint: CRASH || FAIL
-debug-clearbreakpoint: CRASH || FAIL
-debug-clearbreakpointgroup: PASS, FAIL if $mode == debug
-debug-conditional-breakpoints: CRASH || FAIL
-debug-evaluate: CRASH || FAIL
-debug-ignore-breakpoints: CRASH || FAIL
-debug-multiple-breakpoints: CRASH || FAIL
-debug-setbreakpoint: CRASH || FAIL || PASS
-debug-step-stub-callfunction: SKIP
-debug-stepin-accessor: CRASH || FAIL
-debug-stepin-builtin: CRASH || FAIL
-debug-stepin-call-function-stub: CRASH || FAIL
-debug-stepin-constructor: CRASH, FAIL
-debug-stepin-function-call: CRASH || FAIL
-debug-step: SKIP
-debug-breakpoints: PASS || FAIL
-debug-handle: CRASH || FAIL || PASS
-regress/regress-269: SKIP
-
# Bug number 130 http://code.google.com/p/v8/issues/detail?id=130
# Fails on real ARM hardware but not on the simulator.
string-compare-alignment: PASS || FAIL
diff --git a/V8Binding/v8/test/mjsunit/regress/regress-220.js b/V8Binding/v8/test/mjsunit/regress/regress-220.js
index 416aa41..32c6471 100644
--- a/V8Binding/v8/test/mjsunit/regress/regress-220.js
+++ b/V8Binding/v8/test/mjsunit/regress/regress-220.js
@@ -28,4 +28,4 @@
function foo(f) { eval(f); }
// Ensure that compiling a declaration of a function does not crash.
-foo("function (x) { with ({x: []}) function x(){} }");
+foo("(function (x) { with ({x: []}) function x(){} })");
diff --git a/V8Binding/v8/test/mjsunit/smi-negative-zero.js b/V8Binding/v8/test/mjsunit/smi-negative-zero.js
index afeb6de..719ee49 100644
--- a/V8Binding/v8/test/mjsunit/smi-negative-zero.js
+++ b/V8Binding/v8/test/mjsunit/smi-negative-zero.js
@@ -47,40 +47,40 @@ assertEquals(one / (minus_one * minus_one), 1, "one / 1");
assertEquals(one / (zero / minus_one), -Infinity, "one / -0 III");
assertEquals(one / (zero / one), Infinity, "one / 0 II");
-assertEquals(one / (minus_four % two), -Infinity, "foo");
-assertEquals(one / (minus_four % minus_two), -Infinity, "foo");
-assertEquals(one / (four % two), Infinity, "foo");
-assertEquals(one / (four % minus_two), Infinity, "foo");
+assertEquals(one / (minus_four % two), -Infinity, "foo1");
+assertEquals(one / (minus_four % minus_two), -Infinity, "foo2");
+assertEquals(one / (four % two), Infinity, "foo3");
+assertEquals(one / (four % minus_two), Infinity, "foo4");
// literal op variable
-assertEquals(one / (0 * minus_one), -Infinity, "bar");
-assertEquals(one / (-1 * zero), -Infinity, "bar");
-assertEquals(one / (0 * zero), Infinity, "bar");
-assertEquals(one / (-1 * minus_one), 1, "bar");
+assertEquals(one / (0 * minus_one), -Infinity, "bar1");
+assertEquals(one / (-1 * zero), -Infinity, "bar2");
+assertEquals(one / (0 * zero), Infinity, "bar3");
+assertEquals(one / (-1 * minus_one), 1, "bar4");
-assertEquals(one / (0 / minus_one), -Infinity, "baz");
-assertEquals(one / (0 / one), Infinity, "baz");
+assertEquals(one / (0 / minus_one), -Infinity, "baz1");
+assertEquals(one / (0 / one), Infinity, "baz2");
-assertEquals(one / (-4 % two), -Infinity, "baz");
-assertEquals(one / (-4 % minus_two), -Infinity, "baz");
-assertEquals(one / (4 % two), Infinity, "baz");
-assertEquals(one / (4 % minus_two), Infinity, "baz");
+assertEquals(one / (-4 % two), -Infinity, "baz3");
+assertEquals(one / (-4 % minus_two), -Infinity, "baz4");
+assertEquals(one / (4 % two), Infinity, "baz5");
+assertEquals(one / (4 % minus_two), Infinity, "baz6");
// variable op literal
-assertEquals(one / (zero * -1), -Infinity, "fizz");
-assertEquals(one / (minus_one * 0), -Infinity, "fizz");
-assertEquals(one / (zero * 0), Infinity, "fizz");
-assertEquals(one / (minus_one * -1), 1, "fizz");
+assertEquals(one / (zero * -1), -Infinity, "fizz1");
+assertEquals(one / (minus_one * 0), -Infinity, "fizz2");
+assertEquals(one / (zero * 0), Infinity, "fizz3");
+assertEquals(one / (minus_one * -1), 1, "fizz4");
-assertEquals(one / (zero / -1), -Infinity, "buzz");
-assertEquals(one / (zero / 1), Infinity, "buzz");
+assertEquals(one / (zero / -1), -Infinity, "buzz1");
+assertEquals(one / (zero / 1), Infinity, "buzz2");
-assertEquals(one / (minus_four % 2), -Infinity, "buzz");
-assertEquals(one / (minus_four % -2), -Infinity, "buzz");
-assertEquals(one / (four % 2), Infinity, "buzz");
-assertEquals(one / (four % -2), Infinity, "buzz");
+assertEquals(one / (minus_four % 2), -Infinity, "buzz3");
+assertEquals(one / (minus_four % -2), -Infinity, "buzz4");
+assertEquals(one / (four % 2), Infinity, "buzz5");
+assertEquals(one / (four % -2), Infinity, "buzz6");
// literal op literal
@@ -91,10 +91,10 @@ assertEquals(one / (-1 * 0), -Infinity, "fisk3");
assertEquals(one / (0 * 0), Infinity, "fisk4");
assertEquals(one / (-1 * -1), 1, "fisk5");
-assertEquals(one / (0 / -1), -Infinity, "hest");
-assertEquals(one / (0 / 1), Infinity, "hest");
+assertEquals(one / (0 / -1), -Infinity, "hest1");
+assertEquals(one / (0 / 1), Infinity, "hest2");
-assertEquals(one / (-4 % 2), -Infinity, "fiskhest");
-assertEquals(one / (-4 % -2), -Infinity, "fiskhest");
-assertEquals(one / (4 % 2), Infinity, "fiskhest");
-assertEquals(one / (4 % -2), Infinity, "fiskhest");
+assertEquals(one / (-4 % 2), -Infinity, "fiskhest1");
+assertEquals(one / (-4 % -2), -Infinity, "fiskhest2");
+assertEquals(one / (4 % 2), Infinity, "fiskhest3");
+assertEquals(one / (4 % -2), Infinity, "fiskhest4");
diff --git a/V8Binding/v8/test/mjsunit/switch.js b/V8Binding/v8/test/mjsunit/switch.js
index 4044490..180f994 100644
--- a/V8Binding/v8/test/mjsunit/switch.js
+++ b/V8Binding/v8/test/mjsunit/switch.js
@@ -269,7 +269,7 @@ assertEquals("A", f7((170/16)-(170%16/16)), "0-1-switch.heapnum");
function makeVeryLong(length) {
- var res = "function() {\n" +
+ var res = "(function () {\n" +
" var res = 0;\n" +
" for (var i = 0; i <= " + length + "; i++) {\n" +
" switch(i) {\n";
@@ -280,7 +280,7 @@ function makeVeryLong(length) {
" }\n" +
" }\n" +
" return res;\n" +
- "}";
+ "})";
return eval(res);
}
var verylong_size = 1000;
diff --git a/V8Binding/v8/test/mjsunit/testcfg.py b/V8Binding/v8/test/mjsunit/testcfg.py
index 97924c8..e3f3fcd 100644
--- a/V8Binding/v8/test/mjsunit/testcfg.py
+++ b/V8Binding/v8/test/mjsunit/testcfg.py
@@ -112,8 +112,9 @@ class MjsunitTestConfiguration(test.TestConfiguration):
mjsunit = [current_path + [t] for t in self.Ls(self.root)]
regress = [current_path + ['regress', t] for t in self.Ls(join(self.root, 'regress'))]
bugs = [current_path + ['bugs', t] for t in self.Ls(join(self.root, 'bugs'))]
+ third_party = [current_path + ['third_party', t] for t in self.Ls(join(self.root, 'third_party'))]
tools = [current_path + ['tools', t] for t in self.Ls(join(self.root, 'tools'))]
- all_tests = mjsunit + regress + bugs + tools
+ all_tests = mjsunit + regress + bugs + third_party + tools
result = []
for test in all_tests:
if self.Contains(path, test):
diff --git a/V8Binding/v8/test/mjsunit/third_party/array-splice-webkit.js b/V8Binding/v8/test/mjsunit/third_party/array-splice-webkit.js
new file mode 100644
index 0000000..b676a7c
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/third_party/array-splice-webkit.js
@@ -0,0 +1,62 @@
+// Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// 3. Neither the name of the copyright holder(s) nor the names of any
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Simple splice tests based on webkit layout tests.
+var arr = ['a','b','c','d'];
+assertArrayEquals(['a','b','c','d'], arr);
+assertArrayEquals(['c','d'], arr.splice(2));
+assertArrayEquals(['a','b'], arr);
+assertArrayEquals(['a','b'], arr.splice(0));
+assertArrayEquals([], arr)
+
+arr = ['a','b','c','d'];
+assertEquals(undefined, arr.splice())
+assertArrayEquals(['a','b','c','d'], arr);
+assertArrayEquals(['a','b','c','d'], arr.splice(undefined))
+assertArrayEquals([], arr);
+
+arr = ['a','b','c','d'];
+assertArrayEquals(['a','b','c','d'], arr.splice(null))
+assertArrayEquals([], arr);
+
+arr = ['a','b','c','d'];
+assertArrayEquals([], arr.splice(100))
+assertArrayEquals(['a','b','c','d'], arr);
+assertArrayEquals(['d'], arr.splice(-1))
+assertArrayEquals(['a','b','c'], arr);
+
+assertArrayEquals([], arr.splice(2, undefined))
+assertArrayEquals([], arr.splice(2, null))
+assertArrayEquals([], arr.splice(2, -1))
+assertArrayEquals([], arr.splice(2, 0))
+assertArrayEquals(['a','b','c'], arr);
+assertArrayEquals(['c'], arr.splice(2, 100))
+assertArrayEquals(['a','b'], arr);
diff --git a/V8Binding/v8/test/mjsunit/third_party/object-keys.js b/V8Binding/v8/test/mjsunit/third_party/object-keys.js
new file mode 100644
index 0000000..999ce70
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/third_party/object-keys.js
@@ -0,0 +1,68 @@
+// Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// 3. Neither the name of the copyright holder(s) nor the names of any
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Based on LayoutTests/fast/js/Object-keys.html
+
+assertThrows(function () { Object.keys(2) }, TypeError);
+assertThrows(function () { Object.keys("foo") }, TypeError);
+assertThrows(function () { Object.keys(null) }, TypeError);
+assertThrows(function () { Object.keys(undefined) }, TypeError);
+
+assertEquals(Object.keys({}), []);
+assertEquals(Object.keys({a:null}), ['a']);
+assertEquals(Object.keys({a:null, b:null}), ['a', 'b']);
+assertEquals(Object.keys({b:null, a:null}), ['b', 'a']);
+assertEquals(Object.keys([]), []);
+assertEquals(Object.keys([null]), ['0']);
+assertEquals(Object.keys([null,null]), ['0', '1']);
+assertEquals(Object.keys([null,null,,,,null]), ['0', '1', '5']);
+assertEquals(Object.keys({__proto__:{a:null}}), []);
+assertEquals(Object.keys({__proto__:[1,2,3]}), []);
+var x = [];
+x.__proto__ = [1, 2, 3];
+assertEquals(Object.keys(x), []);
+assertEquals(Object.keys(function () {}), []);
+
+assertEquals('string', typeof(Object.keys([1])[0]));
+
+function argsTest(a, b, c) {
+ assertEquals([0, 1, 2], Object.keys(arguments));
+}
+
+argsTest(1, 2, 3);
+
+var literal = {a: 1, b: 2, c: 3};
+var keysBefore = Object.keys(literal);
+assertEquals(['a', 'b', 'c'], keysBefore);
+keysBefore[0] = 'x';
+var keysAfter = Object.keys(literal);
+assertEquals(['a', 'b', 'c'], keysAfter);
+assertEquals(['x', 'b', 'c'], keysBefore);
diff --git a/V8Binding/v8/test/mjsunit/regexp-pcre.js b/V8Binding/v8/test/mjsunit/third_party/regexp-pcre.js
index dcb1b32..dcb1b32 100644
--- a/V8Binding/v8/test/mjsunit/regexp-pcre.js
+++ b/V8Binding/v8/test/mjsunit/third_party/regexp-pcre.js
diff --git a/V8Binding/v8/test/mozilla/mozilla.status b/V8Binding/v8/test/mozilla/mozilla.status
index 9793dc8..c92bfa6 100644
--- a/V8Binding/v8/test/mozilla/mozilla.status
+++ b/V8Binding/v8/test/mozilla/mozilla.status
@@ -217,6 +217,8 @@ js1_5/Function/regress-338121-01: FAIL_OK
js1_5/Function/regress-338121-02: FAIL_OK
js1_5/Function/regress-338121-03: FAIL_OK
+# Expects the 'prototype' property of functions to be enumerable.
+js1_5/Function/10.1.6-01: FAIL_OK
# Length of objects whose prototype chain includes a function
ecma_3/Function/regress-313570: FAIL_OK
@@ -567,11 +569,6 @@ js1_5/Array/regress-350256-02: FAIL
ecma_3/Function/regress-137181: FAIL
-# Tests that rely on specific details of function decompilation or
-# print strings for errors. Non-ECMA behavior.
-js1_4/Regress/function-003: FAIL
-
-
# 'export' and 'import' are not keywords in V8.
ecma_2/Exceptions/lexical-010: FAIL
ecma_2/Exceptions/lexical-022: FAIL
diff --git a/V8Binding/v8/tools/gyp/v8.gyp b/V8Binding/v8/tools/gyp/v8.gyp
index 1222ea9..46a00f4 100644
--- a/V8Binding/v8/tools/gyp/v8.gyp
+++ b/V8Binding/v8/tools/gyp/v8.gyp
@@ -34,12 +34,10 @@
'v8_use_snapshot%': 'true',
'v8_regexp%': 'native',
},
- 'includes': [
- '../../../build/common.gypi',
- ],
'target_defaults': {
'defines': [
'ENABLE_LOGGING_AND_PROFILING',
+ 'ENABLE_DEBUGGER_SUPPORT',
],
'conditions': [
['target_arch=="arm"', {
@@ -89,7 +87,7 @@
'-O3',
],
'conditions': [
- [ 'gcc_version=="44"', {
+ [ 'gcc_version==44', {
'cflags': [
# Avoid gcc 4.4 strict aliasing issues in dtoa.c
'-fno-strict-aliasing',
@@ -221,8 +219,6 @@
'../../src/builtins.cc',
'../../src/builtins.h',
'../../src/bytecodes-irregexp.h',
- '../../src/cfg.cc',
- '../../src/cfg.h',
'../../src/char-predicates-inl.h',
'../../src/char-predicates.h',
'../../src/checks.cc',
@@ -281,6 +277,8 @@
'../../src/heap-inl.h',
'../../src/heap.cc',
'../../src/heap.h',
+ '../../src/heap-profiler.cc',
+ '../../src/heap-profiler.h',
'../../src/ic-inl.h',
'../../src/ic.cc',
'../../src/ic.h',
@@ -390,7 +388,6 @@
'../../src/arm/assembler-arm.cc',
'../../src/arm/assembler-arm.h',
'../../src/arm/builtins-arm.cc',
- '../../src/arm/cfg-arm.cc',
'../../src/arm/codegen-arm.cc',
'../../src/arm/codegen-arm.h',
'../../src/arm/constants-arm.h',
@@ -421,7 +418,6 @@
'../../src/ia32/assembler-ia32.cc',
'../../src/ia32/assembler-ia32.h',
'../../src/ia32/builtins-ia32.cc',
- '../../src/ia32/cfg-ia32.cc',
'../../src/ia32/codegen-ia32.cc',
'../../src/ia32/codegen-ia32.h',
'../../src/ia32/cpu-ia32.cc',
@@ -450,7 +446,6 @@
'../../src/x64/assembler-x64.cc',
'../../src/x64/assembler-x64.h',
'../../src/x64/builtins-x64.cc',
- '../../src/x64/cfg-x64.cc',
'../../src/x64/codegen-x64.cc',
'../../src/x64/codegen-x64.h',
'../../src/x64/cpu-x64.cc',
diff --git a/V8Binding/v8/tools/js2c.py b/V8Binding/v8/tools/js2c.py
index cae39e8..2b7dbdf 100755
--- a/V8Binding/v8/tools/js2c.py
+++ b/V8Binding/v8/tools/js2c.py
@@ -52,20 +52,6 @@ def RemoveCommentsAndTrailingWhitespace(lines):
return lines
-def CompressScript(lines, do_jsmin):
- # If we're not expecting this code to be user visible, we can run it through
- # a more aggressive minifier.
- if do_jsmin:
- return jsmin.jsmin(lines)
-
- # Remove stuff from the source that we don't want to appear when
- # people print the source code using Function.prototype.toString().
- # Note that we could easily compress the scripts mode but don't
- # since we want it to remain readable.
- lines = RemoveCommentsAndTrailingWhitespace(lines)
- return lines
-
-
def ReadFile(filename):
file = open(filename, "rt")
try:
@@ -295,16 +281,18 @@ def JS2C(source, target, env):
# Build source code lines
source_lines = [ ]
+
+ minifier = jsmin.JavaScriptMinifier()
+
source_lines_empty = []
for module in modules:
filename = str(module)
delay = filename.endswith('-delay.js')
lines = ReadFile(filename)
- do_jsmin = lines.find('// jsminify this file, js2c: jsmin') != -1
lines = ExpandConstants(lines, consts)
lines = ExpandMacros(lines, macros)
Validate(lines, filename)
- lines = CompressScript(lines, do_jsmin)
+ lines = minifier.JSMinify(lines)
data = ToCArray(lines)
id = (os.path.split(filename)[1])[:-3]
if delay: id = id[:-6]
diff --git a/V8Binding/v8/tools/jsmin.py b/V8Binding/v8/tools/jsmin.py
index ae75814..fd1abe4 100644
--- a/V8Binding/v8/tools/jsmin.py
+++ b/V8Binding/v8/tools/jsmin.py
@@ -1,218 +1,278 @@
-#!/usr/bin/python
-
-# This code is original from jsmin by Douglas Crockford, it was translated to
-# Python by Baruch Even. The original code had the following copyright and
-# license.
-#
-# /* jsmin.c
-# 2007-05-22
-#
-# Copyright (c) 2002 Douglas Crockford (www.crockford.com)
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy of
-# this software and associated documentation files (the "Software"), to deal in
-# the Software without restriction, including without limitation the rights to
-# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-# of the Software, and to permit persons to whom the Software is furnished to do
-# so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-#
-# The Software shall be used for Good, not Evil.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-# */
-
-from StringIO import StringIO
-
-def jsmin(js):
- ins = StringIO(js)
- outs = StringIO()
- JavascriptMinify().minify(ins, outs)
- str = outs.getvalue()
- if len(str) > 0 and str[0] == '\n':
- str = str[1:]
- return str
-
-def isAlphanum(c):
- """return true if the character is a letter, digit, underscore,
- dollar sign, or non-ASCII character.
- """
- return ((c >= 'a' and c <= 'z') or (c >= '0' and c <= '9') or
- (c >= 'A' and c <= 'Z') or c == '_' or c == '$' or c == '\\' or (c is not None and ord(c) > 126));
-
-class UnterminatedComment(Exception):
- pass
-
-class UnterminatedStringLiteral(Exception):
- pass
-
-class UnterminatedRegularExpression(Exception):
- pass
-
-class JavascriptMinify(object):
-
- def _outA(self):
- self.outstream.write(self.theA)
- def _outB(self):
- self.outstream.write(self.theB)
-
- def _get(self):
- """return the next character from stdin. Watch out for lookahead. If
- the character is a control character, translate it to a space or
- linefeed.
- """
- c = self.theLookahead
- self.theLookahead = None
- if c == None:
- c = self.instream.read(1)
- if c >= ' ' or c == '\n':
- return c
- if c == '': # EOF
- return '\000'
- if c == '\r':
- return '\n'
- return ' '
-
- def _peek(self):
- self.theLookahead = self._get()
- return self.theLookahead
-
- def _next(self):
- """get the next character, excluding comments. peek() is used to see
- if an unescaped '/' is followed by a '/' or '*'.
- """
- c = self._get()
- if c == '/' and self.theA != '\\':
- p = self._peek()
- if p == '/':
- c = self._get()
- while c > '\n':
- c = self._get()
- return c
- if p == '*':
- c = self._get()
- while 1:
- c = self._get()
- if c == '*':
- if self._peek() == '/':
- self._get()
- return ' '
- if c == '\000':
- raise UnterminatedComment()
-
- return c
-
- def _action(self, action):
- """do something! What you do is determined by the argument:
- 1 Output A. Copy B to A. Get the next B.
- 2 Copy B to A. Get the next B. (Delete A).
- 3 Get the next B. (Delete B).
- action treats a string as a single character. Wow!
- action recognizes a regular expression if it is preceded by ( or , or =.
- """
- if action <= 1:
- self._outA()
-
- if action <= 2:
- self.theA = self.theB
- if self.theA == "'" or self.theA == '"':
- while 1:
- self._outA()
- self.theA = self._get()
- if self.theA == self.theB:
- break
- if self.theA <= '\n':
- raise UnterminatedStringLiteral()
- if self.theA == '\\':
- self._outA()
- self.theA = self._get()
-
-
- if action <= 3:
- self.theB = self._next()
- if self.theB == '/' and (self.theA == '(' or self.theA == ',' or
- self.theA == '=' or self.theA == ':' or
- self.theA == '[' or self.theA == '?' or
- self.theA == '!' or self.theA == '&' or
- self.theA == '|' or self.theA == ';' or
- self.theA == '{' or self.theA == '}' or
- self.theA == '\n'):
- self._outA()
- self._outB()
- while 1:
- self.theA = self._get()
- if self.theA == '/':
- break
- elif self.theA == '\\':
- self._outA()
- self.theA = self._get()
- elif self.theA <= '\n':
- raise UnterminatedRegularExpression()
- self._outA()
- self.theB = self._next()
-
-
- def _jsmin(self):
- """Copy the input to the output, deleting the characters which are
- insignificant to JavaScript. Comments will be removed. Tabs will be
- replaced with spaces. Carriage returns will be replaced with linefeeds.
- Most spaces and linefeeds will be removed.
- """
- self.theA = '\n'
- self._action(3)
-
- while self.theA != '\000':
- if self.theA == ' ':
- if isAlphanum(self.theB):
- self._action(1)
- else:
- self._action(2)
- elif self.theA == '\n':
- if self.theB in ['{', '[', '(', '+', '-']:
- self._action(1)
- elif self.theB == ' ':
- self._action(3)
- else:
- if isAlphanum(self.theB):
- self._action(1)
- else:
- self._action(2)
- else:
- if self.theB == ' ':
- if isAlphanum(self.theA):
- self._action(1)
- else:
- self._action(3)
- elif self.theB == '\n':
- if self.theA in ['}', ']', ')', '+', '-', '"', '\'']:
- self._action(1)
- else:
- if isAlphanum(self.theA):
- self._action(1)
- else:
- self._action(3)
- else:
- self._action(1)
-
- def minify(self, instream, outstream):
- self.instream = instream
- self.outstream = outstream
- self.theA = '\n'
- self.theB = None
- self.theLookahead = None
-
- self._jsmin()
- self.instream.close()
-
-if __name__ == '__main__':
- import sys
- jsm = JavascriptMinify()
- jsm.minify(sys.stdin, sys.stdout)
+#!/usr/bin/python2.4
+
+# Copyright 2009 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A JavaScript minifier.
+
+It is far from being a complete JS parser, so there are many valid
+JavaScript programs that will be ruined by it. Another strangeness is that
+it accepts $ and % as parts of identifiers. It doesn't merge lines or strip
+out blank lines in order to ease debugging. Variables at the top scope are
+properties of the global object so we can't rename them. It is assumed that
+you introduce variables with var as if JavaScript followed C++ scope rules
+around curly braces, so the declaration must be above the first use.
+
+Use as:
+import jsmin
+minifier = jsmin.JavaScriptMinifier()
+program1 = minifier.JSMinify(program1)
+program2 = minifier.JSMinify(program2)
+"""
+
+import re
+
+
+class JavaScriptMinifier(object):
+ """An object that you can feed code snippets to to get them minified."""
+
+ def __init__(self):
+ # We prepopulate the list of identifiers that shouldn't be used. These
+ # short language keywords could otherwise be used by the script as variable
+ # names.
+ self.seen_identifiers = {"do": True, "in": True}
+ self.identifier_counter = 0
+ self.in_comment = False
+ self.map = {}
+ self.nesting = 0
+
+ def LookAtIdentifier(self, m):
+ """Records identifiers or keywords that we see in use.
+
+ (So we can avoid renaming variables to these strings.)
+ Args:
+ m: The match object returned by re.search.
+
+ Returns:
+ Nothing.
+ """
+ identifier = m.group(1)
+ self.seen_identifiers[identifier] = True
+
+ def Push(self):
+ """Called when we encounter a '{'."""
+ self.nesting += 1
+
+ def Pop(self):
+ """Called when we encounter a '}'."""
+ self.nesting -= 1
+ # We treat each top-level opening brace as a single scope that can span
+ # several sets of nested braces.
+ if self.nesting == 0:
+ self.map = {}
+ self.identifier_counter = 0
+
+ def Declaration(self, m):
+ """Rewrites bits of the program selected by a regexp.
+
+ These can be curly braces, literal strings, function declarations and var
+ declarations. (These last two must be on one line including the opening
+ curly brace of the function for their variables to be renamed).
+
+ Args:
+ m: The match object returned by re.search.
+
+ Returns:
+ The string that should replace the match in the rewritten program.
+ """
+ matched_text = m.group(0)
+ if matched_text == "{":
+ self.Push()
+ return matched_text
+ if matched_text == "}":
+ self.Pop()
+ return matched_text
+ if re.match("[\"'/]", matched_text):
+ return matched_text
+ m = re.match(r"var ", matched_text)
+ if m:
+ var_names = matched_text[m.end():]
+ var_names = re.split(r",", var_names)
+ return "var " + ",".join(map(self.FindNewName, var_names))
+ m = re.match(r"(function\b[^(]*)\((.*)\)\{$", matched_text)
+ if m:
+ up_to_args = m.group(1)
+ args = m.group(2)
+ args = re.split(r",", args)
+ self.Push()
+ return up_to_args + "(" + ",".join(map(self.FindNewName, args)) + "){"
+
+ if matched_text in self.map:
+ return self.map[matched_text]
+
+ return matched_text
+
+ def CharFromNumber(self, number):
+ """A single-digit base-52 encoding using a-zA-Z."""
+ if number < 26:
+ return chr(number + 97)
+ number -= 26
+ return chr(number + 65)
+
+ def FindNewName(self, var_name):
+ """Finds a new 1-character or 2-character name for a variable.
+
+ Enters it into the mapping table for this scope.
+
+ Args:
+ var_name: The name of the variable before renaming.
+
+ Returns:
+ The new name of the variable.
+ """
+ new_identifier = ""
+ # Variable names that end in _ are member variables of the global object,
+ # so they can be visible from code in a different scope. We leave them
+ # alone.
+ if var_name in self.map:
+ return self.map[var_name]
+ if self.nesting == 0:
+ return var_name
+ while True:
+ identifier_first_char = self.identifier_counter % 52
+ identifier_second_char = self.identifier_counter / 52
+ new_identifier = self.CharFromNumber(identifier_first_char)
+ if identifier_second_char != 0:
+ new_identifier = (
+ self.CharFromNumber(identifier_second_char - 1) + new_identifier)
+ self.identifier_counter += 1
+ if not new_identifier in self.seen_identifiers:
+ break
+
+ self.map[var_name] = new_identifier
+ return new_identifier
+
+ def RemoveSpaces(self, m):
+ """Returns literal strings unchanged, replaces other inputs with group 2.
+
+ Other inputs are replaced with the contents of capture 1. This is either
+ a single space or an empty string.
+
+ Args:
+ m: The match object returned by re.search.
+
+ Returns:
+ The string that should be inserted instead of the matched text.
+ """
+ entire_match = m.group(0)
+ replacement = m.group(1)
+ if re.match(r"'.*'$", entire_match):
+ return entire_match
+ if re.match(r'".*"$', entire_match):
+ return entire_match
+ if re.match(r"/.+/$", entire_match):
+ return entire_match
+ return replacement
+
+ def JSMinify(self, text):
+ """The main entry point. Takes a text and returns a compressed version.
+
+ The compressed version hopefully does the same thing. Line breaks are
+ preserved.
+
+ Args:
+ text: The text of the code snippet as a multiline string.
+
+ Returns:
+ The compressed text of the code snippet as a multiline string.
+ """
+ new_lines = []
+ for line in re.split(r"\n", text):
+ line = line.replace("\t", " ")
+ if self.in_comment:
+ m = re.search(r"\*/", line)
+ if m:
+ line = line[m.end():]
+ self.in_comment = False
+ else:
+ new_lines.append("")
+ continue
+
+ if not self.in_comment:
+ line = re.sub(r"/\*.*?\*/", " ", line)
+ line = re.sub(r"//.*", "", line)
+ m = re.search(r"/\*", line)
+ if m:
+ line = line[:m.start()]
+ self.in_comment = True
+
+ # Strip leading and trailing spaces.
+ line = re.sub(r"^ +", "", line)
+ line = re.sub(r" +$", "", line)
+ # A regexp that matches a literal string surrounded by "double quotes".
+ # This regexp can handle embedded backslash-escaped characters including
+ # embedded backslash-escaped double quotes.
+ double_quoted_string = r'"(?:[^"\\]|\\.)*"'
+ # A regexp that matches a literal string surrounded by 'single quotes'.
+ single_quoted_string = r"'(?:[^'\\]|\\.)*'"
+ # A regexp that matches a regexp literal surrounded by /slashes/.
+ slash_quoted_regexp = r"/(?:[^/\\]|\\.)+/"
+ # Replace multiple spaces with a single space.
+ line = re.sub("|".join([double_quoted_string,
+ single_quoted_string,
+ slash_quoted_regexp,
+ "( )+"]),
+ self.RemoveSpaces,
+ line)
+ # Strip single spaces unless they have an identifier character both before
+ # and after the space. % and $ are counted as identifier characters.
+ line = re.sub("|".join([double_quoted_string,
+ single_quoted_string,
+ slash_quoted_regexp,
+ r"(?<![a-zA-Z_0-9$%]) | (?![a-zA-Z_0-9$%])()"]),
+ self.RemoveSpaces,
+ line)
+ # Collect keywords and identifiers that are already in use.
+ if self.nesting == 0:
+ re.sub(r"([a-zA-Z0-9_$%]+)", self.LookAtIdentifier, line)
+ function_declaration_regexp = (
+ r"\bfunction" # Function definition keyword...
+ r"( [\w$%]+)?" # ...optional function name...
+ r"\([\w$%,]+\)\{") # ...argument declarations.
+ # Unfortunately the keyword-value syntax { key:value } makes the key look
+ # like a variable where in fact it is a literal string. We use the
+ # presence or absence of a question mark to try to distinguish between
+ # this case and the ternary operator: "condition ? iftrue : iffalse".
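+ # For example, in a line like "return {key: 1}" the identifier before the
+ # colon is left alone, while in "flag ? a : b" identifiers are still
+ # candidates for renaming because the line contains a question mark.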
+ if re.search(r"\?", line):
+ block_trailing_colon = r""
+ else:
+ block_trailing_colon = r"(?![:\w$%])"
+ # Variable use. Cannot follow a period or precede a colon.
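+ # For example, "prop" in "obj.prop" is not treated as a variable use
+ # because it directly follows a period.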
+ variable_use_regexp = r"(?<![.\w$%])[\w$%]+" + block_trailing_colon
+ line = re.sub("|".join([double_quoted_string,
+ single_quoted_string,
+ slash_quoted_regexp,
+ r"\{", # Curly braces.
+ r"\}",
+ r"\bvar [\w$%,]+", # var declarations.
+ function_declaration_regexp,
+ variable_use_regexp]),
+ self.Declaration,
+ line)
+ new_lines.append(line)
+
+ return "\n".join(new_lines) + "\n"
diff --git a/V8Binding/v8/tools/presubmit.py b/V8Binding/v8/tools/presubmit.py
index 3e714de..c4f7853 100755
--- a/V8Binding/v8/tools/presubmit.py
+++ b/V8Binding/v8/tools/presubmit.py
@@ -30,7 +30,7 @@
import optparse
import os
-from os.path import abspath, join, dirname, basename
+from os.path import abspath, join, dirname, basename, exists
import re
import sys
import subprocess
@@ -103,7 +103,7 @@ class SourceFileProcessor(object):
all_files = []
for file in self.GetPathsToSearch():
all_files += self.FindFilesIn(join(path, file))
- if not self.ProcessFiles(all_files):
+ if not self.ProcessFiles(all_files, path):
return False
return True
@@ -145,9 +145,12 @@ class CppLintProcessor(SourceFileProcessor):
def GetPathsToSearch(self):
return ['src', 'public', 'samples', join('test', 'cctest')]
- def ProcessFiles(self, files):
+ def ProcessFiles(self, files, path):
filt = '-,' + ",".join(['+' + n for n in ENABLED_LINT_RULES])
command = ['cpplint.py', '--filter', filt] + join(files)
+ local_cpplint = join(path, "tools", "cpplint.py")
+ if exists(local_cpplint):
+ command = ['python', local_cpplint, '--filter', filt] + join(files)
process = subprocess.Popen(command)
return process.wait() == 0
@@ -194,7 +197,7 @@ class SourceProcessor(SourceFileProcessor):
result = False
return result
- def ProcessFiles(self, files):
+ def ProcessFiles(self, files, path):
success = True
for file in files:
try:
diff --git a/V8Binding/v8/tools/run-valgrind.py b/V8Binding/v8/tools/run-valgrind.py
index 8a0869c..49c1b70 100755
--- a/V8Binding/v8/tools/run-valgrind.py
+++ b/V8Binding/v8/tools/run-valgrind.py
@@ -58,7 +58,7 @@ if code != 0:
# have any definitely, indirectly, and possibly lost bytes.
LEAK_RE = r"(?:definitely|indirectly|possibly) lost: "
LEAK_LINE_MATCHER = re.compile(LEAK_RE)
-LEAK_OKAY_MATCHER = re.compile(r"lost: 0 bytes in 0 blocks.")
+LEAK_OKAY_MATCHER = re.compile(r"lost: 0 bytes in 0 blocks")
leaks = []
for line in errors:
if LEAK_LINE_MATCHER.search(line):
diff --git a/V8Binding/v8/tools/test.py b/V8Binding/v8/tools/test.py
index c1b8b80..3a60c59 100755
--- a/V8Binding/v8/tools/test.py
+++ b/V8Binding/v8/tools/test.py
@@ -1084,6 +1084,8 @@ def BuildOptions():
choices=PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--no-build", help="Don't build requirements",
default=False, action="store_true")
+ result.add_option("--build-only", help="Only build requirements, don't run the tests",
+ default=False, action="store_true")
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("-s", "--suite", help="A test suite",
@@ -1261,6 +1263,10 @@ def Main():
if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
return 1
+ # Just return if we are only building the targets for running the tests.
+ if options.build_only:
+ return 0
+
# Get status for tests
sections = [ ]
defs = { }
diff --git a/V8Binding/v8/tools/v8.xcodeproj/project.pbxproj b/V8Binding/v8/tools/v8.xcodeproj/project.pbxproj
index f9241f9..2d38681 100644
--- a/V8Binding/v8/tools/v8.xcodeproj/project.pbxproj
+++ b/V8Binding/v8/tools/v8.xcodeproj/project.pbxproj
@@ -207,6 +207,8 @@
89F23C9F0E78D604006B2466 /* simulator-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF17D0E719B8F00D62E90 /* simulator-arm.cc */; };
89F23CA00E78D609006B2466 /* stub-cache-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18A0E719B8F00D62E90 /* stub-cache-arm.cc */; };
89FB0E3A0F8E533F00B04B3C /* d8-posix.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89FB0E360F8E531900B04B3C /* d8-posix.cc */; };
+ 9F11D9A0105AF0A300EBE5B2 /* heap-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F11D99E105AF0A300EBE5B2 /* heap-profiler.cc */; };
+ 9F11D9A1105AF0A300EBE5B2 /* heap-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F11D99E105AF0A300EBE5B2 /* heap-profiler.cc */; };
9F4B7B890FCC877A00DC4117 /* log-utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F4B7B870FCC877A00DC4117 /* log-utils.cc */; };
9F4B7B8A0FCC877A00DC4117 /* log-utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F4B7B870FCC877A00DC4117 /* log-utils.cc */; };
9F92FAA90F8F28AD0089F02C /* func-name-inferrer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */; };
@@ -533,6 +535,8 @@
89F23C950E78D5B6006B2466 /* v8_shell-arm */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "v8_shell-arm"; sourceTree = BUILT_PRODUCTS_DIR; };
89FB0E360F8E531900B04B3C /* d8-posix.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "d8-posix.cc"; path = "../src/d8-posix.cc"; sourceTree = "<group>"; };
89FB0E370F8E531900B04B3C /* d8-windows.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "d8-windows.cc"; path = "../src/d8-windows.cc"; sourceTree = "<group>"; };
+ 9F11D99E105AF0A300EBE5B2 /* heap-profiler.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "heap-profiler.cc"; sourceTree = "<group>"; };
+ 9F11D99F105AF0A300EBE5B2 /* heap-profiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "heap-profiler.h"; sourceTree = "<group>"; };
9F4B7B870FCC877A00DC4117 /* log-utils.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "log-utils.cc"; sourceTree = "<group>"; };
9F4B7B880FCC877A00DC4117 /* log-utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "log-utils.h"; sourceTree = "<group>"; };
9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "func-name-inferrer.cc"; sourceTree = "<group>"; };
@@ -626,7 +630,6 @@
897FF0D70E719AB300D62E90 /* C++ */ = {
isa = PBXGroup;
children = (
- 22A76C900FF259E600FDC694 /* log-inl.h */,
897FF0F60E719B8F00D62E90 /* accessors.cc */,
897FF0F70E719B8F00D62E90 /* accessors.h */,
897FF0F80E719B8F00D62E90 /* allocation.cc */,
@@ -725,6 +728,8 @@
897FF1460E719B8F00D62E90 /* heap-inl.h */,
897FF1470E719B8F00D62E90 /* heap.cc */,
897FF1480E719B8F00D62E90 /* heap.h */,
+ 9F11D99E105AF0A300EBE5B2 /* heap-profiler.cc */,
+ 9F11D99F105AF0A300EBE5B2 /* heap-profiler.h */,
897FF1490E719B8F00D62E90 /* ic-arm.cc */,
897FF14A0E719B8F00D62E90 /* ic-ia32.cc */,
897FF14B0E719B8F00D62E90 /* ic-inl.h */,
@@ -742,6 +747,7 @@
897FF1510E719B8F00D62E90 /* list.h */,
897FF1520E719B8F00D62E90 /* log.cc */,
897FF1530E719B8F00D62E90 /* log.h */,
+ 22A76C900FF259E600FDC694 /* log-inl.h */,
9F4B7B870FCC877A00DC4117 /* log-utils.cc */,
9F4B7B880FCC877A00DC4117 /* log-utils.h */,
897FF1540E719B8F00D62E90 /* macro-assembler-arm.cc */,
@@ -1201,6 +1207,7 @@
89A88E2E0E71A6D60043BA31 /* zone.cc in Sources */,
9F4B7B890FCC877A00DC4117 /* log-utils.cc in Sources */,
8981F6001010501900D1520E /* frame-element.cc in Sources */,
+ 9F11D9A0105AF0A300EBE5B2 /* heap-profiler.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -1306,6 +1313,7 @@
89F23C820E78D5B2006B2466 /* zone.cc in Sources */,
9F4B7B8A0FCC877A00DC4117 /* log-utils.cc in Sources */,
8981F6011010502800D1520E /* frame-element.cc in Sources */,
+ 9F11D9A1105AF0A300EBE5B2 /* heap-profiler.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -1489,6 +1497,7 @@
V8_TARGET_ARCH_IA32,
V8_NATIVE_REGEXP,
ENABLE_LOGGING_AND_PROFILING,
+ ENABLE_DEBUGGER_SUPPORT,
);
HEADER_SEARCH_PATHS = ../src;
PRODUCT_NAME = v8;
@@ -1537,6 +1546,7 @@
V8_TARGET_ARCH_ARM,
ENABLE_DISASSEMBLER,
ENABLE_LOGGING_AND_PROFILING,
+ ENABLE_DEBUGGER_SUPPORT,
);
HEADER_SEARCH_PATHS = ../src;
PRODUCT_NAME = "v8-arm";
diff --git a/V8Binding/v8/tools/visual_studio/common.vsprops b/V8Binding/v8/tools/visual_studio/common.vsprops
index 238dd97..213a081 100644
--- a/V8Binding/v8/tools/visual_studio/common.vsprops
+++ b/V8Binding/v8/tools/visual_studio/common.vsprops
@@ -8,7 +8,7 @@
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories="$(ProjectDir)\..\..\src;$(IntDir)\DerivedSources"
- PreprocessorDefinitions="WIN32;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;_HAS_EXCEPTIONS=0;ENABLE_LOGGING_AND_PROFILING"
+ PreprocessorDefinitions="WIN32;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;_HAS_EXCEPTIONS=0;ENABLE_LOGGING_AND_PROFILING;ENABLE_DEBUGGER_SUPPORT"
MinimalRebuild="false"
ExceptionHandling="0"
RuntimeTypeInfo="false"
diff --git a/V8Binding/v8/tools/visual_studio/v8_base.vcproj b/V8Binding/v8/tools/visual_studio/v8_base.vcproj
index 421cc7c..7a013c0 100644
--- a/V8Binding/v8/tools/visual_studio/v8_base.vcproj
+++ b/V8Binding/v8/tools/visual_studio/v8_base.vcproj
@@ -237,18 +237,6 @@
>
</File>
<File
- RelativePath="..\..\src\ia32\cfg-ia32.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.h"
- >
- </File>
- <File
RelativePath="..\..\src\char-predicates-inl.h"
>
</File>
@@ -489,6 +477,14 @@
>
</File>
<File
+ RelativePath="..\..\src\heap-profiler.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\heap-profiler.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\ia32\ic-ia32.cc"
>
</File>
diff --git a/V8Binding/v8/tools/visual_studio/v8_base_arm.vcproj b/V8Binding/v8/tools/visual_studio/v8_base_arm.vcproj
index 8fe54af..abdb418 100644
--- a/V8Binding/v8/tools/visual_studio/v8_base_arm.vcproj
+++ b/V8Binding/v8/tools/visual_studio/v8_base_arm.vcproj
@@ -237,18 +237,6 @@
>
</File>
<File
- RelativePath="..\..\src\arm\cfg-arm.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.h"
- >
- </File>
- <File
RelativePath="..\..\src\char-predicates-inl.h"
>
</File>
@@ -313,6 +301,10 @@
>
</File>
<File
+ RelativePath="..\..\src\arm\constants-arm.cc"
+ >
+ </File>
+ <File
RelativePath="..\..\src\arm\constants-arm.h"
>
</File>
@@ -493,6 +485,14 @@
>
</File>
<File
+ RelativePath="..\..\src\heap-profiler.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\heap-profiler.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\arm\ic-arm.cc"
>
</File>
diff --git a/V8Binding/v8/tools/visual_studio/v8_base_x64.vcproj b/V8Binding/v8/tools/visual_studio/v8_base_x64.vcproj
index 1e27824..7b8b4d3 100644
--- a/V8Binding/v8/tools/visual_studio/v8_base_x64.vcproj
+++ b/V8Binding/v8/tools/visual_studio/v8_base_x64.vcproj
@@ -237,18 +237,6 @@
>
</File>
<File
- RelativePath="..\..\src\x64\cfg-x64.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\cfg.h"
- >
- </File>
- <File
RelativePath="..\..\src\char-predicates-inl.h"
>
</File>
@@ -489,6 +477,14 @@
>
</File>
<File
+ RelativePath="..\..\src\heap-profiler.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\heap-profiler.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\x64\ic-x64.cc"
>
</File>
diff --git a/V8Binding/v8/tools/visual_studio/v8_cctest.vcproj b/V8Binding/v8/tools/visual_studio/v8_cctest.vcproj
index ec07889..d1cf2e84 100644
--- a/V8Binding/v8/tools/visual_studio/v8_cctest.vcproj
+++ b/V8Binding/v8/tools/visual_studio/v8_cctest.vcproj
@@ -198,6 +198,10 @@
>
</File>
<File
+ RelativePath="..\..\test\cctest\test-heap-profiler.cc"
+ >
+ </File>
+ <File
RelativePath="..\..\test\cctest\test-lock.cc"
>
</File>
diff --git a/V8Binding/v8/tools/visual_studio/v8_cctest_arm.vcproj b/V8Binding/v8/tools/visual_studio/v8_cctest_arm.vcproj
index bd49f3b..968d134 100644
--- a/V8Binding/v8/tools/visual_studio/v8_cctest_arm.vcproj
+++ b/V8Binding/v8/tools/visual_studio/v8_cctest_arm.vcproj
@@ -194,6 +194,10 @@
>
</File>
<File
+ RelativePath="..\..\test\cctest\test-heap-profiler.cc"
+ >
+ </File>
+ <File
RelativePath="..\..\test\cctest\test-lock.cc"
>
</File>
diff --git a/V8Binding/v8/tools/visual_studio/v8_cctest_x64.vcproj b/V8Binding/v8/tools/visual_studio/v8_cctest_x64.vcproj
index d0fbac6..78db1a4 100644
--- a/V8Binding/v8/tools/visual_studio/v8_cctest_x64.vcproj
+++ b/V8Binding/v8/tools/visual_studio/v8_cctest_x64.vcproj
@@ -200,6 +200,10 @@
>
</File>
<File
+ RelativePath="..\..\test\cctest\test-heap-profiler.cc"
+ >
+ </File>
+ <File
RelativePath="..\..\test\cctest\test-lock.cc"
>
</File>
diff --git a/WEBKIT_MERGE_REVISION b/WEBKIT_MERGE_REVISION
index b3ee734..4feb1ca 100644
--- a/WEBKIT_MERGE_REVISION
+++ b/WEBKIT_MERGE_REVISION
@@ -2,4 +2,4 @@ We sync with Chromium release revision, which has both webkit revision and V8 re
http://src.chromium.org/svn/branches/187/src@18043
http://svn.webkit.org/repository/webkit/trunk@47029
-http://v8.googlecode.com/svn/branches/bleeding_edge@2842
+http://v8.googlecode.com/svn/branches/bleeding_edge@3018