-rw-r--r--  V8Binding/Android.v8common.mk | 2
-rw-r--r--  V8Binding/v8/ChangeLog | 62
-rw-r--r--  V8Binding/v8/SConstruct | 18
-rw-r--r--  V8Binding/v8/include/v8-debug.h | 2
-rw-r--r--  V8Binding/v8/include/v8.h | 67
-rwxr-xr-x  V8Binding/v8/src/SConscript | 37
-rw-r--r--  V8Binding/v8/src/api.cc | 84
-rw-r--r--  V8Binding/v8/src/arm/cfg-arm.cc | 301
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm-inl.h | 31
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm.cc | 156
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm.h | 121
-rw-r--r--  V8Binding/v8/src/arm/ic-arm.cc | 12
-rw-r--r--  V8Binding/v8/src/arm/stub-cache-arm.cc | 45
-rw-r--r--  V8Binding/v8/src/arm/virtual-frame-arm.h | 15
-rw-r--r--  V8Binding/v8/src/ast.cc | 2
-rw-r--r--  V8Binding/v8/src/ast.h | 34
-rw-r--r--  V8Binding/v8/src/builtins.cc | 26
-rw-r--r--  V8Binding/v8/src/cfg.cc | 741
-rw-r--r--  V8Binding/v8/src/cfg.h | 819
-rw-r--r--  V8Binding/v8/src/codegen.cc | 2
-rw-r--r--  V8Binding/v8/src/compiler.cc | 17
-rw-r--r--  V8Binding/v8/src/date-delay.js | 16
-rw-r--r--  V8Binding/v8/src/debug-agent.cc | 4
-rw-r--r--  V8Binding/v8/src/debug.cc | 5
-rw-r--r--  V8Binding/v8/src/execution.cc | 25
-rw-r--r--  V8Binding/v8/src/execution.h | 2
-rw-r--r--  V8Binding/v8/src/factory.cc | 10
-rw-r--r--  V8Binding/v8/src/factory.h | 4
-rw-r--r--  V8Binding/v8/src/flag-definitions.h | 2
-rw-r--r--  V8Binding/v8/src/globals.h | 1
-rw-r--r--  V8Binding/v8/src/handles.cc | 23
-rw-r--r--  V8Binding/v8/src/handles.h | 4
-rw-r--r--  V8Binding/v8/src/heap-inl.h | 2
-rw-r--r--  V8Binding/v8/src/heap.cc | 235
-rw-r--r--  V8Binding/v8/src/heap.h | 14
-rw-r--r--  V8Binding/v8/src/ia32/assembler-ia32.h | 4
-rw-r--r--  V8Binding/v8/src/ia32/builtins-ia32.cc | 14
-rw-r--r--  V8Binding/v8/src/ia32/cfg-ia32.cc | 315
-rw-r--r--  V8Binding/v8/src/ia32/codegen-ia32.cc | 83
-rw-r--r--  V8Binding/v8/src/ia32/codegen-ia32.h | 57
-rw-r--r--  V8Binding/v8/src/ia32/ic-ia32.cc | 122
-rw-r--r--  V8Binding/v8/src/ia32/macro-assembler-ia32.cc | 95
-rw-r--r--  V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc | 35
-rw-r--r--  V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h | 1
-rw-r--r--  V8Binding/v8/src/ia32/stub-cache-ia32.cc | 500
-rw-r--r--  V8Binding/v8/src/ic.cc | 17
-rw-r--r--  V8Binding/v8/src/ic.h | 28
-rw-r--r--  V8Binding/v8/src/jsregexp-inl.h | 260
-rw-r--r--  V8Binding/v8/src/jsregexp.cc | 8
-rw-r--r--  V8Binding/v8/src/jsregexp.h | 102
-rw-r--r--  V8Binding/v8/src/log.cc | 90
-rw-r--r--  V8Binding/v8/src/log.h | 10
-rw-r--r--  V8Binding/v8/src/messages.js | 41
-rw-r--r--  V8Binding/v8/src/objects-debug.cc | 92
-rw-r--r--  V8Binding/v8/src/objects-inl.h | 121
-rw-r--r--  V8Binding/v8/src/objects.cc | 1023
-rw-r--r--  V8Binding/v8/src/objects.h | 86
-rw-r--r--  V8Binding/v8/src/parser.cc | 6
-rw-r--r--  V8Binding/v8/src/platform-linux.cc | 99
-rw-r--r--  V8Binding/v8/src/platform-macos.cc | 42
-rw-r--r--  V8Binding/v8/src/platform-nullos.cc | 2
-rw-r--r--  V8Binding/v8/src/platform-posix.cc | 8
-rw-r--r--  V8Binding/v8/src/platform-win32.cc | 2
-rw-r--r--  V8Binding/v8/src/platform.h | 2
-rw-r--r--  V8Binding/v8/src/prettyprinter.cc | 10
-rw-r--r--  V8Binding/v8/src/prettyprinter.h | 10
-rw-r--r--  V8Binding/v8/src/rewriter.cc | 4
-rw-r--r--  V8Binding/v8/src/runtime.cc | 160
-rw-r--r--  V8Binding/v8/src/spaces-inl.h | 25
-rw-r--r--  V8Binding/v8/src/spaces.h | 56
-rw-r--r--  V8Binding/v8/src/string-stream.cc | 9
-rw-r--r--  V8Binding/v8/src/string-stream.h | 16
-rw-r--r--  V8Binding/v8/src/stub-cache.cc | 150
-rw-r--r--  V8Binding/v8/src/stub-cache.h | 20
-rw-r--r--  V8Binding/v8/src/usage-analyzer.cc | 4
-rw-r--r--  V8Binding/v8/src/variables.h | 4
-rw-r--r--  V8Binding/v8/src/version.cc | 2
-rw-r--r--  V8Binding/v8/src/x64/assembler-x64.cc | 54
-rw-r--r--  V8Binding/v8/src/x64/assembler-x64.h | 28
-rw-r--r--  V8Binding/v8/src/x64/builtins-x64.cc | 159
-rw-r--r--  V8Binding/v8/src/x64/cfg-x64.cc | 323
-rw-r--r--  V8Binding/v8/src/x64/codegen-x64.cc | 1099
-rw-r--r--  V8Binding/v8/src/x64/codegen-x64.h | 68
-rw-r--r--  V8Binding/v8/src/x64/disasm-x64.cc | 67
-rw-r--r--  V8Binding/v8/src/x64/ic-x64.cc | 744
-rw-r--r--  V8Binding/v8/src/x64/macro-assembler-x64.cc | 186
-rw-r--r--  V8Binding/v8/src/x64/macro-assembler-x64.h | 3
-rw-r--r--  V8Binding/v8/src/x64/stub-cache-x64.cc | 1452
-rw-r--r--  V8Binding/v8/src/zone-inl.h | 217
-rw-r--r--  V8Binding/v8/src/zone.h | 102
-rw-r--r--  V8Binding/v8/test/cctest/SConscript | 4
-rw-r--r--  V8Binding/v8/test/cctest/cctest.status | 6
-rw-r--r--  V8Binding/v8/test/cctest/test-api.cc | 190
-rw-r--r--  V8Binding/v8/test/cctest/test-ast.cc | 4
-rw-r--r--  V8Binding/v8/test/cctest/test-debug.cc | 68
-rw-r--r--  V8Binding/v8/test/cctest/test-heap.cc | 2
-rw-r--r--  V8Binding/v8/test/cctest/test-log-stack-tracer.cc (renamed from V8Binding/v8/test/cctest/test-log-ia32.cc) | 12
-rw-r--r--  V8Binding/v8/test/cctest/test-log.cc | 44
-rw-r--r--  V8Binding/v8/test/cctest/test-regexp.cc | 2
-rw-r--r--  V8Binding/v8/test/mjsunit/debug-stepin-builtin.js | 78
-rw-r--r--  V8Binding/v8/test/mjsunit/mjsunit.status | 12
-rw-r--r--  V8Binding/v8/test/mjsunit/regress/regress-416.js | 38
-rw-r--r--  V8Binding/v8/test/mjsunit/stack-traces.js | 28
-rw-r--r--  V8Binding/v8/test/mozilla/mozilla.status | 15
-rw-r--r--  V8Binding/v8/tools/gyp/v8.gyp | 41
-rwxr-xr-x  V8Binding/v8/tools/process-heap-prof.py | 13
-rw-r--r--  V8Binding/v8/tools/tickprocessor.js | 2
-rw-r--r--  V8Binding/v8/tools/v8.xcodeproj/project.pbxproj | 2
-rw-r--r--  V8Binding/v8/tools/visual_studio/v8_base.vcproj | 16
-rw-r--r--  V8Binding/v8/tools/visual_studio/v8_base_arm.vcproj | 16
-rw-r--r--  V8Binding/v8/tools/visual_studio/v8_cctest.vcproj | 2
-rw-r--r--  WEBKIT_MERGE_REVISION | 2
112 files changed, 9273 insertions, 2502 deletions
diff --git a/V8Binding/Android.v8common.mk b/V8Binding/Android.v8common.mk
index 4da3a30..d120588 100644
--- a/V8Binding/Android.v8common.mk
+++ b/V8Binding/Android.v8common.mk
@@ -8,6 +8,7 @@ V8_LOCAL_SRC_FILES := \
src/ast.cc \
src/bootstrapper.cc \
src/builtins.cc \
+ src/cfg.cc \
src/checks.cc \
src/code-stubs.cc \
src/codegen.cc \
@@ -71,6 +72,7 @@ ifeq ($(TARGET_ARCH),arm)
V8_LOCAL_SRC_FILES += \
src/arm/assembler-arm.cc \
src/arm/builtins-arm.cc \
+ src/arm/cfg-arm.cc \
src/arm/codegen-arm.cc \
src/arm/cpu-arm.cc \
src/arm/disasm-arm.cc \
diff --git a/V8Binding/v8/ChangeLog b/V8Binding/v8/ChangeLog
index 83ebc02..03b96f1 100644
--- a/V8Binding/v8/ChangeLog
+++ b/V8Binding/v8/ChangeLog
@@ -1,3 +1,65 @@
+2009-08-05: Version 1.3.2
+
+ Started new compiler infrastructure for two-pass compilation using a
+ control flow graph constructed from the AST.
+
+ Profiler stack sampling for X64.
+
+ Safe handling of NaN to Posix platform-dependent time functions.
+
+ Added a new profiler control API to unify controlling various aspects
+ of profiling.
+
+ Fixed issue 392.
+
+
+2009-07-30: Version 1.3.1
+
+ Speed improvements to accessors and interceptors.
+
+ Added support for capturing stack information on custom errors.
+
+ Added support for morphing an object into a pixel array where its
+ indexed properties are stored in an external byte array. Values written
+ are always clamped to the 0..255 interval.
+
+ Profiler on x64 now handles C/C++ functions from shared libraries.
+
+ Changed the debugger to avoid stepping into function.call/apply if the
+ function is a built-in.
+
+ Initial implementation of constructor heap profile for JS objects.
+
+ More fine grained control of profiling aspects through the API.
+
+ Optimized the called as constructor check for API calls.
+
+
+2009-07-27: Version 1.3.0
+
+ Allowed RegExp objects to be called as functions (issue 132).
+
+ Fixed issue where global property cells would escape after
+ detaching the global object; see http://crbug.com/16276.
+
+ Added support for stepping into setters and getters in the
+ debugger.
+
+ Changed the debugger to avoid stopping in its own JavaScript code
+ and in the code of built-in functions.
+
+ Fixed issue 345 by avoiding duplicate escaping labels.
+
+ Fixed ARM code generator crash in short-circuited boolean
+ expressions and added regression tests.
+
+ Added an external allocation limit to avoid issues where small V8
+ objects would hold on to large amounts of external memory without
+ causing garbage collections.
+
+ Finished more of the inline caching stubs for x64 targets.
+
+
2009-07-13: Version 1.2.14
Added separate paged heap space for global property cells and
diff --git a/V8Binding/v8/SConstruct b/V8Binding/v8/SConstruct
index dbcd616..c981ef9 100644
--- a/V8Binding/v8/SConstruct
+++ b/V8Binding/v8/SConstruct
@@ -79,7 +79,9 @@ ANDROID_INCLUDES = [ANDROID_TOP + '/bionic/libc/arch-arm/include',
ANDROID_TOP + '/bionic/libc/kernel/arch-arm',
ANDROID_TOP + '/bionic/libm/include',
ANDROID_TOP + '/bionic/libm/include/arch/arm',
- ANDROID_TOP + '/bionic/libthread_db/include']
+ ANDROID_TOP + '/bionic/libthread_db/include',
+ ANDROID_TOP + '/frameworks/base/include',
+ ANDROID_TOP + '/system/core/include']
ANDROID_LINKFLAGS = ['-nostdlib',
'-Bdynamic',
@@ -126,6 +128,7 @@ LIBRARY_FLAGS = {
'os:linux': {
'CCFLAGS': ['-ansi'] + GCC_EXTRA_CCFLAGS,
'library:shared': {
+ 'CPPDEFINES': ['V8_SHARED'],
'LIBS': ['pthread']
}
},
@@ -218,8 +221,11 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'CXXFLAGS': [], #['-fvisibility=hidden'],
- 'WARNINGFLAGS': ['-Wall', '-Werror', '-W',
- '-Wno-unused-parameter']
+ 'WARNINGFLAGS': ['-Wall',
+ '-Werror',
+ '-W',
+ '-Wno-unused-parameter',
+ '-Wnon-virtual-dtor']
},
'os:win32': {
'WARNINGFLAGS': ['-pedantic', '-Wno-long-long']
@@ -327,7 +333,7 @@ CCTEST_EXTRA_FLAGS = {
'CPPPATH': ANDROID_INCLUDES,
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
'LINKFLAGS': ANDROID_LINKFLAGS,
- 'LIBS': ['c', 'stdc++', 'm'],
+ 'LIBS': ['log', 'c', 'stdc++', 'm'],
'mode:release': {
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
}
@@ -378,7 +384,7 @@ SAMPLE_FLAGS = {
'CPPPATH': ANDROID_INCLUDES,
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
'LINKFLAGS': ANDROID_LINKFLAGS,
- 'LIBS': ['c', 'stdc++', 'm'],
+ 'LIBS': ['log', 'c', 'stdc++', 'm'],
'mode:release': {
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
}
@@ -466,7 +472,7 @@ D8_FLAGS = {
'os:android': {
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
'LINKFLAGS': ANDROID_LINKFLAGS,
- 'LIBS': ['c', 'stdc++', 'm'],
+ 'LIBS': ['log', 'c', 'stdc++', 'm'],
},
'os:win32': {
'LIBS': ['winmm', 'ws2_32'],
diff --git a/V8Binding/v8/include/v8-debug.h b/V8Binding/v8/include/v8-debug.h
index 1a4840e..345d331 100644
--- a/V8Binding/v8/include/v8-debug.h
+++ b/V8Binding/v8/include/v8-debug.h
@@ -55,7 +55,7 @@ typedef long long int64_t; // NOLINT
// Setup for Linux shared library export. See v8.h in this directory for
// information on how to build/use V8 as shared library.
-#if defined(__GNUC__) && (__GNUC__ >= 4)
+#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
#define EXPORT __attribute__ ((visibility("default")))
#else // defined(__GNUC__) && (__GNUC__ >= 4)
#define EXPORT
diff --git a/V8Binding/v8/include/v8.h b/V8Binding/v8/include/v8.h
index cf8a3bf..83d5bed 100644
--- a/V8Binding/v8/include/v8.h
+++ b/V8Binding/v8/include/v8.h
@@ -85,11 +85,10 @@ typedef unsigned __int64 uint64_t;
#include <stdint.h>
-// Setup for Linux shared library export. There is no need to destinguish
-// neither between building or using the V8 shared library nor between using
-// the shared or static V8 library as there is on Windows. Therefore there is
-// no checking of BUILDING_V8_SHARED and USING_V8_SHARED.
-#if defined(__GNUC__) && (__GNUC__ >= 4)
+// Setup for Linux shared library export. There is no need to distinguish
+// between building or using the V8 shared library, but we should not
+// export symbols when we are building a static library.
+#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
#define V8EXPORT __attribute__ ((visibility("default")))
#define V8EXPORT_INLINE __attribute__ ((visibility("default")))
#else // defined(__GNUC__) && (__GNUC__ >= 4)
@@ -1100,6 +1099,12 @@ class V8EXPORT Object : public Value {
Local<Value> GetPrototype();
/**
+ * Finds an instance of the given function template in the prototype
+ * chain.
+ */
+ Local<Object> FindInstanceInPrototypeChain(Handle<FunctionTemplate> tmpl);
+
+ /**
* Call builtin Object.prototype.toString on this object.
* This is different from Value::ToString() that may call
* user-defined toString function. This one does not.
@@ -1169,6 +1174,15 @@ class V8EXPORT Object : public Value {
*/
Local<Object> Clone();
+ /**
+ * Set the backing store of the indexed properties to be managed by the
+ * embedding layer. Access to the indexed properties will follow the rules
+ * spelled out in CanvasPixelArray.
+ * Note: The embedding program still owns the data and needs to ensure that
+ * the backing store is preserved while V8 has a reference.
+ */
+ void SetIndexedPropertiesToPixelData(uint8_t* data, int length);
+
static Local<Object> New();
static Object* Cast(Value* obj);
private:
@@ -1958,6 +1972,23 @@ typedef Persistent<Context> (*ContextGenerator)();
/**
+ * Profiler modules.
+ *
+ * In V8, profiler consists of several modules: CPU profiler, and different
+ * kinds of heap profiling. Each can be turned on / off independently.
+ * When PROFILER_MODULE_HEAP_SNAPSHOT flag is passed to ResumeProfilerEx,
+ * modules are enabled only temporarily for making a snapshot of the heap.
+ */
+enum ProfilerModules {
+ PROFILER_MODULE_NONE = 0,
+ PROFILER_MODULE_CPU = 1,
+ PROFILER_MODULE_HEAP_STATS = 1 << 1,
+ PROFILER_MODULE_JS_CONSTRUCTORS = 1 << 2,
+ PROFILER_MODULE_HEAP_SNAPSHOT = 1 << 16
+};
+
+
+/**
* Container class for static utility functions.
*/
class V8EXPORT V8 {
@@ -2111,6 +2142,32 @@ class V8EXPORT V8 {
static bool IsProfilerPaused();
/**
+ * Resumes specified profiler modules.
+ * "ResumeProfiler" is equivalent to "ResumeProfilerEx(PROFILER_MODULE_CPU)".
+ * See ProfilerModules enum.
+ *
+ * \param flags Flags specifying profiler modules.
+ */
+ static void ResumeProfilerEx(int flags);
+
+ /**
+ * Pauses specified profiler modules.
+ * "PauseProfiler" is equivalent to "PauseProfilerEx(PROFILER_MODULE_CPU)".
+ * See ProfilerModules enum.
+ *
+ * \param flags Flags specifying profiler modules.
+ */
+ static void PauseProfilerEx(int flags);
+
+ /**
+ * Returns active (resumed) profiler modules.
+ * See ProfilerModules enum.
+ *
+ * \returns active profiler modules.
+ */
+ static int GetActiveProfilerModules();
+
+ /**
* If logging is performed into a memory buffer (via --logfile=*), allows to
* retrieve previously written messages. This can be used for retrieving
* profiler log data in the application. This function is thread-safe.
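Note: the profiler-control entry points and the pixel-array backing-store call declared in this header diff can be exercised from embedder code roughly as follows. This is only an illustrative sketch against the declarations added above (ProfilerModules, ResumeProfilerEx, PauseProfilerEx, GetActiveProfilerModules, SetIndexedPropertiesToPixelData); the surrounding function names and setup are hypothetical.

    // Illustrative embedder snippet; only the v8::V8 profiler calls and
    // Object::SetIndexedPropertiesToPixelData come from this change.
    #include <v8.h>

    void ProfileWithSnapshot() {
      // Turn on CPU sampling and constructor heap profiling together.
      v8::V8::ResumeProfilerEx(v8::PROFILER_MODULE_CPU |
                               v8::PROFILER_MODULE_JS_CONSTRUCTORS);
      // ... run some JavaScript ...
      // Request a heap snapshot: modules are enabled temporarily, a GC is
      // forced, and modules that were not already active are paused again.
      v8::V8::ResumeProfilerEx(v8::PROFILER_MODULE_HEAP_SNAPSHOT |
                               v8::PROFILER_MODULE_HEAP_STATS);
      // Stop CPU sampling if it is still active.
      if (v8::V8::GetActiveProfilerModules() & v8::PROFILER_MODULE_CPU) {
        v8::V8::PauseProfilerEx(v8::PROFILER_MODULE_CPU);
      }
    }

    void BackWithPixels(v8::Handle<v8::Object> obj, uint8_t* data, int length) {
      // The embedder keeps ownership of |data| and must keep it alive while
      // V8 references the object; stored values are clamped to 0..255.
      obj->SetIndexedPropertiesToPixelData(data, length);
    }
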
diff --git a/V8Binding/v8/src/SConscript b/V8Binding/v8/src/SConscript
index f9f9634..a9669a1 100755
--- a/V8Binding/v8/src/SConscript
+++ b/V8Binding/v8/src/SConscript
@@ -36,25 +36,26 @@ Import('context')
SOURCES = {
'all': [
'accessors.cc', 'allocation.cc', 'api.cc', 'assembler.cc', 'ast.cc',
- 'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'code-stubs.cc',
- 'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc',
- 'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc',
- 'debug-agent.cc', 'disassembler.cc', 'execution.cc', 'factory.cc',
- 'flags.cc', 'frame-element.cc', 'frames.cc', 'func-name-inferrer.cc',
- 'global-handles.cc', 'handles.cc', 'hashmap.cc',
- 'heap.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc',
- 'jump-target.cc', 'log.cc', 'log-utils.cc', 'mark-compact.cc', 'messages.cc',
- 'objects.cc', 'oprofile-agent.cc', 'parser.cc', 'property.cc',
- 'regexp-macro-assembler.cc', 'regexp-macro-assembler-irregexp.cc',
- 'regexp-stack.cc', 'register-allocator.cc', 'rewriter.cc', 'runtime.cc',
- 'scanner.cc', 'scopeinfo.cc', 'scopes.cc', 'serialize.cc',
- 'snapshot-common.cc', 'spaces.cc', 'string-stream.cc', 'stub-cache.cc',
- 'token.cc', 'top.cc', 'unicode.cc', 'usage-analyzer.cc', 'utils.cc',
- 'v8-counters.cc', 'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
+ 'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'cfg.cc',
+ 'code-stubs.cc', 'codegen.cc', 'compilation-cache.cc', 'compiler.cc',
+ 'contexts.cc', 'conversions.cc', 'counters.cc', 'dateparser.cc',
+ 'debug.cc', 'debug-agent.cc', 'disassembler.cc', 'execution.cc',
+ 'factory.cc', 'flags.cc', 'frame-element.cc', 'frames.cc',
+ 'func-name-inferrer.cc', 'global-handles.cc', 'handles.cc',
+ 'hashmap.cc', 'heap.cc', 'ic.cc', 'interpreter-irregexp.cc',
+ 'jsregexp.cc', 'jump-target.cc', 'log.cc', 'log-utils.cc',
+ 'mark-compact.cc', 'messages.cc', 'objects.cc', 'oprofile-agent.cc',
+ 'parser.cc', 'property.cc', 'regexp-macro-assembler.cc',
+ 'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
+ 'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
+ 'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
+ 'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
+ 'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
+ 'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
'virtual-frame.cc', 'zone.cc'
],
'arch:arm': [
- 'arm/assembler-arm.cc', 'arm/builtins-arm.cc',
+ 'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/cfg-arm.cc',
'arm/codegen-arm.cc', 'arm/cpu-arm.cc', 'arm/disasm-arm.cc',
'arm/debug-arm.cc', 'arm/frames-arm.cc', 'arm/ic-arm.cc',
'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc',
@@ -63,7 +64,7 @@ SOURCES = {
'arm/virtual-frame-arm.cc'
],
'arch:ia32': [
- 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc',
+ 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/cfg-ia32.cc',
'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
@@ -72,7 +73,7 @@ SOURCES = {
'ia32/virtual-frame-ia32.cc'
],
'arch:x64': [
- 'x64/assembler-x64.cc', 'x64/builtins-x64.cc',
+ 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/cfg-x64.cc',
'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc',
'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
diff --git a/V8Binding/v8/src/api.cc b/V8Binding/v8/src/api.cc
index 9e3ca9b..0f0d002 100644
--- a/V8Binding/v8/src/api.cc
+++ b/V8Binding/v8/src/api.cc
@@ -1928,6 +1928,22 @@ Local<Value> v8::Object::GetPrototype() {
}
+Local<Object> v8::Object::FindInstanceInPrototypeChain(
+ v8::Handle<FunctionTemplate> tmpl) {
+ ON_BAILOUT("v8::Object::FindInstanceInPrototypeChain()",
+ return Local<v8::Object>());
+ ENTER_V8;
+ i::JSObject* object = *Utils::OpenHandle(this);
+ i::FunctionTemplateInfo* tmpl_info = *Utils::OpenHandle(*tmpl);
+ while (!object->IsInstanceOf(tmpl_info)) {
+ i::Object* prototype = object->GetPrototype();
+ if (!prototype->IsJSObject()) return Local<Object>();
+ object = i::JSObject::cast(prototype);
+ }
+ return Utils::ToLocal(i::Handle<i::JSObject>(object));
+}
+
+
Local<Array> v8::Object::GetPropertyNames() {
ON_BAILOUT("v8::Object::GetPropertyNames()", return Local<v8::Array>());
ENTER_V8;
@@ -2194,6 +2210,25 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
}
+void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
+ ON_BAILOUT("v8::SetElementsToPixelData()", return);
+ ENTER_V8;
+ if (!ApiCheck(i::Smi::IsValid(length),
+ "v8::Object::SetIndexedPropertiesToPixelData()",
+ "length exceeds max acceptable value")) {
+ return;
+ }
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ if (!ApiCheck(!self->IsJSArray(),
+ "v8::Object::SetIndexedPropertiesToPixelData()",
+ "JSArray is not supported")) {
+ return;
+ }
+ i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data);
+ self->set_elements(*pixels);
+}
+
+
Local<v8::Object> Function::NewInstance() const {
return NewInstance(0, NULL);
}
@@ -2554,9 +2589,12 @@ Persistent<Context> v8::Context::New(
i::Handle<i::Context> env;
{
ENTER_V8;
+#if defined(ANDROID)
+ // On mobile devices, full GC is expensive.
+#else
// Give the heap a chance to cleanup if we've disposed contexts.
i::Heap::CollectAllGarbageIfContextDisposed();
-
+#endif
v8::Handle<ObjectTemplate> proxy_template = global_template;
i::Handle<i::FunctionTemplateInfo> proxy_constructor;
i::Handle<i::FunctionTemplateInfo> global_constructor;
@@ -3057,7 +3095,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
if (!self->HasFastElements()) {
return Local<Object>();
}
- i::FixedArray* elms = self->elements();
+ i::FixedArray* elms = i::FixedArray::cast(self->elements());
i::Object* paragon = elms->get(index);
if (!paragon->IsJSObject()) {
return Local<Object>();
@@ -3195,27 +3233,63 @@ void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
void V8::PauseProfiler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- i::Logger::PauseProfiler();
+ i::Logger::PauseProfiler(PROFILER_MODULE_CPU);
#endif
}
void V8::ResumeProfiler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- i::Logger::ResumeProfiler();
+ i::Logger::ResumeProfiler(PROFILER_MODULE_CPU);
#endif
}
bool V8::IsProfilerPaused() {
#ifdef ENABLE_LOGGING_AND_PROFILING
- return i::Logger::IsProfilerPaused();
+ return i::Logger::GetActiveProfilerModules() & PROFILER_MODULE_CPU;
#else
return true;
#endif
}
+void V8::ResumeProfilerEx(int flags) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (flags & PROFILER_MODULE_HEAP_SNAPSHOT) {
+ // Snapshot mode: resume modules, perform GC, then pause only
+ // those modules which haven't been started prior to making a
+ // snapshot.
+
+ // Reset snapshot flag and CPU module flags.
+ flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU);
+ const int current_flags = i::Logger::GetActiveProfilerModules();
+ i::Logger::ResumeProfiler(flags);
+ i::Heap::CollectAllGarbage();
+ i::Logger::PauseProfiler(~current_flags & flags);
+ } else {
+ i::Logger::ResumeProfiler(flags);
+ }
+#endif
+}
+
+
+void V8::PauseProfilerEx(int flags) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Logger::PauseProfiler(flags);
+#endif
+}
+
+
+int V8::GetActiveProfilerModules() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ return i::Logger::GetActiveProfilerModules();
+#else
+ return PROFILER_MODULE_NONE;
+#endif
+}
+
+
int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
return i::Logger::GetLogLines(from_pos, dest_buf, max_size);
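Note: FindInstanceInPrototypeChain, implemented above, walks the receiver's prototype chain and returns the first object created from the given FunctionTemplate, or an empty handle if none is found. A minimal usage sketch follows; |point_template| is a hypothetical FunctionTemplate the embedder created earlier, not something defined in this change.

    // Sketch only: |point_template| is a hypothetical embedder-owned template.
    v8::Local<v8::Object> FindPointInstance(
        v8::Handle<v8::Object> receiver,
        v8::Handle<v8::FunctionTemplate> point_template) {
      v8::Local<v8::Object> found =
          receiver->FindInstanceInPrototypeChain(point_template);
      // |found| is empty if nothing in the chain matches the template.
      return found;
    }
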
diff --git a/V8Binding/v8/src/arm/cfg-arm.cc b/V8Binding/v8/src/arm/cfg-arm.cc
new file mode 100644
index 0000000..34e64b3
--- /dev/null
+++ b/V8Binding/v8/src/arm/cfg-arm.cc
@@ -0,0 +1,301 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cfg.h"
+#include "codegen-inl.h"
+#include "codegen-arm.h" // Include after codegen-inl.h.
+#include "macro-assembler-arm.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void InstructionBlock::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ {
+ Comment cmt(masm, "[ InstructionBlock");
+ for (int i = 0, len = instructions_.length(); i < len; i++) {
+ // If the location of the current instruction is a temp, then the
+ // instruction cannot be in tail position in the block. Allocate the
+ // temp based on peeking ahead to the next instruction.
+ Instruction* instr = instructions_[i];
+ Location* loc = instr->location();
+ if (loc->is_temporary()) {
+ instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
+ }
+ instructions_[i]->Compile(masm);
+ }
+ }
+ successor_->Compile(masm);
+}
+
+
+void EntryNode::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ {
+ Comment cmnt(masm, "[ EntryNode");
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
+ if (count > 0) {
+ __ mov(ip, Operand(Factory::undefined_value()));
+ for (int i = 0; i < count; i++) {
+ __ push(ip);
+ }
+ }
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ if (FLAG_check_stack) {
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ }
+ }
+ successor_->Compile(masm);
+}
+
+
+void ExitNode::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ Comment cmnt(masm, "[ ExitNode");
+ if (FLAG_trace) {
+ __ push(r0);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+ __ add(sp, sp, Operand((count + 1) * kPointerSize));
+ __ Jump(lr);
+}
+
+
+void PropLoadInstr::Compile(MacroAssembler* masm) {
+ // The key should not be on the stack---if it is a compiler-generated
+ // temporary it is in the accumulator.
+ ASSERT(!key()->is_on_stack());
+
+ Comment cmnt(masm, "[ Load from Property");
+ // If the key is known at compile-time we may be able to use a load IC.
+ bool is_keyed_load = true;
+ if (key()->is_constant()) {
+ // Still use the keyed load IC if the key can be parsed as an integer so
+ // we will get into the case that handles [] on string objects.
+ Handle<Object> key_val = Constant::cast(key())->handle();
+ uint32_t ignored;
+ if (key_val->IsSymbol() &&
+ !String::cast(*key_val)->AsArrayIndex(&ignored)) {
+ is_keyed_load = false;
+ }
+ }
+
+ if (!object()->is_on_stack()) object()->Push(masm);
+
+ if (is_keyed_load) {
+ key()->Push(masm);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Discard key and receiver.
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ } else {
+ key()->Get(masm, r2);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ pop(); // Discard receiver.
+ }
+ location()->Set(masm, r0);
+}
+
+
+void BinaryOpInstr::Compile(MacroAssembler* masm) {
+ // The right-hand value should not be on the stack---if it is a
+ // compiler-generated temporary it is in the accumulator.
+ ASSERT(!right()->is_on_stack());
+
+ Comment cmnt(masm, "[ BinaryOpInstr");
+ // We can overwrite one of the operands if it is a temporary.
+ OverwriteMode mode = NO_OVERWRITE;
+ if (left()->is_temporary()) {
+ mode = OVERWRITE_LEFT;
+ } else if (right()->is_temporary()) {
+ mode = OVERWRITE_RIGHT;
+ }
+
+ // Move left to r1 and right to r0.
+ left()->Get(masm, r1);
+ right()->Get(masm, r0);
+ GenericBinaryOpStub stub(op(), mode);
+ __ CallStub(&stub);
+ location()->Set(masm, r0);
+}
+
+
+void ReturnInstr::Compile(MacroAssembler* masm) {
+ // The location should be 'Effect'. As a side effect, move the value to
+ // the accumulator.
+ Comment cmnt(masm, "[ ReturnInstr");
+ value()->Get(masm, r0);
+}
+
+
+void Constant::Get(MacroAssembler* masm, Register reg) {
+ __ mov(reg, Operand(handle_));
+}
+
+
+void Constant::Push(MacroAssembler* masm) {
+ __ mov(ip, Operand(handle_));
+ __ push(ip);
+}
+
+
+static MemOperand ToMemOperand(SlotLocation* loc) {
+ switch (loc->type()) {
+ case Slot::PARAMETER: {
+ int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+ return MemOperand(fp, (1 + count - loc->index()) * kPointerSize);
+ }
+ case Slot::LOCAL: {
+ const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
+ return MemOperand(fp, kOffset - loc->index() * kPointerSize);
+ }
+ default:
+ UNREACHABLE();
+ return MemOperand(r0);
+ }
+}
+
+
+void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ __ mov(ip, Operand(handle_));
+ __ str(ip, ToMemOperand(loc));
+}
+
+
+void SlotLocation::Get(MacroAssembler* masm, Register reg) {
+ __ ldr(reg, ToMemOperand(this));
+}
+
+
+void SlotLocation::Set(MacroAssembler* masm, Register reg) {
+ __ str(reg, ToMemOperand(this));
+}
+
+
+void SlotLocation::Push(MacroAssembler* masm) {
+ __ ldr(ip, ToMemOperand(this));
+ __ push(ip); // Push will not destroy ip.
+}
+
+
+void SlotLocation::Move(MacroAssembler* masm, Value* value) {
+ // Double dispatch.
+ value->MoveToSlot(masm, this);
+}
+
+
+void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ __ ldr(ip, ToMemOperand(this));
+ __ str(ip, ToMemOperand(loc));
+}
+
+
+void TempLocation::Get(MacroAssembler* masm, Register reg) {
+ switch (where_) {
+ case ACCUMULATOR:
+ if (!reg.is(r0)) __ mov(reg, r0);
+ break;
+ case STACK:
+ __ pop(reg);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Set(MacroAssembler* masm, Register reg) {
+ switch (where_) {
+ case ACCUMULATOR:
+ if (!reg.is(r0)) __ mov(r0, reg);
+ break;
+ case STACK:
+ __ push(reg);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Push(MacroAssembler* masm) {
+ switch (where_) {
+ case ACCUMULATOR:
+ __ push(r0);
+ break;
+ case STACK:
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Move(MacroAssembler* masm, Value* value) {
+ switch (where_) {
+ case ACCUMULATOR:
+      value->Get(masm, r0);
+      break;
+ case STACK:
+ value->Push(masm);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ switch (where_) {
+ case ACCUMULATOR:
+      __ str(r0, ToMemOperand(loc));
+      break;
+ case STACK:
+ __ pop(ip);
+ __ str(ip, ToMemOperand(loc));
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/codegen-arm-inl.h b/V8Binding/v8/src/arm/codegen-arm-inl.h
index 5a29a45..9ff02cb 100644
--- a/V8Binding/v8/src/arm/codegen-arm-inl.h
+++ b/V8Binding/v8/src/arm/codegen-arm-inl.h
@@ -34,6 +34,37 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+void CodeGenerator::LoadConditionAndSpill(Expression* expression,
+ TypeofState typeof_state,
+ JumpTarget* true_target,
+ JumpTarget* false_target,
+ bool force_control) {
+ LoadCondition(expression, typeof_state, true_target, false_target,
+ force_control);
+}
+
+
+void CodeGenerator::LoadAndSpill(Expression* expression,
+ TypeofState typeof_state) {
+ Load(expression, typeof_state);
+}
+
+
+void CodeGenerator::VisitAndSpill(Statement* statement) {
+ Visit(statement);
+}
+
+
+void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+ VisitStatements(statements);
+}
+
+
+void Reference::GetValueAndSpill(TypeofState typeof_state) {
+ GetValue(typeof_state);
+}
+
+
// Platform-specific inline functions.
void DeferredCode::Jump() { __ jmp(&entry_label_); }
diff --git a/V8Binding/v8/src/arm/codegen-arm.cc b/V8Binding/v8/src/arm/codegen-arm.cc
index 5f8149e..67d4611 100644
--- a/V8Binding/v8/src/arm/codegen-arm.cc
+++ b/V8Binding/v8/src/arm/codegen-arm.cc
@@ -133,8 +133,7 @@ CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
allocator_(NULL),
cc_reg_(al),
state_(NULL),
- function_return_is_shadowed_(false),
- in_spilled_code_(false) {
+ function_return_is_shadowed_(false) {
}
@@ -156,7 +155,6 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
ASSERT(frame_ == NULL);
frame_ = new VirtualFrame();
cc_reg_ = al;
- set_in_spilled_code(false);
{
CodeGenState state(this);
@@ -423,22 +421,6 @@ MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
}
-void CodeGenerator::LoadConditionAndSpill(Expression* expression,
- TypeofState typeof_state,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_control) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- LoadCondition(expression, typeof_state, true_target, false_target,
- force_control);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-}
-
-
// Loads a value on TOS. If it is a boolean value, the result may have been
// (partially) translated into branches, or it may have set the condition
// code register. If force_cc is set, the value is forced to set the
@@ -450,7 +432,6 @@ void CodeGenerator::LoadCondition(Expression* x,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_cc) {
- ASSERT(!in_spilled_code());
ASSERT(!has_cc());
int original_height = frame_->height();
@@ -484,21 +465,10 @@ void CodeGenerator::LoadCondition(Expression* x,
}
-void CodeGenerator::LoadAndSpill(Expression* expression,
- TypeofState typeof_state) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Load(expression, typeof_state);
- frame_->SpillAll();
- set_in_spilled_code(true);
-}
-
-
void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- ASSERT(!in_spilled_code());
JumpTarget true_target;
JumpTarget false_target;
LoadCondition(x, typeof_state, &true_target, &false_target, false);
@@ -697,96 +667,6 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
}
-class GenericBinaryOpStub : public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- int constant_rhs = CodeGenerator::kUnknownIntValue)
- : op_(op),
- mode_(mode),
- constant_rhs_(constant_rhs),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- int constant_rhs_;
- bool specialized_on_rhs_;
-
- static const int kMaxKnownRhs = 0x40000000;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 6> {};
- class KnownIntBits: public BitField<int, 8, 8> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | KnownIntBits::encode(MinorKeyForKnownInt());
- }
-
- void Generate(MacroAssembler* masm);
- void HandleNonSmiBitwiseOp(MacroAssembler* masm);
-
- static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
- if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
- if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
- if (op == Token::MOD) {
- if (constant_rhs <= 1) return false;
- if (constant_rhs <= 10) return true;
- if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
- return false;
- }
- return false;
- }
-
- int MinorKeyForKnownInt() {
- if (!specialized_on_rhs_) return 0;
- if (constant_rhs_ <= 10) return constant_rhs_ + 1;
- ASSERT(IsPowerOf2(constant_rhs_));
- int key = 12;
- int d = constant_rhs_;
- while ((d & 1) == 0) {
- key++;
- d >>= 1;
- }
- return key;
- }
-
- const char* GetName() {
- switch (op_) {
- case Token::ADD: return "GenericBinaryOpStub_ADD";
- case Token::SUB: return "GenericBinaryOpStub_SUB";
- case Token::MUL: return "GenericBinaryOpStub_MUL";
- case Token::DIV: return "GenericBinaryOpStub_DIV";
- case Token::MOD: return "GenericBinaryOpStub_MOD";
- case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
- case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
- case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
- case Token::SAR: return "GenericBinaryOpStub_SAR";
- case Token::SHL: return "GenericBinaryOpStub_SHL";
- case Token::SHR: return "GenericBinaryOpStub_SHR";
- default: return "GenericBinaryOpStub";
- }
- }
-
-#ifdef DEBUG
- void Print() {
- if (!specialized_on_rhs_) {
- PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
- } else {
- PrintF("GenericBinaryOpStub (%s by %d)\n",
- Token::String(op_),
- constant_rhs_);
- }
- }
-#endif
-};
-
-
void CodeGenerator::GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
int constant_rhs) {
@@ -1236,28 +1116,6 @@ void CodeGenerator::CheckStack() {
}
-void CodeGenerator::VisitAndSpill(Statement* statement) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Visit(statement);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- VisitStatements(statements);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-}
-
-
void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#ifdef DEBUG
int original_height = frame_->height();
@@ -1854,7 +1712,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- ASSERT(!in_spilled_code());
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ForInStatement");
CodeForStatementPosition(node);
@@ -2912,7 +2769,6 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- ASSERT(!in_spilled_code());
VirtualFrame::SpilledScope spilled_scope;
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.
@@ -4248,17 +4104,7 @@ Handle<String> Reference::GetName() {
}
-void Reference::GetValueAndSpill(TypeofState typeof_state) {
- ASSERT(cgen_->in_spilled_code());
- cgen_->set_in_spilled_code(false);
- GetValue(typeof_state);
- cgen_->frame()->SpillAll();
- cgen_->set_in_spilled_code(true);
-}
-
-
void Reference::GetValue(TypeofState typeof_state) {
- ASSERT(!cgen_->in_spilled_code());
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
ASSERT(!cgen_->has_cc());
diff --git a/V8Binding/v8/src/arm/codegen-arm.h b/V8Binding/v8/src/arm/codegen-arm.h
index 6391a8e..80d1d56 100644
--- a/V8Binding/v8/src/arm/codegen-arm.h
+++ b/V8Binding/v8/src/arm/codegen-arm.h
@@ -183,9 +183,6 @@ class CodeGenerator: public AstVisitor {
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
- bool in_spilled_code() const { return in_spilled_code_; }
- void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
-
static const int kUnknownIntValue = -1;
private:
@@ -215,18 +212,18 @@ class CodeGenerator: public AstVisitor {
#define DEF_VISIT(type) \
void Visit##type(type* node);
- NODE_LIST(DEF_VISIT)
+ AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Visit a statement and then spill the virtual frame if control flow can
// reach the end of the statement (ie, it does not exit via break,
// continue, return, or throw). This function is used temporarily while
// the code generator is being transformed.
- void VisitAndSpill(Statement* statement);
+ inline void VisitAndSpill(Statement* statement);
// Visit a list of statements and then spill the virtual frame if control
// flow can reach the end of the list.
- void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
+ inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
void GenCode(FunctionLiteral* fun);
@@ -263,17 +260,17 @@ class CodeGenerator: public AstVisitor {
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
- void LoadAndSpill(Expression* expression,
- TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ inline void LoadAndSpill(Expression* expression,
+ TypeofState typeof_state = NOT_INSIDE_TYPEOF);
// Call LoadCondition and then spill the virtual frame unless control flow
// cannot reach the end of the expression (ie, by emitting only
// unconditional jumps to the control targets).
- void LoadConditionAndSpill(Expression* expression,
- TypeofState typeof_state,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_control);
+ inline void LoadConditionAndSpill(Expression* expression,
+ TypeofState typeof_state,
+ JumpTarget* true_target,
+ JumpTarget* false_target,
+ bool force_control);
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
@@ -374,7 +371,7 @@ class CodeGenerator: public AstVisitor {
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Node* node);
+ void CodeForStatementPosition(AstNode* node);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
@@ -405,12 +402,6 @@ class CodeGenerator: public AstVisitor {
// to some unlinking code).
bool function_return_is_shadowed_;
- // True when we are in code that expects the virtual frame to be fully
- // spilled. Some virtual frame function are disabled in DEBUG builds when
- // called from spilled code, because they do not leave the virtual frame
- // in a spilled state.
- bool in_spilled_code_;
-
static InlineRuntimeLUT kInlineRuntimeLUT[];
friend class VirtualFrame;
@@ -421,6 +412,96 @@ class CodeGenerator: public AstVisitor {
};
+class GenericBinaryOpStub : public CodeStub {
+ public:
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ int constant_rhs = CodeGenerator::kUnknownIntValue)
+ : op_(op),
+ mode_(mode),
+ constant_rhs_(constant_rhs),
+ specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ int constant_rhs_;
+ bool specialized_on_rhs_;
+
+ static const int kMaxKnownRhs = 0x40000000;
+
+ // Minor key encoding in 16 bits.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 6> {};
+ class KnownIntBits: public BitField<int, 8, 8> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | KnownIntBits::encode(MinorKeyForKnownInt());
+ }
+
+ void Generate(MacroAssembler* masm);
+ void HandleNonSmiBitwiseOp(MacroAssembler* masm);
+
+ static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
+ if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
+ if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
+ if (op == Token::MOD) {
+ if (constant_rhs <= 1) return false;
+ if (constant_rhs <= 10) return true;
+ if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
+ return false;
+ }
+ return false;
+ }
+
+ int MinorKeyForKnownInt() {
+ if (!specialized_on_rhs_) return 0;
+ if (constant_rhs_ <= 10) return constant_rhs_ + 1;
+ ASSERT(IsPowerOf2(constant_rhs_));
+ int key = 12;
+ int d = constant_rhs_;
+ while ((d & 1) == 0) {
+ key++;
+ d >>= 1;
+ }
+ return key;
+ }
+
+ const char* GetName() {
+ switch (op_) {
+ case Token::ADD: return "GenericBinaryOpStub_ADD";
+ case Token::SUB: return "GenericBinaryOpStub_SUB";
+ case Token::MUL: return "GenericBinaryOpStub_MUL";
+ case Token::DIV: return "GenericBinaryOpStub_DIV";
+ case Token::MOD: return "GenericBinaryOpStub_MOD";
+ case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+ case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+ case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+ case Token::SAR: return "GenericBinaryOpStub_SAR";
+ case Token::SHL: return "GenericBinaryOpStub_SHL";
+ case Token::SHR: return "GenericBinaryOpStub_SHR";
+ default: return "GenericBinaryOpStub";
+ }
+ }
+
+#ifdef DEBUG
+ void Print() {
+ if (!specialized_on_rhs_) {
+ PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
+ } else {
+ PrintF("GenericBinaryOpStub (%s by %d)\n",
+ Token::String(op_),
+ constant_rhs_);
+ }
+ }
+#endif
+};
+
+
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/V8Binding/v8/src/arm/ic-arm.cc b/V8Binding/v8/src/arm/ic-arm.cc
index 82a2bec..8781256 100644
--- a/V8Binding/v8/src/arm/ic-arm.cc
+++ b/V8Binding/v8/src/arm/ic-arm.cc
@@ -582,8 +582,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r3, Operand(Factory::hash_table_map()));
- __ b(eq, &slow);
+ __ cmp(r3, Operand(Factory::fixed_array_map()));
+ __ b(ne, &slow);
// Check that the key (index) is within bounds.
__ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
__ cmp(r0, Operand(r3));
@@ -661,8 +661,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ cmp(r2, Operand(Factory::hash_table_map()));
- __ b(eq, &slow);
+ __ cmp(r2, Operand(Factory::fixed_array_map()));
+ __ b(ne, &slow);
// Untag the key (for checking against untagged length in the fixed array).
__ mov(r1, Operand(r1, ASR, kSmiTagSize));
// Compute address to store into and check array bounds.
@@ -710,8 +710,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&array);
__ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset));
__ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ cmp(r1, Operand(Factory::hash_table_map()));
- __ b(eq, &slow);
+ __ cmp(r1, Operand(Factory::fixed_array_map()));
+ __ b(ne, &slow);
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
diff --git a/V8Binding/v8/src/arm/stub-cache-arm.cc b/V8Binding/v8/src/arm/stub-cache-arm.cc
index d6650c9..393db59 100644
--- a/V8Binding/v8/src/arm/stub-cache-arm.cc
+++ b/V8Binding/v8/src/arm/stub-cache-arm.cc
@@ -467,21 +467,23 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
// Push the arguments on the JS stack of the caller.
__ push(receiver); // receiver
+ __ push(reg); // holder
__ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback data
__ push(ip);
+ __ ldr(reg, FieldMemOperand(ip, AccessorInfo::kDataOffset));
+ __ push(reg);
__ push(name_reg); // name
- __ push(reg); // holder
// Do tail-call to the runtime system.
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallRuntime(load_callback_property, 4);
+ __ TailCallRuntime(load_callback_property, 5);
}
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
JSObject* holder,
- Smi* lookup_hint,
+ LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
@@ -500,13 +502,18 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
__ push(receiver); // receiver
__ push(reg); // holder
__ push(name_reg); // name
- __ mov(scratch1, Operand(lookup_hint));
+
+ InterceptorInfo* interceptor = holder->GetNamedInterceptor();
+ ASSERT(!Heap::InNewSpace(interceptor));
+ __ mov(scratch1, Operand(Handle<Object>(interceptor)));
__ push(scratch1);
+ __ ldr(scratch2, FieldMemOperand(scratch1, InterceptorInfo::kDataOffset));
+ __ push(scratch2);
// Do tail-call to the runtime system.
ExternalReference load_ic_property =
- ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
- __ TailCallRuntime(load_ic_property, 4);
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ __ TailCallRuntime(load_ic_property, 5);
}
@@ -676,13 +683,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss);
- // Make sure object->elements()->map() != Heap::hash_table_map()
+ // Make sure object->HasFastElements().
// Get the elements array of the object.
__ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ cmp(r2, Operand(Factory::hash_table_map()));
- __ b(eq, &miss);
+ __ cmp(r2, Operand(Factory::fixed_array_map()));
+ __ b(ne, &miss);
break;
default:
@@ -744,8 +751,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3);
-
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -782,6 +787,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
+ __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3);
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -790,7 +796,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
- __ DecrementCounter(&Counters::call_global_inline, 1, r1, r3);
__ IncrementCounter(&Counters::call_global_inline_miss, 1, r1, r3);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -951,8 +956,6 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::named_store_global_inline, 1, r1, r3);
-
// Check that the map of the global has not changed.
__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -963,11 +966,11 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
__ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
__ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ IncrementCounter(&Counters::named_store_global_inline, 1, r1, r3);
__ Ret();
// Handle store cache miss.
__ bind(&miss);
- __ DecrementCounter(&Counters::named_store_global_inline, 1, r1, r3);
__ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r1, r3);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1054,9 +1057,11 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
__ ldr(r0, MemOperand(sp, 0));
+ LookupResult lookup;
+ holder->LocalLookupRealNamedProperty(name, &lookup);
GenerateLoadInterceptor(object,
holder,
- holder->InterceptorPropertyLookupHint(name),
+ &lookup,
r0,
r2,
r3,
@@ -1083,8 +1088,6 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
-
// Get the receiver from the stack.
__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
@@ -1109,10 +1112,10 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
__ b(eq, &miss);
}
+ __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
__ Ret();
__ bind(&miss);
- __ DecrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
__ IncrementCounter(&Counters::named_load_global_inline_miss, 1, r1, r3);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1215,9 +1218,11 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
__ cmp(r2, Operand(Handle<String>(name)));
__ b(ne, &miss);
+ LookupResult lookup;
+ holder->LocalLookupRealNamedProperty(name, &lookup);
GenerateLoadInterceptor(receiver,
holder,
- Smi::FromInt(JSObject::kLookupInHolder),
+ &lookup,
r0,
r2,
r3,
diff --git a/V8Binding/v8/src/arm/virtual-frame-arm.h b/V8Binding/v8/src/arm/virtual-frame-arm.h
index 2f36f10..d575df6 100644
--- a/V8Binding/v8/src/arm/virtual-frame-arm.h
+++ b/V8Binding/v8/src/arm/virtual-frame-arm.h
@@ -52,20 +52,7 @@ class VirtualFrame : public ZoneObject {
// generator is being transformed.
class SpilledScope BASE_EMBEDDED {
public:
- SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
- ASSERT(cgen()->has_valid_frame());
- cgen()->frame()->SpillAll();
- cgen()->set_in_spilled_code(true);
- }
-
- ~SpilledScope() {
- cgen()->set_in_spilled_code(previous_state_);
- }
-
- private:
- bool previous_state_;
-
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+ SpilledScope() {}
};
// An illegal index into the virtual frame.
diff --git a/V8Binding/v8/src/ast.cc b/V8Binding/v8/src/ast.cc
index d8a3232..2b60742 100644
--- a/V8Binding/v8/src/ast.cc
+++ b/V8Binding/v8/src/ast.cc
@@ -51,7 +51,7 @@ CallEval CallEval::sentinel_(NULL, NULL, 0);
if (v->CheckStackOverflow()) return; \
v->Visit##type(this); \
}
-NODE_LIST(DECL_ACCEPT)
+AST_NODE_LIST(DECL_ACCEPT)
#undef DECL_ACCEPT
diff --git a/V8Binding/v8/src/ast.h b/V8Binding/v8/src/ast.h
index 64d61cc..3a309ac 100644
--- a/V8Binding/v8/src/ast.h
+++ b/V8Binding/v8/src/ast.h
@@ -53,9 +53,8 @@ namespace internal {
// Nodes of the abstract syntax tree. Only concrete classes are
// enumerated here.
-#define NODE_LIST(V) \
+#define STATEMENT_NODE_LIST(V) \
V(Block) \
- V(Declaration) \
V(ExpressionStatement) \
V(EmptyStatement) \
V(IfStatement) \
@@ -69,7 +68,9 @@ namespace internal {
V(ForInStatement) \
V(TryCatch) \
V(TryFinally) \
- V(DebuggerStatement) \
+ V(DebuggerStatement)
+
+#define EXPRESSION_NODE_LIST(V) \
V(FunctionLiteral) \
V(FunctionBoilerplateLiteral) \
V(Conditional) \
@@ -93,13 +94,17 @@ namespace internal {
V(CompareOperation) \
V(ThisFunction)
+#define AST_NODE_LIST(V) \
+ V(Declaration) \
+ STATEMENT_NODE_LIST(V) \
+ EXPRESSION_NODE_LIST(V)
// Forward declarations
class TargetCollector;
class MaterializedLiteral;
#define DEF_FORWARD_DECLARATION(type) class type;
-NODE_LIST(DEF_FORWARD_DECLARATION)
+AST_NODE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION
@@ -108,10 +113,10 @@ NODE_LIST(DEF_FORWARD_DECLARATION)
typedef ZoneList<Handle<String> > ZoneStringList;
-class Node: public ZoneObject {
+class AstNode: public ZoneObject {
public:
- Node(): statement_pos_(RelocInfo::kNoPosition) { }
- virtual ~Node() { }
+ AstNode(): statement_pos_(RelocInfo::kNoPosition) { }
+ virtual ~AstNode() { }
virtual void Accept(AstVisitor* v) = 0;
// Type testing & conversion.
@@ -143,7 +148,7 @@ class Node: public ZoneObject {
};
-class Statement: public Node {
+class Statement: public AstNode {
public:
virtual Statement* AsStatement() { return this; }
virtual ReturnStatement* AsReturnStatement() { return NULL; }
@@ -152,7 +157,7 @@ class Statement: public Node {
};
-class Expression: public Node {
+class Expression: public AstNode {
public:
virtual Expression* AsExpression() { return this; }
@@ -240,7 +245,7 @@ class Block: public BreakableStatement {
};
-class Declaration: public Node {
+class Declaration: public AstNode {
public:
Declaration(VariableProxy* proxy, Variable::Mode mode, FunctionLiteral* fun)
: proxy_(proxy),
@@ -523,7 +528,7 @@ class IfStatement: public Statement {
// NOTE: TargetCollectors are represented as nodes to fit in the target
// stack in the compiler; this should probably be reworked.
-class TargetCollector: public Node {
+class TargetCollector: public AstNode {
public:
explicit TargetCollector(ZoneList<BreakTarget*>* targets)
: targets_(targets) {
@@ -816,9 +821,6 @@ class VariableProxy: public Expression {
return (variable == NULL) ? false : variable->is_arguments();
}
- // If this assertion fails it means that some code has tried to
- // treat the special "this" variable as an ordinary variable with
- // the name "this".
Handle<String> name() const { return name_; }
Variable* var() const { return var_; }
UseCount* var_uses() { return &var_uses_; }
@@ -1678,7 +1680,7 @@ class AstVisitor BASE_EMBEDDED {
virtual ~AstVisitor() { }
// Dispatch
- void Visit(Node* node) { node->Accept(this); }
+ void Visit(AstNode* node) { node->Accept(this); }
// Iteration
virtual void VisitStatements(ZoneList<Statement*>* statements);
@@ -1702,7 +1704,7 @@ class AstVisitor BASE_EMBEDDED {
// Individual nodes
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) = 0;
- NODE_LIST(DEF_VISIT)
+ AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
private:
diff --git a/V8Binding/v8/src/builtins.cc b/V8Binding/v8/src/builtins.cc
index 0648e54..1ea0245 100644
--- a/V8Binding/v8/src/builtins.cc
+++ b/V8Binding/v8/src/builtins.cc
@@ -87,18 +87,34 @@ static inline Object* __builtin_arg__(int n, int argc, Object** argv) {
}
-// TODO(1238487): Get rid of this function that determines if the
-// builtin is called as a constructor. This may be a somewhat slow
-// operation due to the stack frame iteration.
static inline bool CalledAsConstructor() {
+#ifdef DEBUG
+ // Calculate the result using a full stack frame iterator and check
+ // that the state of the stack is as we assume it to be in the
+ // code below.
StackFrameIterator it;
ASSERT(it.frame()->is_exit());
it.Advance();
StackFrame* frame = it.frame();
- return frame->is_construct();
+ bool reference_result = frame->is_construct();
+#endif
+ Address fp = Top::c_entry_fp(Top::GetCurrentThread());
+ // Because we know fp points to an exit frame we can use the relevant
+ // part of ExitFrame::ComputeCallerState directly.
+ const int kCallerOffset = ExitFrameConstants::kCallerFPOffset;
+ Address caller_fp = Memory::Address_at(fp + kCallerOffset);
+ // This inlines the part of StackFrame::ComputeType that grabs the
+ // type of the current frame. Note that StackFrame::ComputeType
+ // has been specialized for each architecture so if any one of them
+ // changes this code has to be changed as well.
+ const int kMarkerOffset = StandardFrameConstants::kMarkerOffset;
+ const Smi* kConstructMarker = Smi::FromInt(StackFrame::CONSTRUCT);
+ Object* marker = Memory::Object_at(caller_fp + kMarkerOffset);
+ bool result = (marker == kConstructMarker);
+ ASSERT_EQ(result, reference_result);
+ return result;
}
-
// ----------------------------------------------------------------------------
diff --git a/V8Binding/v8/src/cfg.cc b/V8Binding/v8/src/cfg.cc
new file mode 100644
index 0000000..32f614b
--- /dev/null
+++ b/V8Binding/v8/src/cfg.cc
@@ -0,0 +1,741 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "cfg.h"
+#include "scopeinfo.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+CfgGlobals* CfgGlobals::top_ = NULL;
+
+
+CfgGlobals::CfgGlobals(FunctionLiteral* fun)
+ : global_fun_(fun),
+ global_exit_(new ExitNode()),
+ nowhere_(new Nowhere()),
+#ifdef DEBUG
+ node_counter_(0),
+ temp_counter_(0),
+#endif
+ previous_(top_) {
+ top_ = this;
+}
+
+
+#define BAILOUT(reason) \
+ do { return NULL; } while (false)
+
+Cfg* Cfg::Build() {
+ FunctionLiteral* fun = CfgGlobals::current()->fun();
+ if (fun->scope()->num_heap_slots() > 0) {
+ BAILOUT("function has context slots");
+ }
+ if (fun->scope()->num_stack_slots() > kPointerSize) {
+ BAILOUT("function has too many locals");
+ }
+ if (fun->scope()->num_parameters() > kPointerSize - 1) {
+ BAILOUT("function has too many parameters");
+ }
+ if (fun->scope()->arguments() != NULL) {
+ BAILOUT("function uses .arguments");
+ }
+
+ ZoneList<Statement*>* body = fun->body();
+ if (body->is_empty()) {
+ BAILOUT("empty function body");
+ }
+
+ StatementCfgBuilder builder;
+ builder.VisitStatements(body);
+ Cfg* graph = builder.graph();
+ if (graph == NULL) {
+ BAILOUT("unsupported statement type");
+ }
+ if (graph->is_empty()) {
+ BAILOUT("function body produces empty cfg");
+ }
+ if (graph->has_exit()) {
+ BAILOUT("control path without explicit return");
+ }
+ graph->PrependEntryNode();
+ return graph;
+}
+
+#undef BAILOUT
+
+
+void Cfg::PrependEntryNode() {
+ ASSERT(!is_empty());
+ entry_ = new EntryNode(InstructionBlock::cast(entry()));
+}
+
+
+void Cfg::Append(Instruction* instr) {
+ ASSERT(is_empty() || has_exit());
+ if (is_empty()) {
+ entry_ = exit_ = new InstructionBlock();
+ }
+ InstructionBlock::cast(exit_)->Append(instr);
+}
+
+
+void Cfg::AppendReturnInstruction(Value* value) {
+ Append(new ReturnInstr(value));
+ ExitNode* global_exit = CfgGlobals::current()->exit();
+ InstructionBlock::cast(exit_)->set_successor(global_exit);
+ exit_ = NULL;
+}
+
+
+void Cfg::Concatenate(Cfg* other) {
+ ASSERT(is_empty() || has_exit());
+ if (other->is_empty()) return;
+
+ if (is_empty()) {
+ entry_ = other->entry();
+ exit_ = other->exit();
+ } else {
+ // We have a pair of nonempty fragments and this has an available exit.
+ // Destructively glue the fragments together.
+ InstructionBlock* first = InstructionBlock::cast(exit_);
+ InstructionBlock* second = InstructionBlock::cast(other->entry());
+ first->instructions()->AddAll(*second->instructions());
+ if (second->successor() != NULL) {
+ first->set_successor(second->successor());
+ exit_ = other->exit();
+ }
+ }
+}
+
+
+void InstructionBlock::Unmark() {
+ if (is_marked_) {
+ is_marked_ = false;
+ successor_->Unmark();
+ }
+}
+
+
+void EntryNode::Unmark() {
+ if (is_marked_) {
+ is_marked_ = false;
+ successor_->Unmark();
+ }
+}
+
+
+void ExitNode::Unmark() {
+ is_marked_ = false;
+}
+
+
+Handle<Code> Cfg::Compile(Handle<Script> script) {
+ const int kInitialBufferSize = 4 * KB;
+ MacroAssembler* masm = new MacroAssembler(NULL, kInitialBufferSize);
+ entry()->Compile(masm);
+ entry()->Unmark();
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ FunctionLiteral* fun = CfgGlobals::current()->fun();
+ ZoneScopeInfo info(fun->scope());
+ InLoopFlag in_loop = fun->loop_nesting() ? IN_LOOP : NOT_IN_LOOP;
+ Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
+ Handle<Code> code = Factory::NewCode(desc, &info, flags, masm->CodeObject());
+
+ // Add unresolved entries in the code to the fixup list.
+ Bootstrapper::AddFixup(*code, masm);
+
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_code) {
+ // Print the source code if available.
+ if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ PrintF("--- Raw source ---\n");
+ StringInputBuffer stream(String::cast(script->source()));
+ stream.Seek(fun->start_position());
+ // fun->end_position() points to the last character in the
+ // stream. We need to compensate by adding one to calculate the
+ // length.
+ int source_len = fun->end_position() - fun->start_position() + 1;
+ for (int i = 0; i < source_len; i++) {
+ if (stream.has_more()) PrintF("%c", stream.GetNext());
+ }
+ PrintF("\n\n");
+ }
+ PrintF("--- Code ---\n");
+ code->Disassemble(*fun->name()->ToCString());
+ }
+#endif
+
+ return code;
+}
+
+
+void MoveInstr::FastAllocate(TempLocation* temp) {
+ ASSERT(temp->where() == TempLocation::NOT_ALLOCATED);
+ if (temp == value()) {
+ temp->set_where(TempLocation::ACCUMULATOR);
+ } else {
+ temp->set_where(TempLocation::STACK);
+ }
+}
+
+
+void PropLoadInstr::FastAllocate(TempLocation* temp) {
+ ASSERT(temp->where() == TempLocation::NOT_ALLOCATED);
+ if (temp == object() || temp == key()) {
+ temp->set_where(TempLocation::ACCUMULATOR);
+ } else {
+ temp->set_where(TempLocation::STACK);
+ }
+}
+
+
+void BinaryOpInstr::FastAllocate(TempLocation* temp) {
+ ASSERT(temp->where() == TempLocation::NOT_ALLOCATED);
+ if (temp == left() || temp == right()) {
+ temp->set_where(TempLocation::ACCUMULATOR);
+ } else {
+ temp->set_where(TempLocation::STACK);
+ }
+}
+
+
+void ReturnInstr::FastAllocate(TempLocation* temp) {
+ ASSERT(temp->where() == TempLocation::NOT_ALLOCATED);
+ if (temp == value()) {
+ temp->set_where(TempLocation::ACCUMULATOR);
+ } else {
+ temp->set_where(TempLocation::STACK);
+ }
+}
+
+
+void PositionInstr::Compile(MacroAssembler* masm) {
+ if (FLAG_debug_info && pos_ != RelocInfo::kNoPosition) {
+ masm->RecordStatementPosition(pos_);
+ masm->RecordPosition(pos_);
+ }
+}
+
+
+void MoveInstr::Compile(MacroAssembler* masm) {
+ location()->Move(masm, value());
+}
+
+
+// The expression builder should not be used for declarations or statements.
+void ExpressionCfgBuilder::VisitDeclaration(Declaration* decl) {
+ UNREACHABLE();
+}
+
+#define DEFINE_VISIT(type) \
+ void ExpressionCfgBuilder::Visit##type(type* stmt) { UNREACHABLE(); }
+STATEMENT_NODE_LIST(DEFINE_VISIT)
+#undef DEFINE_VISIT
+
+
+// Macros (temporarily) handling unsupported expression types.
+#define BAILOUT(reason) \
+ do { \
+ graph_ = NULL; \
+ return; \
+ } while (false)
+
+void ExpressionCfgBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+ BAILOUT("FunctionLiteral");
+}
+
+
+void ExpressionCfgBuilder::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* expr) {
+ BAILOUT("FunctionBoilerplateLiteral");
+}
+
+
+void ExpressionCfgBuilder::VisitConditional(Conditional* expr) {
+ BAILOUT("Conditional");
+}
+
+
+void ExpressionCfgBuilder::VisitSlot(Slot* expr) {
+ BAILOUT("Slot");
+}
+
+
+void ExpressionCfgBuilder::VisitVariableProxy(VariableProxy* expr) {
+ Expression* rewrite = expr->var()->rewrite();
+ if (rewrite == NULL || rewrite->AsSlot() == NULL) {
+ BAILOUT("unsupported variable (not a slot)");
+ }
+ Slot* slot = rewrite->AsSlot();
+ if (slot->type() != Slot::PARAMETER && slot->type() != Slot::LOCAL) {
+ BAILOUT("unsupported slot type (not a parameter or local)");
+ }
+ // Ignore the passed destination.
+ value_ = new SlotLocation(slot->type(), slot->index());
+}
+
+
+void ExpressionCfgBuilder::VisitLiteral(Literal* expr) {
+ // Ignore the passed destination.
+ value_ = new Constant(expr->handle());
+}
+
+
+void ExpressionCfgBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+ BAILOUT("RegExpLiteral");
+}
+
+
+void ExpressionCfgBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+ BAILOUT("ObjectLiteral");
+}
+
+
+void ExpressionCfgBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+ BAILOUT("ArrayLiteral");
+}
+
+
+void ExpressionCfgBuilder::VisitCatchExtensionObject(
+ CatchExtensionObject* expr) {
+ BAILOUT("CatchExtensionObject");
+}
+
+
+void ExpressionCfgBuilder::VisitAssignment(Assignment* expr) {
+ if (expr->op() != Token::ASSIGN && expr->op() != Token::INIT_VAR) {
+ BAILOUT("unsupported compound assignment");
+ }
+ Expression* lhs = expr->target();
+ if (lhs->AsProperty() != NULL) {
+ BAILOUT("unsupported property assignment");
+ }
+ Variable* var = lhs->AsVariableProxy()->AsVariable();
+ if (var == NULL) {
+ BAILOUT("unsupported invalid left-hand side");
+ }
+ if (var->is_global()) {
+ BAILOUT("unsupported global variable");
+ }
+ Slot* slot = var->slot();
+ ASSERT(slot != NULL);
+ if (slot->type() != Slot::PARAMETER && slot->type() != Slot::LOCAL) {
+ BAILOUT("unsupported slot lhs (not a parameter or local)");
+ }
+
+ ExpressionCfgBuilder builder;
+ SlotLocation* loc = new SlotLocation(slot->type(), slot->index());
+ builder.Build(expr->value(), loc);
+ if (builder.graph() == NULL) {
+ BAILOUT("unsupported expression in assignment");
+ }
+ // If the expression did not come back in the slot location, append
+ // a move to the CFG.
+ graph_ = builder.graph();
+ if (builder.value() != loc) {
+ graph()->Append(new MoveInstr(loc, builder.value()));
+ }
+ // Record the assignment.
+ assigned_vars_.AddElement(loc);
+ // Ignore the destination passed to us.
+ value_ = loc;
+}
+
+
+void ExpressionCfgBuilder::VisitThrow(Throw* expr) {
+ BAILOUT("Throw");
+}
+
+
+void ExpressionCfgBuilder::VisitProperty(Property* expr) {
+ ExpressionCfgBuilder object, key;
+ object.Build(expr->obj(), NULL);
+ if (object.graph() == NULL) {
+ BAILOUT("unsupported object subexpression in propref");
+ }
+ key.Build(expr->key(), NULL);
+ if (key.graph() == NULL) {
+ BAILOUT("unsupported key subexpression in propref");
+ }
+
+ if (destination_ == NULL) destination_ = new TempLocation();
+
+ graph_ = object.graph();
+ // Insert a move to a fresh temporary if the object value is in a slot
+ // that's assigned in the key.
+ Location* temp = NULL;
+ if (object.value()->is_slot() &&
+ key.assigned_vars()->Contains(SlotLocation::cast(object.value()))) {
+ temp = new TempLocation();
+ graph()->Append(new MoveInstr(temp, object.value()));
+ }
+ graph()->Concatenate(key.graph());
+ graph()->Append(new PropLoadInstr(destination_,
+ temp == NULL ? object.value() : temp,
+ key.value()));
+
+ assigned_vars_ = *object.assigned_vars();
+ assigned_vars()->Union(key.assigned_vars());
+
+ value_ = destination_;
+}
+
+
+void ExpressionCfgBuilder::VisitCall(Call* expr) {
+ BAILOUT("Call");
+}
+
+
+void ExpressionCfgBuilder::VisitCallEval(CallEval* expr) {
+ BAILOUT("CallEval");
+}
+
+
+void ExpressionCfgBuilder::VisitCallNew(CallNew* expr) {
+ BAILOUT("CallNew");
+}
+
+
+void ExpressionCfgBuilder::VisitCallRuntime(CallRuntime* expr) {
+ BAILOUT("CallRuntime");
+}
+
+
+void ExpressionCfgBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+ BAILOUT("UnaryOperation");
+}
+
+
+void ExpressionCfgBuilder::VisitCountOperation(CountOperation* expr) {
+ BAILOUT("CountOperation");
+}
+
+
+void ExpressionCfgBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+ Token::Value op = expr->op();
+ switch (op) {
+ case Token::COMMA:
+ case Token::OR:
+ case Token::AND:
+ BAILOUT("unsupported binary operation");
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+ ExpressionCfgBuilder left, right;
+ left.Build(expr->left(), NULL);
+ if (left.graph() == NULL) {
+ BAILOUT("unsupported left subexpression in binop");
+ }
+ right.Build(expr->right(), NULL);
+ if (right.graph() == NULL) {
+ BAILOUT("unsupported right subexpression in binop");
+ }
+
+ if (destination_ == NULL) destination_ = new TempLocation();
+
+ graph_ = left.graph();
+ // Insert a move to a fresh temporary if the left value is in a
+ // slot that's assigned on the right.
+ Location* temp = NULL;
+ if (left.value()->is_slot() &&
+ right.assigned_vars()->Contains(SlotLocation::cast(left.value()))) {
+ temp = new TempLocation();
+ graph()->Append(new MoveInstr(temp, left.value()));
+ }
+ graph()->Concatenate(right.graph());
+ graph()->Append(new BinaryOpInstr(destination_, op,
+ temp == NULL ? left.value() : temp,
+ right.value()));
+
+ assigned_vars_ = *left.assigned_vars();
+ assigned_vars()->Union(right.assigned_vars());
+
+ value_ = destination_;
+ return;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void ExpressionCfgBuilder::VisitCompareOperation(CompareOperation* expr) {
+ BAILOUT("CompareOperation");
+}
+
+
+void ExpressionCfgBuilder::VisitThisFunction(ThisFunction* expr) {
+ BAILOUT("ThisFunction");
+}
+
+#undef BAILOUT
+
+
+// Macros (temporarily) handling unsupported statement types.
+#define BAILOUT(reason) \
+ do { \
+ graph_ = NULL; \
+ return; \
+ } while (false)
+
+#define CHECK_BAILOUT() \
+ if (graph() == NULL) { return; } else {}
+
+void StatementCfgBuilder::VisitStatements(ZoneList<Statement*>* stmts) {
+ for (int i = 0, len = stmts->length(); i < len; i++) {
+ Visit(stmts->at(i));
+ CHECK_BAILOUT();
+ if (!graph()->has_exit()) return;
+ }
+}
+
+
+// The statement builder should not be used for declarations or expressions.
+void StatementCfgBuilder::VisitDeclaration(Declaration* decl) { UNREACHABLE(); }
+
+#define DEFINE_VISIT(type) \
+ void StatementCfgBuilder::Visit##type(type* expr) { UNREACHABLE(); }
+EXPRESSION_NODE_LIST(DEFINE_VISIT)
+#undef DEFINE_VISIT
+
+
+void StatementCfgBuilder::VisitBlock(Block* stmt) {
+ VisitStatements(stmt->statements());
+}
+
+
+void StatementCfgBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+ ExpressionCfgBuilder builder;
+ builder.Build(stmt->expression(), CfgGlobals::current()->nowhere());
+ if (builder.graph() == NULL) {
+ BAILOUT("unsupported expression in expression statement");
+ }
+ graph()->Append(new PositionInstr(stmt->statement_pos()));
+ graph()->Concatenate(builder.graph());
+}
+
+
+void StatementCfgBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+ // Nothing to do.
+}
+
+
+void StatementCfgBuilder::VisitIfStatement(IfStatement* stmt) {
+ BAILOUT("IfStatement");
+}
+
+
+void StatementCfgBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+ BAILOUT("ContinueStatement");
+}
+
+
+void StatementCfgBuilder::VisitBreakStatement(BreakStatement* stmt) {
+ BAILOUT("BreakStatement");
+}
+
+
+void StatementCfgBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+ ExpressionCfgBuilder builder;
+ builder.Build(stmt->expression(), NULL);
+ if (builder.graph() == NULL) {
+ BAILOUT("unsupported expression in return statement");
+ }
+
+ graph()->Append(new PositionInstr(stmt->statement_pos()));
+ graph()->Concatenate(builder.graph());
+ graph()->AppendReturnInstruction(builder.value());
+}
+
+
+void StatementCfgBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
+ BAILOUT("WithEnterStatement");
+}
+
+
+void StatementCfgBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
+ BAILOUT("WithExitStatement");
+}
+
+
+void StatementCfgBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+ BAILOUT("SwitchStatement");
+}
+
+
+void StatementCfgBuilder::VisitLoopStatement(LoopStatement* stmt) {
+ BAILOUT("LoopStatement");
+}
+
+
+void StatementCfgBuilder::VisitForInStatement(ForInStatement* stmt) {
+ BAILOUT("ForInStatement");
+}
+
+
+void StatementCfgBuilder::VisitTryCatch(TryCatch* stmt) {
+ BAILOUT("TryCatch");
+}
+
+
+void StatementCfgBuilder::VisitTryFinally(TryFinally* stmt) {
+ BAILOUT("TryFinally");
+}
+
+
+void StatementCfgBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+ BAILOUT("DebuggerStatement");
+}
+
+
+#ifdef DEBUG
+// CFG printing support (via depth-first, preorder block traversal).
+
+void Cfg::Print() {
+ entry_->Print();
+ entry_->Unmark();
+}
+
+
+void Constant::Print() {
+ PrintF("Constant(");
+ handle_->Print();
+ PrintF(")");
+}
+
+
+void Nowhere::Print() {
+ PrintF("Nowhere");
+}
+
+
+void SlotLocation::Print() {
+ PrintF("Slot(");
+ switch (type_) {
+ case Slot::PARAMETER:
+ PrintF("PARAMETER, %d)", index_);
+ break;
+ case Slot::LOCAL:
+ PrintF("LOCAL, %d)", index_);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Print() {
+ PrintF("Temp(%d)", number());
+}
+
+
+void MoveInstr::Print() {
+ PrintF("Move(");
+ location()->Print();
+ PrintF(", ");
+ value_->Print();
+ PrintF(")\n");
+}
+
+
+void PropLoadInstr::Print() {
+ PrintF("PropLoad(");
+ location()->Print();
+ PrintF(", ");
+ object()->Print();
+ PrintF(", ");
+ key()->Print();
+ PrintF(")\n");
+}
+
+
+void BinaryOpInstr::Print() {
+ PrintF("BinaryOp(");
+ location()->Print();
+ PrintF(", %s, ", Token::Name(op()));
+ left()->Print();
+ PrintF(", ");
+ right()->Print();
+ PrintF(")\n");
+}
+
+
+void ReturnInstr::Print() {
+ PrintF("Return(");
+ value_->Print();
+ PrintF(")\n");
+}
+
+
+void InstructionBlock::Print() {
+ if (!is_marked_) {
+ is_marked_ = true;
+ PrintF("L%d:\n", number());
+ for (int i = 0, len = instructions_.length(); i < len; i++) {
+ instructions_[i]->Print();
+ }
+ PrintF("Goto L%d\n\n", successor_->number());
+ successor_->Print();
+ }
+}
+
+
+void EntryNode::Print() {
+ if (!is_marked_) {
+ is_marked_ = true;
+ successor_->Print();
+ }
+}
+
+
+void ExitNode::Print() {
+ if (!is_marked_) {
+ is_marked_ = true;
+ PrintF("L%d:\nExit\n\n", number());
+ }
+}
+
+#endif // DEBUG
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/cfg.h b/V8Binding/v8/src/cfg.h
new file mode 100644
index 0000000..2031839
--- /dev/null
+++ b/V8Binding/v8/src/cfg.h
@@ -0,0 +1,819 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CFG_H_
+#define V8_CFG_H_
+
+#include "ast.h"
+
+namespace v8 {
+namespace internal {
+
+class ExitNode;
+class Location;
+
+// Translate a source AST into a control-flow graph (CFG). The CFG contains
+// single-entry, single-exit blocks of straight-line instructions and
+// administrative nodes.
+//
+// Instructions are described by the following grammar.
+//
+// <Instruction> ::=
+// Move <Location> <Value>
+// | PropLoad <Location> <Value> <Value>
+// | BinaryOp <Location> Token::Value <Value> <Value>
+// | Return Nowhere <Value>
+// | Position <Int>
+//
+// Values are trivial expressions:
+//
+// <Value> ::= Constant | <Location>
+//
+// Locations are storable values ('lvalues'). They can be slots,
+// compiler-generated temporaries, or the special location 'Nowhere'
+// indicating that no value is needed.
+//
+// <Location> ::=
+// SlotLocation Slot::Type <Index>
+// | TempLocation
+// | Nowhere
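+//
+// As an illustrative sketch, a statement such as 'return a + b.c;' (with
+// 'a' and 'b' parameters or locals) would translate into roughly:
+//
+//     Position <pos>
+//     PropLoad t0 b "c"
+//     BinaryOp t1 ADD a t0
+//     Return Nowhere t1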
+
+
+// Administrative nodes: There are several types of 'administrative' nodes
+// that do not contain instructions and do not necessarily have a single
+// predecessor and a single successor.
+//
+// EntryNode: there is a distinguished entry node that has no predecessors
+// and a single successor.
+//
+// ExitNode: there is a distinguished exit node that has arbitrarily many
+// predecessors and no successor.
+//
+// JoinNode: join nodes have multiple predecessors and a single successor.
+//
+// BranchNode: branch nodes have a single predecessor and multiple
+// successors.
+
+
+// A convenient class to keep 'global' values when building a CFG. Since
+// CFG construction can be invoked recursively, CFG globals are stacked.
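+//
+// A CfgGlobals scope must be live while a CFG is being built, printed, or
+// compiled; see compiler.cc, which constructs one before calling
+// Cfg::Build().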
+class CfgGlobals BASE_EMBEDDED {
+ public:
+ explicit CfgGlobals(FunctionLiteral* fun);
+
+ ~CfgGlobals() { top_ = previous_; }
+
+ static CfgGlobals* current() {
+ ASSERT(top_ != NULL);
+ return top_;
+ }
+
+ // The function currently being compiled.
+ FunctionLiteral* fun() { return global_fun_; }
+
+ // The shared global exit node for all exits from the function.
+ ExitNode* exit() { return global_exit_; }
+
+ // The singleton 'nowhere' location.
+ Location* nowhere() { return nowhere_; }
+
+#ifdef DEBUG
+ int next_node_number() { return node_counter_++; }
+ int next_temp_number() { return temp_counter_++; }
+#endif
+
+ private:
+ static CfgGlobals* top_;
+ FunctionLiteral* global_fun_;
+ ExitNode* global_exit_;
+ Location* nowhere_;
+
+#ifdef DEBUG
+ // Used to number nodes and temporaries when printing.
+ int node_counter_;
+ int temp_counter_;
+#endif
+
+ CfgGlobals* previous_;
+};
+
+
+class SlotLocation;
+
+// Values represent trivial source expressions: ones with no side effects
+// and that do not require code to be generated.
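+// For example, a literal such as 'true' becomes a Constant, and a reference
+// to a parameter or stack-allocated local becomes a SlotLocation.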
+class Value : public ZoneObject {
+ public:
+ virtual ~Value() {}
+
+ // Predicates:
+
+ virtual bool is_temporary() { return false; }
+ virtual bool is_slot() { return false; }
+ virtual bool is_constant() { return false; }
+
+ // True if the value is a temporary allocated to the stack in
+ // fast-compilation mode.
+ virtual bool is_on_stack() { return false; }
+
+ // Support for fast-compilation mode:
+
+ // Move the value into a register.
+ virtual void Get(MacroAssembler* masm, Register reg) = 0;
+
+ // Push the value on the stack.
+ virtual void Push(MacroAssembler* masm) = 0;
+
+ // Move the value into a slot location.
+ virtual void MoveToSlot(MacroAssembler* masm, SlotLocation* loc) = 0;
+
+#ifdef DEBUG
+ virtual void Print() = 0;
+#endif
+};
+
+
+// A compile-time constant that appeared as a literal in the source AST.
+class Constant : public Value {
+ public:
+ explicit Constant(Handle<Object> handle) : handle_(handle) {}
+
+ // Cast accessor.
+ static Constant* cast(Value* value) {
+ ASSERT(value->is_constant());
+ return reinterpret_cast<Constant*>(value);
+ }
+
+ // Accessors.
+ Handle<Object> handle() { return handle_; }
+
+ // Predicates.
+ bool is_constant() { return true; }
+
+ // Support for fast-compilation mode.
+ void Get(MacroAssembler* masm, Register reg);
+ void Push(MacroAssembler* masm);
+ void MoveToSlot(MacroAssembler* masm, SlotLocation* loc);
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ Handle<Object> handle_;
+};
+
+
+// Locations are values that can be stored into ('lvalues').
+class Location : public Value {
+ public:
+ virtual ~Location() {}
+
+ // Static factory function returning the singleton nowhere location.
+ static Location* Nowhere() {
+ return CfgGlobals::current()->nowhere();
+ }
+
+ // Support for fast-compilation mode:
+
+ // Assumes temporaries have been allocated.
+ virtual void Get(MacroAssembler* masm, Register reg) = 0;
+
+ // Store the value in a register to the location. Assumes temporaries
+ // have been allocated.
+ virtual void Set(MacroAssembler* masm, Register reg) = 0;
+
+ // Assumes temporaries have been allocated, and if the value is a
+ // temporary it was not allocated to the stack.
+ virtual void Push(MacroAssembler* masm) = 0;
+
+ // Emit code to move a value into this location.
+ virtual void Move(MacroAssembler* masm, Value* value) = 0;
+
+#ifdef DEBUG
+ virtual void Print() = 0;
+#endif
+};
+
+
+// Nowhere is a special (singleton) location that indicates the value of a
+// computation is not needed (though its side effects are).
+class Nowhere : public Location {
+ public:
+ // We should not try to emit code to read Nowhere.
+ void Get(MacroAssembler* masm, Register reg) { UNREACHABLE(); }
+ void Push(MacroAssembler* masm) { UNREACHABLE(); }
+ void MoveToSlot(MacroAssembler* masm, SlotLocation* loc) { UNREACHABLE(); }
+
+ // Setting Nowhere is ignored.
+ void Set(MacroAssembler* masm, Register reg) {}
+ void Move(MacroAssembler* masm, Value* value) {}
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ Nowhere() {}
+
+ friend class CfgGlobals;
+};
+
+
+// SlotLocations represent parameters and stack-allocated (i.e.,
+// non-context) local variables.
+class SlotLocation : public Location {
+ public:
+ SlotLocation(Slot::Type type, int index) : type_(type), index_(index) {}
+
+ // Cast accessor.
+ static SlotLocation* cast(Value* value) {
+ ASSERT(value->is_slot());
+ return reinterpret_cast<SlotLocation*>(value);
+ }
+
+ // Accessors.
+ Slot::Type type() { return type_; }
+ int index() { return index_; }
+
+ // Predicates.
+ bool is_slot() { return true; }
+
+ // Support for fast-compilation mode.
+ void Get(MacroAssembler* masm, Register reg);
+ void Set(MacroAssembler* masm, Register reg);
+ void Push(MacroAssembler* masm);
+ void Move(MacroAssembler* masm, Value* value);
+ void MoveToSlot(MacroAssembler* masm, SlotLocation* loc);
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ Slot::Type type_;
+ int index_;
+};
+
+
+// TempLocations represent compiler-generated temporaries. They are
+// allocated to registers or memory either before code generation (in the
+// optimized-for-speed compiler) or on the fly during code generation (in
+// the optimized-for-space compiler).
+class TempLocation : public Location {
+ public:
+ // Fast-compilation mode allocation decisions.
+ enum Where {
+ NOT_ALLOCATED, // Not yet allocated.
+ ACCUMULATOR, // Allocated to the dedicated accumulator register.
+ STACK // Allocated to the stack.
+ };
+
+ TempLocation() : where_(NOT_ALLOCATED) {
+#ifdef DEBUG
+ number_ = -1;
+#endif
+ }
+
+ // Cast accessor.
+ static TempLocation* cast(Value* value) {
+ ASSERT(value->is_temporary());
+ return reinterpret_cast<TempLocation*>(value);
+ }
+
+ // Accessors.
+ Where where() { return where_; }
+ void set_where(Where where) { where_ = where; }
+
+ // Predicates.
+ bool is_on_stack() { return where_ == STACK; }
+ bool is_temporary() { return true; }
+
+ // Support for fast-compilation mode. Assume the temp has been allocated.
+ void Get(MacroAssembler* masm, Register reg);
+ void Set(MacroAssembler* masm, Register reg);
+ void Push(MacroAssembler* masm);
+ void Move(MacroAssembler* masm, Value* value);
+ void MoveToSlot(MacroAssembler* masm, SlotLocation* loc);
+
+#ifdef DEBUG
+ int number() {
+ if (number_ == -1) number_ = CfgGlobals::current()->next_temp_number();
+ return number_;
+ }
+
+ void Print();
+#endif
+
+ private:
+ Where where_;
+
+#ifdef DEBUG
+ int number_;
+#endif
+};
+
+
+// Instructions are computations. They represent non-trivial source
+// expressions: typically ones that have side effects and require code to
+// be generated.
+class Instruction : public ZoneObject {
+ public:
+ // Every instruction has a location where its result is stored (which may
+ // be Nowhere, the default).
+ Instruction() : location_(CfgGlobals::current()->nowhere()) {}
+
+ explicit Instruction(Location* location) : location_(location) {}
+
+ virtual ~Instruction() {}
+
+ // Accessors.
+ Location* location() { return location_; }
+ void set_location(Location* location) { location_ = location; }
+
+ // Support for fast-compilation mode:
+
+ // Emit code to perform the instruction.
+ virtual void Compile(MacroAssembler* masm) = 0;
+
+ // Allocate a temporary which is the result of the immediate predecessor
+ // instruction. It is allocated to the accumulator register if it is used
+ // as an operand to this instruction, otherwise to the stack.
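+ // For example, if 'PropLoad t0 b "c"' is immediately followed by
+ // 'BinaryOp t1 ADD a t0', the BinaryOp's FastAllocate(t0) allocates t0 to
+ // the accumulator because t0 is one of its own operands.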
+ virtual void FastAllocate(TempLocation* temp) = 0;
+
+#ifdef DEBUG
+ virtual void Print() = 0;
+#endif
+
+ protected:
+ Location* location_;
+};
+
+
+// A phantom instruction that indicates the start of a statement. It
+// causes the statement position to be recorded in the relocation
+// information but generates no code.
+class PositionInstr : public Instruction {
+ public:
+ explicit PositionInstr(int pos) : pos_(pos) {}
+
+ // Support for fast-compilation mode.
+ void Compile(MacroAssembler* masm);
+
+ // This should not be called. The last instruction of the previous
+ // statement should not have a temporary as its location.
+ void FastAllocate(TempLocation* temp) { UNREACHABLE(); }
+
+#ifdef DEBUG
+ // Printing support. Print nothing.
+ void Print() {}
+#endif
+
+ private:
+ int pos_;
+};
+
+
+// Move a value to a location.
+class MoveInstr : public Instruction {
+ public:
+ MoveInstr(Location* loc, Value* value) : Instruction(loc), value_(value) {}
+
+ // Accessors.
+ Value* value() { return value_; }
+
+ // Support for fast-compilation mode.
+ void Compile(MacroAssembler* masm);
+ void FastAllocate(TempLocation* temp);
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ Value* value_;
+};
+
+
+// Load a property from a receiver, leaving the result in a location.
+class PropLoadInstr : public Instruction {
+ public:
+ PropLoadInstr(Location* loc, Value* object, Value* key)
+ : Instruction(loc), object_(object), key_(key) {
+ }
+
+ // Accessors.
+ Value* object() { return object_; }
+ Value* key() { return key_; }
+
+ // Support for fast-compilation mode.
+ void Compile(MacroAssembler* masm);
+ void FastAllocate(TempLocation* temp);
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ Value* object_;
+ Value* key_;
+};
+
+
+// Perform a (non-short-circuited) binary operation on a pair of values,
+// leaving the result in a location.
+class BinaryOpInstr : public Instruction {
+ public:
+ BinaryOpInstr(Location* loc, Token::Value op, Value* left, Value* right)
+ : Instruction(loc), op_(op), left_(left), right_(right) {
+ }
+
+ // Accessors.
+ Token::Value op() { return op_; }
+ Value* left() { return left_; }
+ Value* right() { return right_; }
+
+ // Support for fast-compilation mode.
+ void Compile(MacroAssembler* masm);
+ void FastAllocate(TempLocation* temp);
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ Token::Value op_;
+ Value* left_;
+ Value* right_;
+};
+
+
+// Return a value. Has the side effect of moving its value into the return
+// value register. Can only occur as the last instruction in an instruction
+// block, and implies that the block is closed (cannot have instructions
+// appended or graph fragments concatenated to the end) and that the block's
+// successor is the global exit node for the current function.
+class ReturnInstr : public Instruction {
+ public:
+ explicit ReturnInstr(Value* value) : value_(value) {}
+
+ virtual ~ReturnInstr() {}
+
+ // Accessors.
+ Value* value() { return value_; }
+
+ // Support for fast-compilation mode.
+ void Compile(MacroAssembler* masm);
+ void FastAllocate(TempLocation* temp);
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ Value* value_;
+};
+
+
+// Nodes make up control-flow graphs.
+class CfgNode : public ZoneObject {
+ public:
+ CfgNode() : is_marked_(false) {
+#ifdef DEBUG
+ number_ = -1;
+#endif
+ }
+
+ virtual ~CfgNode() {}
+
+ // Because CFGs contain cycles, nodes support marking during traversal
+ // (e.g., for printing or compilation). The traversal functions will mark
+ // unmarked nodes and backtrack if they encounter a marked one. After a
+ // traversal, the graph should be explicitly unmarked by calling Unmark on
+ // the entry node.
+ bool is_marked() { return is_marked_; }
+ virtual void Unmark() = 0;
+
+ // Predicates:
+
+ // True if the node is an instruction block.
+ virtual bool is_block() { return false; }
+
+ // Support for fast-compilation mode. Emit the instructions or control
+ // flow represented by the node.
+ virtual void Compile(MacroAssembler* masm) = 0;
+
+#ifdef DEBUG
+ int number() {
+ if (number_ == -1) number_ = CfgGlobals::current()->next_node_number();
+ return number_;
+ }
+
+ virtual void Print() = 0;
+#endif
+
+ protected:
+ bool is_marked_;
+
+#ifdef DEBUG
+ int number_;
+#endif
+};
+
+
+// A block is a single-entry, single-exit block of instructions.
+class InstructionBlock : public CfgNode {
+ public:
+ InstructionBlock() : successor_(NULL), instructions_(4) {}
+
+ virtual ~InstructionBlock() {}
+
+ void Unmark();
+
+ // Cast accessor.
+ static InstructionBlock* cast(CfgNode* node) {
+ ASSERT(node->is_block());
+ return reinterpret_cast<InstructionBlock*>(node);
+ }
+
+ bool is_block() { return true; }
+
+ // Accessors.
+ CfgNode* successor() { return successor_; }
+
+ void set_successor(CfgNode* succ) {
+ ASSERT(successor_ == NULL);
+ successor_ = succ;
+ }
+
+ ZoneList<Instruction*>* instructions() { return &instructions_; }
+
+ // Support for fast-compilation mode.
+ void Compile(MacroAssembler* masm);
+
+ // Add an instruction to the end of the block.
+ void Append(Instruction* instr) { instructions_.Add(instr); }
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ CfgNode* successor_;
+ ZoneList<Instruction*> instructions_;
+};
+
+
+// An entry node (one per function).
+class EntryNode : public CfgNode {
+ public:
+ explicit EntryNode(InstructionBlock* succ) : successor_(succ) {}
+
+ virtual ~EntryNode() {}
+
+ void Unmark();
+
+ // Support for fast-compilation mode.
+ void Compile(MacroAssembler* masm);
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+ private:
+ InstructionBlock* successor_;
+};
+
+
+// An exit node (one per function).
+class ExitNode : public CfgNode {
+ public:
+ ExitNode() {}
+
+ virtual ~ExitNode() {}
+
+ void Unmark();
+
+ // Support for fast-compilation mode.
+ void Compile(MacroAssembler* masm);
+
+#ifdef DEBUG
+ void Print();
+#endif
+};
+
+
+// A CFG consists of a linked structure of nodes. Nodes are linked by
+// pointing to their successors, always beginning with a (single) entry node
+// (not necessarily of type EntryNode). If it is still possible to add
+// nodes to the end of the graph (i.e., there is a (single) path that does
+// not end with the global exit node), then the CFG has an exit node as
+// well.
+//
+// The empty CFG is represented by a NULL entry and a NULL exit.
+//
+// We use the term 'open fragment' to mean a CFG whose entry and exits are
+// both instruction blocks. It is always possible to add instructions and
+// nodes to the beginning or end of an open fragment.
+//
+// We use the term 'closed fragment' to mean a CFG whose entry is an
+// instruction block and whose exit is NULL (all paths go to the global
+// exit).
+//
+// We use the term 'fragment' to refer to a CFG that is known to be an open
+// or closed fragment.
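+//
+// For example, the fragment built for an expression statement is open (its
+// exit block can still be extended), while the fragment built for a body
+// that ends in a return statement is closed (all paths reach the global
+// exit).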
+class Cfg : public ZoneObject {
+ public:
+ // Create an empty CFG fragment.
+ Cfg() : entry_(NULL), exit_(NULL) {}
+
+ // Build the CFG for a function. The returned CFG begins with an
+ // EntryNode and all paths end with the ExitNode.
+ static Cfg* Build();
+
+ // The entry and exit nodes of the CFG (not necessarily EntryNode and
+ // ExitNode).
+ CfgNode* entry() { return entry_; }
+ CfgNode* exit() { return exit_; }
+
+ // True if the CFG has no nodes.
+ bool is_empty() { return entry_ == NULL; }
+
+ // True if the CFG has an available exit node (i.e., it can be appended or
+ // concatenated to).
+ bool has_exit() { return exit_ != NULL; }
+
+ // Add an EntryNode to a CFG fragment. It is no longer a fragment
+ // (instructions can no longer be prepended).
+ void PrependEntryNode();
+
+ // Append an instruction to the end of an open fragment.
+ void Append(Instruction* instr);
+
+ // Append a return instruction to the end of an open fragment and make it
+ // a closed fragment (the exit's successor becomes the global exit node).
+ void AppendReturnInstruction(Value* value);
+
+ // Glue another CFG fragment to the end of this (open) fragment.
+ void Concatenate(Cfg* other);
+
+ // Support for compilation. Compile the entire CFG.
+ Handle<Code> Compile(Handle<Script> script);
+
+#ifdef DEBUG
+ // Support for printing.
+ void Print();
+#endif
+
+ private:
+ // Entry and exit nodes.
+ CfgNode* entry_;
+ CfgNode* exit_;
+};
+
+
+// An implementation of a set of locations (currently slot locations); most
+// of the operations are destructive.
+class LocationSet BASE_EMBEDDED {
+ public:
+ // Construct an empty location set.
+ LocationSet() : parameters_(0), locals_(0) {}
+
+ // Raw accessors.
+ uintptr_t parameters() { return parameters_; }
+ uintptr_t locals() { return locals_; }
+
+ // Make this the empty set.
+ void Empty() {
+ parameters_ = locals_ = 0;
+ }
+
+ // Insert an element.
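+ // The set is encoded as bit masks: parameter index i (with the receiver
+ // 'this' at index -1) maps to bit i + 1 of parameters_, and local index i
+ // maps to bit i of locals_.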
+ void AddElement(SlotLocation* location) {
+ if (location->type() == Slot::PARAMETER) {
+ // Parameter indexes begin with -1 ('this').
+ ASSERT(location->index() < kPointerSize - 1);
+ parameters_ |= (1 << (location->index() + 1));
+ } else {
+ ASSERT(location->type() == Slot::LOCAL);
+ ASSERT(location->index() < kPointerSize);
+ locals_ |= (1 << location->index());
+ }
+ }
+
+ // (Destructively) compute the union with another set.
+ void Union(LocationSet* other) {
+ parameters_ |= other->parameters();
+ locals_ |= other->locals();
+ }
+
+ bool Contains(SlotLocation* location) {
+ if (location->type() == Slot::PARAMETER) {
+ ASSERT(location->index() < kPointerSize - 1);
+ return (parameters_ & (1 << (location->index() + 1)));
+ } else {
+ ASSERT(location->type() == Slot::LOCAL);
+ ASSERT(location->index() < kPointerSize);
+ return (locals_ & (1 << location->index()));
+ }
+ }
+
+ private:
+ uintptr_t parameters_;
+ uintptr_t locals_;
+};
+
+
+// An ExpressionCfgBuilder traverses an expression and returns an open CFG
+// fragment (currently a possibly empty list of instructions represented by
+// a singleton instruction block) and the expression's value.
+//
+// Failure to build the CFG is indicated by a NULL CFG.
+class ExpressionCfgBuilder : public AstVisitor {
+ public:
+ ExpressionCfgBuilder() : destination_(NULL), value_(NULL), graph_(NULL) {}
+
+ // Result accessors.
+ Value* value() { return value_; }
+ Cfg* graph() { return graph_; }
+ LocationSet* assigned_vars() { return &assigned_vars_; }
+
+ // Build the cfg for an expression and remember its value. The
+ // destination is a 'hint' where the value should go which may be ignored.
+ // NULL is used to indicate no preference.
+ //
+ // Concretely, if the expression needs to generate a temporary for its
+ // value, it should use the passed destination or generate one if NULL.
+ void Build(Expression* expr, Location* destination) {
+ value_ = NULL;
+ graph_ = new Cfg();
+ assigned_vars_.Empty();
+ destination_ = destination;
+ Visit(expr);
+ }
+
+ // AST node visitors.
+#define DECLARE_VISIT(type) void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ private:
+ // State for the visitor. Input parameters:
+ Location* destination_;
+
+ // Output parameters:
+ Value* value_;
+ Cfg* graph_;
+ LocationSet assigned_vars_;
+};
+
+
+// A StatementCfgBuilder maintains a CFG fragment accumulator. When it
+// visits a statement, it concatenates the CFG for the statement to the end
+// of the accumulator.
+class StatementCfgBuilder : public AstVisitor {
+ public:
+ StatementCfgBuilder() : graph_(new Cfg()) {}
+
+ Cfg* graph() { return graph_; }
+
+ void VisitStatements(ZoneList<Statement*>* stmts);
+
+ // AST node visitors.
+#define DECLARE_VISIT(type) void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ private:
+ // State for the visitor. Input/output parameter:
+ Cfg* graph_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_CFG_H_
diff --git a/V8Binding/v8/src/codegen.cc b/V8Binding/v8/src/codegen.cc
index b7297d7..7a4bb12 100644
--- a/V8Binding/v8/src/codegen.cc
+++ b/V8Binding/v8/src/codegen.cc
@@ -496,7 +496,7 @@ void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) {
}
-void CodeGenerator::CodeForStatementPosition(Node* node) {
+void CodeGenerator::CodeForStatementPosition(AstNode* node) {
if (FLAG_debug_info) {
int pos = node->statement_pos();
if (pos != RelocInfo::kNoPosition) {
diff --git a/V8Binding/v8/src/compiler.cc b/V8Binding/v8/src/compiler.cc
index aecdfb9..f0d97fe 100644
--- a/V8Binding/v8/src/compiler.cc
+++ b/V8Binding/v8/src/compiler.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "bootstrapper.h"
+#include "cfg.h"
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "compiler.h"
@@ -78,6 +79,22 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
return Handle<Code>::null();
}
+ if (FLAG_multipass) {
+ CfgGlobals scope(literal);
+ Cfg* cfg = Cfg::Build();
+#ifdef DEBUG
+ if (FLAG_print_cfg && cfg != NULL) {
+ SmartPointer<char> name = literal->name()->ToCString();
+ PrintF("Function \"%s\":\n", *name);
+ cfg->Print();
+ PrintF("\n");
+ }
+#endif
+ if (cfg != NULL) {
+ return cfg->Compile(script);
+ }
+ }
+
// Generate code and return it.
Handle<Code> result = CodeGenerator::MakeCode(literal, script, is_eval);
return result;
diff --git a/V8Binding/v8/src/date-delay.js b/V8Binding/v8/src/date-delay.js
index 6adde46..0778dc9 100644
--- a/V8Binding/v8/src/date-delay.js
+++ b/V8Binding/v8/src/date-delay.js
@@ -156,6 +156,7 @@ var DST_offset_cache = {
// NOTE: The implementation relies on the fact that no time zones have
// more than one daylight savings offset change per month.
+// If this function is called with NaN it returns NaN.
function DaylightSavingsOffset(t) {
// Load the cache object from the builtins object.
var cache = DST_offset_cache;
@@ -219,6 +220,7 @@ var timezone_cache_time = $NaN;
var timezone_cache_timezone;
function LocalTimezone(t) {
+ if (NUMBER_IS_NAN(t)) return "";
if (t == timezone_cache_time) {
return timezone_cache_timezone;
}
@@ -464,9 +466,11 @@ var Date_cache = {
value = cache.time;
} else {
value = DateParse(year);
- cache.time = value;
- cache.year = YearFromTime(LocalTimeNoCheck(value));
- cache.string = year;
+ if (!NUMBER_IS_NAN(value)) {
+ cache.time = value;
+ cache.year = YearFromTime(LocalTimeNoCheck(value));
+ cache.string = year;
+ }
}
} else {
@@ -647,11 +651,13 @@ function TimeString(time) {
function LocalTimezoneString(time) {
- var timezoneOffset = (local_time_offset + DaylightSavingsOffset(time)) / msPerMinute;
+ var timezoneOffset =
+ (local_time_offset + DaylightSavingsOffset(time)) / msPerMinute;
var sign = (timezoneOffset >= 0) ? 1 : -1;
var hours = FLOOR((sign * timezoneOffset)/60);
var min = FLOOR((sign * timezoneOffset)%60);
- var gmt = ' GMT' + ((sign == 1) ? '+' : '-') + TwoDigitString(hours) + TwoDigitString(min);
+ var gmt = ' GMT' + ((sign == 1) ? '+' : '-') +
+ TwoDigitString(hours) + TwoDigitString(min);
return gmt + ' (' + LocalTimezone(time) + ')';
}
diff --git a/V8Binding/v8/src/debug-agent.cc b/V8Binding/v8/src/debug-agent.cc
index 62cc251..3dba53a 100644
--- a/V8Binding/v8/src/debug-agent.cc
+++ b/V8Binding/v8/src/debug-agent.cc
@@ -254,8 +254,8 @@ SmartPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
// Check that key is Content-Length.
if (strcmp(key, kContentLength) == 0) {
- // Get the content length value if within a sensible range.
- if (strlen(value) > 7) {
+ // Get the content length value if present and within a sensible range.
+ if (value == NULL || strlen(value) > 7) {
return SmartPointer<char>();
}
for (int i = 0; value[i] != '\0'; i++) {
diff --git a/V8Binding/v8/src/debug.cc b/V8Binding/v8/src/debug.cc
index 64f98c7..18536f5 100644
--- a/V8Binding/v8/src/debug.cc
+++ b/V8Binding/v8/src/debug.cc
@@ -1301,7 +1301,7 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
// step into was requested.
if (fp == Debug::step_in_fp()) {
// Don't allow step into functions in the native context.
- if (function->context()->global() != Top::context()->builtins()) {
+ if (!function->IsBuiltin()) {
if (function->shared()->code() ==
Builtins::builtin(Builtins::FunctionApply) ||
function->shared()->code() ==
@@ -1310,7 +1310,8 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
// function to be called and not the code for Builtins::FunctionApply or
// Builtins::FunctionCall. The receiver of call/apply is the target
// function.
- if (!holder.is_null() && holder->IsJSFunction()) {
+ if (!holder.is_null() && holder->IsJSFunction() &&
+ !JSFunction::cast(*holder)->IsBuiltin()) {
Handle<SharedFunctionInfo> shared_info(
JSFunction::cast(*holder)->shared());
Debug::FloodWithOneShot(shared_info);
diff --git a/V8Binding/v8/src/execution.cc b/V8Binding/v8/src/execution.cc
index 40a9b4f..4ab6b61 100644
--- a/V8Binding/v8/src/execution.cc
+++ b/V8Binding/v8/src/execution.cc
@@ -83,6 +83,14 @@ static Handle<Object> Invoke(bool construct,
code = stub.GetCode();
}
+ // Convert calls on global objects to be calls on the global
+ // receiver instead to avoid having a 'this' pointer which refers
+ // directly to a global object.
+ if (receiver->IsGlobalObject()) {
+ Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
+ receiver = Handle<JSObject>(global->global_receiver());
+ }
+
{
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
@@ -587,6 +595,23 @@ Object* Execution::DebugBreakHelper() {
return Heap::undefined_value();
}
+ {
+ JavaScriptFrameIterator it;
+ ASSERT(!it.done());
+ Object* fun = it.frame()->function();
+ if (fun && fun->IsJSFunction()) {
+ // Don't stop in builtin functions.
+ if (JSFunction::cast(fun)->IsBuiltin()) {
+ return Heap::undefined_value();
+ }
+ GlobalObject* global = JSFunction::cast(fun)->context()->global();
+ // Don't stop in debugger functions.
+ if (Debug::IsDebugGlobal(global)) {
+ return Heap::undefined_value();
+ }
+ }
+ }
+
// Collect the break state before clearing the flags.
bool debug_command_only =
StackGuard::IsDebugCommand() && !StackGuard::IsDebugBreak();
diff --git a/V8Binding/v8/src/execution.h b/V8Binding/v8/src/execution.h
index 8cfdec2..126b172 100644
--- a/V8Binding/v8/src/execution.h
+++ b/V8Binding/v8/src/execution.h
@@ -205,7 +205,7 @@ class StackGuard BASE_EMBEDDED {
static void EnableInterrupts();
static void DisableInterrupts();
- static const uintptr_t kLimitSize = 512 * KB;
+ static const uintptr_t kLimitSize = kPointerSize * 128 * KB;
static const uintptr_t kInterruptLimit = 0xfffffffe;
static const uintptr_t kIllegalLimit = 0xffffffff;
diff --git a/V8Binding/v8/src/factory.cc b/V8Binding/v8/src/factory.cc
index 1045a4c..36554df 100644
--- a/V8Binding/v8/src/factory.cc
+++ b/V8Binding/v8/src/factory.cc
@@ -210,6 +210,16 @@ Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
}
+Handle<PixelArray> Factory::NewPixelArray(int length,
+ uint8_t* external_pointer,
+ PretenureFlag pretenure) {
+ ASSERT(0 <= length);
+ CALL_HEAP_FUNCTION(Heap::AllocatePixelArray(length,
+ external_pointer,
+ pretenure), PixelArray);
+}
+
+
Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
CALL_HEAP_FUNCTION(Heap::AllocateMap(type, instance_size), Map);
}
diff --git a/V8Binding/v8/src/factory.h b/V8Binding/v8/src/factory.h
index 0afdd76..4db5d4e 100644
--- a/V8Binding/v8/src/factory.h
+++ b/V8Binding/v8/src/factory.h
@@ -154,6 +154,10 @@ class Factory : public AllStatic {
static Handle<ByteArray> NewByteArray(int length,
PretenureFlag pretenure = NOT_TENURED);
+ static Handle<PixelArray> NewPixelArray(int length,
+ uint8_t* external_pointer,
+ PretenureFlag pretenure = NOT_TENURED);
+
static Handle<Map> NewMap(InstanceType type, int instance_size);
static Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
diff --git a/V8Binding/v8/src/flag-definitions.h b/V8Binding/v8/src/flag-definitions.h
index b0770b0..3df11f7 100644
--- a/V8Binding/v8/src/flag-definitions.h
+++ b/V8Binding/v8/src/flag-definitions.h
@@ -133,6 +133,7 @@ DEFINE_bool(debug_info, true, "add debug information to compiled functions")
DEFINE_bool(strict, false, "strict error checking")
DEFINE_int(min_preparse_length, 1024,
"Minimum length for automatic enable preparsing")
+DEFINE_bool(multipass, false, "use the multipass code generator")
// compilation-cache.cc
DEFINE_bool(compilation_cache, true, "enable compilation cache")
@@ -267,6 +268,7 @@ DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
// compiler.cc
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
DEFINE_bool(print_scopes, false, "print scopes")
+DEFINE_bool(print_cfg, false, "print control-flow graph")
// contexts.cc
DEFINE_bool(trace_contexts, false, "trace contexts operations")
diff --git a/V8Binding/v8/src/globals.h b/V8Binding/v8/src/globals.h
index 44bd527..195a2e2 100644
--- a/V8Binding/v8/src/globals.h
+++ b/V8Binding/v8/src/globals.h
@@ -207,6 +207,7 @@ class HeapObject;
class IC;
class InterceptorInfo;
class IterationStatement;
+class Array;
class JSArray;
class JSFunction;
class JSObject;
diff --git a/V8Binding/v8/src/handles.cc b/V8Binding/v8/src/handles.cc
index 510ea95..6345d41 100644
--- a/V8Binding/v8/src/handles.cc
+++ b/V8Binding/v8/src/handles.cc
@@ -164,8 +164,11 @@ void SetExpectedNofPropertiesFromEstimate(Handle<JSFunction> func,
void NormalizeProperties(Handle<JSObject> object,
- PropertyNormalizationMode mode) {
- CALL_HEAP_FUNCTION_VOID(object->NormalizeProperties(mode));
+ PropertyNormalizationMode mode,
+ int expected_additional_properties) {
+ CALL_HEAP_FUNCTION_VOID(object->NormalizeProperties(
+ mode,
+ expected_additional_properties));
}
@@ -341,6 +344,14 @@ Handle<String> SubString(Handle<String> str, int start, int end) {
Handle<Object> SetElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value) {
+ if (object->HasPixelElements()) {
+ if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
+ bool has_exception;
+ Handle<Object> number = Execution::ToNumber(value, &has_exception);
+ if (has_exception) return Handle<Object>();
+ value = number;
+ }
+ }
CALL_HEAP_FUNCTION(object->SetElement(index, *value), Object);
}
@@ -643,13 +654,17 @@ bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag) {
OptimizedObjectForAddingMultipleProperties::
OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
+ int expected_additional_properties,
bool condition) {
object_ = object;
if (condition && object_->HasFastProperties()) {
// Normalize the properties of object to avoid n^2 behavior
- // when extending the object multiple properties.
+ // when extending the object with multiple properties. Indicate the
+ // number of properties to be added.
unused_property_fields_ = object->map()->unused_property_fields();
- NormalizeProperties(object_, KEEP_INOBJECT_PROPERTIES);
+ NormalizeProperties(object_,
+ KEEP_INOBJECT_PROPERTIES,
+ expected_additional_properties);
has_been_transformed_ = true;
} else {
diff --git a/V8Binding/v8/src/handles.h b/V8Binding/v8/src/handles.h
index a86dc96..ba2694f 100644
--- a/V8Binding/v8/src/handles.h
+++ b/V8Binding/v8/src/handles.h
@@ -181,7 +181,8 @@ class HandleScope {
// of space or encountering an internal error.
void NormalizeProperties(Handle<JSObject> object,
- PropertyNormalizationMode mode);
+ PropertyNormalizationMode mode,
+ int expected_additional_properties);
void NormalizeElements(Handle<JSObject> object);
void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields);
@@ -336,6 +337,7 @@ class NoHandleAllocation BASE_EMBEDDED {
class OptimizedObjectForAddingMultipleProperties BASE_EMBEDDED {
public:
OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
+ int expected_property_count,
bool condition = true);
~OptimizedObjectForAddingMultipleProperties();
private:
diff --git a/V8Binding/v8/src/heap-inl.h b/V8Binding/v8/src/heap-inl.h
index d27f14f..114ae0d 100644
--- a/V8Binding/v8/src/heap-inl.h
+++ b/V8Binding/v8/src/heap-inl.h
@@ -159,9 +159,7 @@ void Heap::RecordWrite(Address address, int offset) {
if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
SLOW_ASSERT(Contains(address + offset));
-#ifndef V8_HOST_ARCH_64_BIT
Page::SetRSet(address, offset);
-#endif // V8_HOST_ARCH_64_BIT
}
diff --git a/V8Binding/v8/src/heap.cc b/V8Binding/v8/src/heap.cc
index 213eec5..7c91778 100644
--- a/V8Binding/v8/src/heap.cc
+++ b/V8Binding/v8/src/heap.cc
@@ -681,33 +681,11 @@ void Heap::Scavenge() {
// Copy objects reachable from weak pointers.
GlobalHandles::IterateWeakRoots(&scavenge_visitor);
-#ifdef V8_HOST_ARCH_64_BIT
- // TODO(X64): Make this go away again. We currently disable RSets for
- // 64-bit-mode.
- HeapObjectIterator old_pointer_iterator(old_pointer_space_);
- while (old_pointer_iterator.has_next()) {
- HeapObject* heap_object = old_pointer_iterator.next();
- heap_object->Iterate(&scavenge_visitor);
- }
- HeapObjectIterator map_iterator(map_space_);
- while (map_iterator.has_next()) {
- HeapObject* heap_object = map_iterator.next();
- heap_object->Iterate(&scavenge_visitor);
- }
- LargeObjectIterator lo_iterator(lo_space_);
- while (lo_iterator.has_next()) {
- HeapObject* heap_object = lo_iterator.next();
- if (heap_object->IsFixedArray()) {
- heap_object->Iterate(&scavenge_visitor);
- }
- }
-#else // !defined(V8_HOST_ARCH_64_BIT)
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
IterateRSet(old_pointer_space_, &ScavengePointer);
IterateRSet(map_space_, &ScavengePointer);
lo_space_->IterateRSet(&ScavengePointer);
-#endif
// Copy objects reachable from cells by scavenging cell values directly.
HeapObjectIterator cell_iterator(cell_space_);
@@ -830,13 +808,11 @@ class UpdateRSetVisitor: public ObjectVisitor {
int Heap::UpdateRSet(HeapObject* obj) {
-#ifndef V8_HOST_ARCH_64_BIT
- // TODO(X64) Reenable RSet when we have a working 64-bit layout of Page.
ASSERT(!InNewSpace(obj));
// Special handling of fixed arrays to iterate the body based on the start
// address and offset. Just iterating the pointers as in UpdateRSetVisitor
// will not work because Page::SetRSet needs to have the start of the
- // object.
+ // object for large object pages.
if (obj->IsFixedArray()) {
FixedArray* array = FixedArray::cast(obj);
int length = array->length();
@@ -853,7 +829,6 @@ int Heap::UpdateRSet(HeapObject* obj) {
UpdateRSetVisitor v;
obj->Iterate(&v);
}
-#endif // V8_HOST_ARCH_64_BIT
return obj->Size();
}
@@ -1191,6 +1166,10 @@ bool Heap::CreateInitialMaps() {
if (obj->IsFailure()) return false;
set_byte_array_map(Map::cast(obj));
+ obj = AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
+ if (obj->IsFailure()) return false;
+ set_pixel_array_map(Map::cast(obj));
+
obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
if (obj->IsFailure()) return false;
set_code_map(Map::cast(obj));
@@ -1407,6 +1386,12 @@ bool Heap::CreateInitialObjects() {
if (obj->IsFailure()) return false;
set_the_hole_value(obj);
+ obj = CreateOddball(
+ oddball_map(), "no_interceptor_result_sentinel", Smi::FromInt(-2));
+ if (obj->IsFailure()) return false;
+ set_no_interceptor_result_sentinel(obj);
+
+
// Allocate the empty string.
obj = AllocateRawAsciiString(0, TENURED);
if (obj->IsFailure()) return false;
@@ -1433,13 +1418,15 @@ bool Heap::CreateInitialObjects() {
if (obj->IsFailure()) return false;
set_prototype_accessors(Proxy::cast(obj));
- // Allocate the code_stubs dictionary.
- obj = NumberDictionary::Allocate(4);
+ // Allocate the code_stubs dictionary. The initial size is set to avoid
+ // expanding the dictionary during bootstrapping.
+ obj = NumberDictionary::Allocate(128);
if (obj->IsFailure()) return false;
set_code_stubs(NumberDictionary::cast(obj));
- // Allocate the non_monomorphic_cache used in stub-cache.cc
- obj = NumberDictionary::Allocate(4);
+ // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
+ // is set to avoid expanding the dictionary during bootstrapping.
+ obj = NumberDictionary::Allocate(64);
if (obj->IsFailure()) return false;
set_non_monomorphic_cache(NumberDictionary::cast(obj));
@@ -1576,8 +1563,7 @@ Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate proxies in paged spaces.
STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
Object* result = Allocate(proxy_map(), space);
if (result->IsFailure()) return result;
@@ -1683,7 +1669,7 @@ Object* Heap::AllocateSlicedString(String* buffer,
int length = end - start;
// If the resulting string is small make a sub string.
- if (end - start <= String::kMinNonFlatLength) {
+ if (length <= String::kMinNonFlatLength) {
return Heap::AllocateSubString(buffer, start, end);
}
@@ -1859,6 +1845,23 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
}
+Object* Heap::AllocatePixelArray(int length,
+ uint8_t* external_pointer,
+ PretenureFlag pretenure) {
+ AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+ Object* result = AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
+
+ if (result->IsFailure()) return result;
+
+ reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
+ reinterpret_cast<PixelArray*>(result)->set_length(length);
+ reinterpret_cast<PixelArray*>(result)->set_external_pointer(external_pointer);
+
+ return result;
+}
+
+
Object* Heap::CreateCode(const CodeDesc& desc,
ZoneScopeInfo* sinfo,
Code::Flags flags,
@@ -2077,6 +2080,11 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
// properly initialized.
ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
+ // Both types of global objects should be allocated using
+ // AllocateGlobalObject to be properly initialized.
+ ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
+ ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
+
// Allocate the backing storage for the properties.
int prop_size = map->unused_property_fields() - map->inobject_properties();
Object* properties = AllocateFixedArray(prop_size, pretenure);
@@ -2117,24 +2125,62 @@ Object* Heap::AllocateJSObject(JSFunction* constructor,
Object* Heap::AllocateGlobalObject(JSFunction* constructor) {
ASSERT(constructor->has_initial_map());
+ Map* map = constructor->initial_map();
+
// Make sure no field properties are described in the initial map.
// This guarantees us that normalizing the properties does not
// require us to change property values to JSGlobalPropertyCells.
- ASSERT(constructor->initial_map()->NextFreePropertyIndex() == 0);
+ ASSERT(map->NextFreePropertyIndex() == 0);
// Make sure we don't have a ton of pre-allocated slots in the
// global objects. They will be unused once we normalize the object.
- ASSERT(constructor->initial_map()->unused_property_fields() == 0);
- ASSERT(constructor->initial_map()->inobject_properties() == 0);
+ ASSERT(map->unused_property_fields() == 0);
+ ASSERT(map->inobject_properties() == 0);
+
+ // Initial size of the backing store to avoid resizing the storage during
+ // bootstrapping. The size differs between the JS global object and the
+ // builtins object.
+ int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
+
+ // Allocate a dictionary object for backing storage.
+ Object* obj =
+ StringDictionary::Allocate(
+ map->NumberOfDescribedProperties() * 2 + initial_size);
+ if (obj->IsFailure()) return obj;
+ StringDictionary* dictionary = StringDictionary::cast(obj);
+
+ // The global object might be created from an object template with accessors.
+ // Fill these accessors into the dictionary.
+ DescriptorArray* descs = map->instance_descriptors();
+ for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
+ PropertyDetails d =
+ PropertyDetails(details.attributes(), CALLBACKS, details.index());
+ Object* value = descs->GetCallbacksObject(i);
+ value = Heap::AllocateJSGlobalPropertyCell(value);
+ if (value->IsFailure()) return value;
+
+ Object* result = dictionary->Add(descs->GetKey(i), value, d);
+ if (result->IsFailure()) return result;
+ dictionary = StringDictionary::cast(result);
+ }
- // Allocate the object based on the constructors initial map.
- Object* result = AllocateJSObjectFromMap(constructor->initial_map(), TENURED);
- if (result->IsFailure()) return result;
+ // Allocate the global object and initialize it with the backing store.
+ obj = Allocate(map, OLD_POINTER_SPACE);
+ if (obj->IsFailure()) return obj;
+ JSObject* global = JSObject::cast(obj);
+ InitializeJSObjectFromMap(global, dictionary, map);
- // Normalize the result.
- JSObject* global = JSObject::cast(result);
- result = global->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
- if (result->IsFailure()) return result;
+ // Create a new map for the global object.
+ obj = map->CopyDropDescriptors();
+ if (obj->IsFailure()) return obj;
+ Map* new_map = Map::cast(obj);
+
+ // Setup the global object as a normalized object.
+ global->set_map(new_map);
+ global->map()->set_instance_descriptors(Heap::empty_descriptor_array());
+ global->set_properties(dictionary);
// Make sure result is a global object with properties in dictionary.
ASSERT(global->IsGlobalObject());
@@ -3391,6 +3437,100 @@ void HeapIterator::reset() {
}
+#ifdef ENABLE_LOGGING_AND_PROFILING
+namespace {
+
+// JSConstructorProfile is responsible for gathering and logging
+// a "constructor profile" of the JS objects allocated on the heap.
+// It is run during the garbage collection cycle, so it does not need
+// to use handles.
+class JSConstructorProfile BASE_EMBEDDED {
+ public:
+ JSConstructorProfile() : zscope_(DELETE_ON_EXIT) {}
+ void CollectStats(JSObject* obj);
+ void PrintStats();
+ // Used by ZoneSplayTree::ForEach.
+ void Call(String* name, const NumberAndSizeInfo& number_and_size);
+ private:
+ struct TreeConfig {
+ typedef String* Key;
+ typedef NumberAndSizeInfo Value;
+ static const Key kNoKey;
+ static const Value kNoValue;
+ // Strings are unique, so it is sufficient to compare their pointers.
+ static int Compare(const Key& a, const Key& b) {
+ return a == b ? 0 : (a < b ? -1 : 1);
+ }
+ };
+
+ typedef ZoneSplayTree<TreeConfig> JSObjectsInfoTree;
+ static int CalculateJSObjectNetworkSize(JSObject* obj);
+
+ ZoneScope zscope_;
+ JSObjectsInfoTree js_objects_info_tree_;
+};
+
+const JSConstructorProfile::TreeConfig::Key
+ JSConstructorProfile::TreeConfig::kNoKey = NULL;
+const JSConstructorProfile::TreeConfig::Value
+ JSConstructorProfile::TreeConfig::kNoValue;
+
+
+int JSConstructorProfile::CalculateJSObjectNetworkSize(JSObject* obj) {
+ int size = obj->Size();
+ // If 'properties' and 'elements' are non-empty (thus, non-shared),
+ // take their size into account.
+ if (FixedArray::cast(obj->properties())->length() != 0) {
+ size += obj->properties()->Size();
+ }
+ if (FixedArray::cast(obj->elements())->length() != 0) {
+ size += obj->elements()->Size();
+ }
+ return size;
+}
+
+
+void JSConstructorProfile::Call(String* name,
+ const NumberAndSizeInfo& number_and_size) {
+ SmartPointer<char> s_name;
+ if (name != NULL) {
+ s_name = name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ }
+ LOG(HeapSampleJSConstructorEvent(*s_name,
+ number_and_size.number(),
+ number_and_size.bytes()));
+}
+
+
+void JSConstructorProfile::CollectStats(JSObject* obj) {
+ String* constructor_func = NULL;
+ if (obj->map()->constructor()->IsJSFunction()) {
+ JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
+ SharedFunctionInfo* sfi = constructor->shared();
+ String* name = String::cast(sfi->name());
+ constructor_func = name->length() > 0 ? name : sfi->inferred_name();
+ } else if (obj->IsJSFunction()) {
+ constructor_func = Heap::function_class_symbol();
+ }
+ JSObjectsInfoTree::Locator loc;
+ if (!js_objects_info_tree_.Find(constructor_func, &loc)) {
+ js_objects_info_tree_.Insert(constructor_func, &loc);
+ }
+ NumberAndSizeInfo number_and_size = loc.value();
+ number_and_size.increment_number(1);
+ number_and_size.increment_bytes(CalculateJSObjectNetworkSize(obj));
+ loc.set_value(number_and_size);
+}
+
+
+void JSConstructorProfile::PrintStats() {
+ js_objects_info_tree_.ForEach(this);
+}
+
+} // namespace
+#endif
+
+
//
// HeapProfiler class implementation.
//
@@ -3415,9 +3555,14 @@ void HeapProfiler::WriteSample() {
INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME
+ JSConstructorProfile js_cons_profile;
HeapIterator iterator;
while (iterator.has_next()) {
- CollectStats(iterator.next(), info);
+ HeapObject* obj = iterator.next();
+ CollectStats(obj, info);
+ if (obj->IsJSObject()) {
+ js_cons_profile.CollectStats(JSObject::cast(obj));
+ }
}
// Lump all the string types together.
@@ -3439,6 +3584,8 @@ void HeapProfiler::WriteSample() {
}
}
+ js_cons_profile.PrintStats();
+
LOG(HeapSampleEndEvent("Heap", "allocated"));
}
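
The JSConstructorProfile added above groups heap objects by constructor name and accumulates a per-group object count and byte total before logging them. A minimal sketch of that aggregation (not part of this change), with std::map standing in for the ZoneSplayTree and all names being illustrative assumptions rather than V8 API:

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct NumberAndSize {
  int number = 0;
  int64_t bytes = 0;
};

class ConstructorProfileSketch {
 public:
  // Mirrors JSConstructorProfile::CollectStats: one call per heap object.
  void Collect(const std::string& constructor_name, int object_size) {
    NumberAndSize& entry = stats_[constructor_name];
    entry.number += 1;
    entry.bytes += object_size;
  }
  // Mirrors PrintStats: emit one record per constructor.
  void Print() const {
    for (const auto& it : stats_) {
      std::cout << it.first << "," << it.second.number << ","
                << it.second.bytes << "\n";
    }
  }
 private:
  std::map<std::string, NumberAndSize> stats_;
};
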
diff --git a/V8Binding/v8/src/heap.h b/V8Binding/v8/src/heap.h
index 4e2c64c..30522dc 100644
--- a/V8Binding/v8/src/heap.h
+++ b/V8Binding/v8/src/heap.h
@@ -94,6 +94,7 @@ namespace internal {
UndetectableMediumAsciiStringMap) \
V(Map, undetectable_long_ascii_string_map, UndetectableLongAsciiStringMap) \
V(Map, byte_array_map, ByteArrayMap) \
+ V(Map, pixel_array_map, PixelArrayMap) \
V(Map, fixed_array_map, FixedArrayMap) \
V(Map, hash_table_map, HashTableMap) \
V(Map, context_map, ContextMap) \
@@ -109,6 +110,7 @@ namespace internal {
V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
V(Object, nan_value, NanValue) \
V(Object, undefined_value, UndefinedValue) \
+ V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
V(Object, minus_zero_value, MinusZeroValue) \
V(Object, null_value, NullValue) \
V(Object, true_value, TrueValue) \
@@ -255,7 +257,7 @@ class Heap : public AllStatic {
// address with the mask will result in the start address of the new space
// for all addresses in either semispace.
static Address NewSpaceStart() { return new_space_.start(); }
- static uint32_t NewSpaceMask() { return new_space_.mask(); }
+ static uintptr_t NewSpaceMask() { return new_space_.mask(); }
static Address NewSpaceTop() { return new_space_.top(); }
static NewSpace* new_space() { return &new_space_; }
@@ -418,6 +420,14 @@ class Heap : public AllStatic {
// Please note this does not perform a garbage collection.
static Object* AllocateByteArray(int length);
+ // Allocate a pixel array of the specified length.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ static Object* AllocatePixelArray(int length,
+ uint8_t* external_pointer,
+ PretenureFlag pretenure);
+
// Allocate a tenured JS global property cell.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -1113,11 +1123,9 @@ class VerifyPointersAndRSetVisitor: public ObjectVisitor {
HeapObject* object = HeapObject::cast(*current);
ASSERT(Heap::Contains(object));
ASSERT(object->map()->IsMap());
-#ifndef V8_TARGET_ARCH_X64
if (Heap::InNewSpace(object)) {
ASSERT(Page::IsRSetSet(reinterpret_cast<Address>(current), 0));
}
-#endif
}
}
}
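
NewSpaceMask() now returns uintptr_t because the mask is applied to full addresses. A minimal sketch (not part of this change) of the containment test it enables, the same test the rewritten RecordWrite performs in assembly below; the semispace size used here is an illustrative assumption:

#include <cstdint>

constexpr uintptr_t kNewSpaceSize = 2u * 1024 * 1024;      // both semispaces
constexpr uintptr_t kNewSpaceMask = ~(kNewSpaceSize - 1);  // reservation mask

// Because the new-space reservation is aligned to its size, masking any
// address yields the reservation start exactly when the address lies inside
// the new space.
inline bool InNewSpaceSketch(uintptr_t address, uintptr_t new_space_start) {
  return (address & kNewSpaceMask) == new_space_start;
}
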
diff --git a/V8Binding/v8/src/ia32/assembler-ia32.h b/V8Binding/v8/src/ia32/assembler-ia32.h
index 70b510e..b648055 100644
--- a/V8Binding/v8/src/ia32/assembler-ia32.h
+++ b/V8Binding/v8/src/ia32/assembler-ia32.h
@@ -226,7 +226,9 @@ enum ScaleFactor {
times_1 = 0,
times_2 = 1,
times_4 = 2,
- times_8 = 3
+ times_8 = 3,
+ times_pointer_size = times_4,
+ times_half_pointer_size = times_2
};
diff --git a/V8Binding/v8/src/ia32/builtins-ia32.cc b/V8Binding/v8/src/ia32/builtins-ia32.cc
index 3cafd90..a70a9d2 100644
--- a/V8Binding/v8/src/ia32/builtins-ia32.cc
+++ b/V8Binding/v8/src/ia32/builtins-ia32.cc
@@ -140,7 +140,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
__ cmp(edi, Operand::StaticVariable(new_space_allocation_limit));
- __ j(greater_equal, &rt_call);
+ __ j(above_equal, &rt_call);
// Allocated the JSObject, now initialize the fields.
// eax: initial map
// ebx: JSObject
@@ -175,8 +175,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ or_(Operand(ebx), Immediate(kHeapObjectTag));
__ mov(Operand::StaticVariable(new_space_allocation_top), edi);
- // Check if a properties array should be setup and allocate one if needed.
- // Otherwise initialize the properties to the empty_fixed_array as well.
+ // Check if a non-empty properties array is needed.
+ // Allocate and initialize a FixedArray if it is.
// eax: initial map
// ebx: JSObject
// edi: start of next object
@@ -184,21 +184,19 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
// Calculate unused properties past the end of the in-object properties.
__ sub(edx, Operand(ecx));
- __ test(edx, Operand(edx));
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
// Scale the number of elements by pointer size and add the header for
// FixedArrays to the start of the next object calculation from above.
- // eax: initial map
// ebx: JSObject
// edi: start of next object (will be start of FixedArray)
// edx: number of elements in properties array
ASSERT(Heap::MaxObjectSizeInPagedSpace() >
(FixedArray::kHeaderSize + 255*kPointerSize));
- __ lea(ecx, Operand(edi, edx, times_4, FixedArray::kHeaderSize));
+ __ lea(ecx, Operand(edi, edx, times_pointer_size, FixedArray::kHeaderSize));
__ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
- __ j(greater_equal, &undo_allocation);
+ __ j(above_equal, &undo_allocation);
__ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
// Initialize the FixedArray.
@@ -223,7 +221,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ add(Operand(eax), Immediate(kPointerSize));
__ bind(&entry);
__ cmp(eax, Operand(ecx));
- __ j(less, &loop);
+ __ j(below, &loop);
}
// Store the initialized FixedArray into the properties field of
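
Several jumps in Generate_JSConstructStubGeneric switch from signed to unsigned condition codes (greater_equal to above_equal, less to below) because the allocation top and limit are addresses, and a signed compare misbehaves once addresses cross 0x80000000. A small illustration, not part of this change:

#include <cstdint>

// What j(greater_equal) would test after cmp(top, limit): signed, and wrong
// for addresses in the upper half of the 32-bit address space.
inline bool SignedAtOrPastLimit(uint32_t top, uint32_t limit) {
  return static_cast<int32_t>(top) >= static_cast<int32_t>(limit);
}

// What j(above_equal) tests: unsigned, correct for any 32-bit address.
inline bool UnsignedAtOrPastLimit(uint32_t top, uint32_t limit) {
  return top >= limit;
}

// Example: top = 0x90000000, limit = 0x10000000. The allocation really is
// past the limit, but the signed test says it is not (0x90000000 is negative
// as int32_t); the unsigned test gets it right.
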
diff --git a/V8Binding/v8/src/ia32/cfg-ia32.cc b/V8Binding/v8/src/ia32/cfg-ia32.cc
new file mode 100644
index 0000000..58985a5
--- /dev/null
+++ b/V8Binding/v8/src/ia32/cfg-ia32.cc
@@ -0,0 +1,315 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cfg.h"
+#include "codegen-inl.h"
+#include "codegen-ia32.h"
+#include "macro-assembler-ia32.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void InstructionBlock::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ {
+ Comment cmt(masm, "[ InstructionBlock");
+ for (int i = 0, len = instructions_.length(); i < len; i++) {
+ // If the location of the current instruction is a temp, then the
+ // instruction cannot be in tail position in the block. Allocate the
+ // temp based on peeking ahead to the next instruction.
+ Instruction* instr = instructions_[i];
+ Location* loc = instr->location();
+ if (loc->is_temporary()) {
+ instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
+ }
+ instructions_[i]->Compile(masm);
+ }
+ }
+ successor_->Compile(masm);
+}
+
+
+void EntryNode::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ Label deferred_enter, deferred_exit;
+ {
+ Comment cmnt(masm, "[ EntryNode");
+ __ push(ebp);
+ __ mov(ebp, esp);
+ __ push(esi);
+ __ push(edi);
+ int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
+ if (count > 0) {
+ __ Set(eax, Immediate(Factory::undefined_value()));
+ for (int i = 0; i < count; i++) {
+ __ push(eax);
+ }
+ }
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ if (FLAG_check_stack) {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(below, &deferred_enter);
+ __ bind(&deferred_exit);
+ }
+ }
+ successor_->Compile(masm);
+ if (FLAG_check_stack) {
+ Comment cmnt(masm, "[ Deferred Stack Check");
+ __ bind(&deferred_enter);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ jmp(&deferred_exit);
+ }
+}
+
+
+void ExitNode::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ Comment cmnt(masm, "[ ExitNode");
+ if (FLAG_trace) {
+ __ push(eax);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ __ RecordJSReturn();
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+ __ ret((count + 1) * kPointerSize);
+}
+
+
+void PropLoadInstr::Compile(MacroAssembler* masm) {
+ // The key should not be on the stack---if it is a compiler-generated
+ // temporary it is in the accumulator.
+ ASSERT(!key()->is_on_stack());
+
+ Comment cmnt(masm, "[ Load from Property");
+ // If the key is known at compile-time we may be able to use a load IC.
+ bool is_keyed_load = true;
+ if (key()->is_constant()) {
+ // Still use the keyed load IC if the key can be parsed as an integer so
+ // we will get into the case that handles [] on string objects.
+ Handle<Object> key_val = Constant::cast(key())->handle();
+ uint32_t ignored;
+ if (key_val->IsSymbol() &&
+ !String::cast(*key_val)->AsArrayIndex(&ignored)) {
+ is_keyed_load = false;
+ }
+ }
+
+ if (!object()->is_on_stack()) object()->Push(masm);
+ // A test eax instruction after the call indicates to the IC code that it
+ // was inlined. Ensure there is not one here.
+ if (is_keyed_load) {
+ key()->Push(masm);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ pop(ebx); // Discard key.
+ } else {
+ key()->Get(masm, ecx);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ }
+ __ pop(ebx); // Discard receiver.
+ location()->Set(masm, eax);
+}
+
+
+void BinaryOpInstr::Compile(MacroAssembler* masm) {
+ // The right-hand value should not be on the stack---if it is a
+ // compiler-generated temporary it is in the accumulator.
+ ASSERT(!right()->is_on_stack());
+
+ Comment cmnt(masm, "[ BinaryOpInstr");
+ // We can overwrite one of the operands if it is a temporary.
+ OverwriteMode mode = NO_OVERWRITE;
+ if (left()->is_temporary()) {
+ mode = OVERWRITE_LEFT;
+ } else if (right()->is_temporary()) {
+ mode = OVERWRITE_RIGHT;
+ }
+
+ // Push both operands and call the specialized stub.
+ if (!left()->is_on_stack()) left()->Push(masm);
+ right()->Push(masm);
+ GenericBinaryOpStub stub(op(), mode, SMI_CODE_IN_STUB);
+ __ CallStub(&stub);
+ location()->Set(masm, eax);
+}
+
+
+void ReturnInstr::Compile(MacroAssembler* masm) {
+ // The location should be 'Effect'. As a side effect, move the value to
+ // the accumulator.
+ Comment cmnt(masm, "[ ReturnInstr");
+ value_->Get(masm, eax);
+}
+
+
+void Constant::Get(MacroAssembler* masm, Register reg) {
+ __ mov(reg, Immediate(handle_));
+}
+
+
+void Constant::Push(MacroAssembler* masm) {
+ __ push(Immediate(handle_));
+}
+
+
+static Operand ToOperand(SlotLocation* loc) {
+ switch (loc->type()) {
+ case Slot::PARAMETER: {
+ int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+ return Operand(ebp, (1 + count - loc->index()) * kPointerSize);
+ }
+ case Slot::LOCAL: {
+ const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
+ return Operand(ebp, kOffset - loc->index() * kPointerSize);
+ }
+ default:
+ UNREACHABLE();
+ return Operand(eax);
+ }
+}
+
+
+void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ __ mov(ToOperand(loc), Immediate(handle_));
+}
+
+
+void SlotLocation::Get(MacroAssembler* masm, Register reg) {
+ __ mov(reg, ToOperand(this));
+}
+
+
+void SlotLocation::Set(MacroAssembler* masm, Register reg) {
+ __ mov(ToOperand(this), reg);
+}
+
+
+void SlotLocation::Push(MacroAssembler* masm) {
+ __ push(ToOperand(this));
+}
+
+
+void SlotLocation::Move(MacroAssembler* masm, Value* value) {
+ // We dispatch to the value because in some cases (temp or constant)
+ // we can use a single instruction.
+ value->MoveToSlot(masm, this);
+}
+
+
+void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ // The accumulator is not live across a MoveInstr.
+ __ mov(eax, ToOperand(this));
+ __ mov(ToOperand(loc), eax);
+}
+
+
+void TempLocation::Get(MacroAssembler* masm, Register reg) {
+ switch (where_) {
+ case ACCUMULATOR:
+ if (!reg.is(eax)) __ mov(reg, eax);
+ break;
+ case STACK:
+ __ pop(reg);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Set(MacroAssembler* masm, Register reg) {
+ switch (where_) {
+ case ACCUMULATOR:
+ if (!reg.is(eax)) __ mov(eax, reg);
+ break;
+ case STACK:
+ __ push(reg);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Push(MacroAssembler* masm) {
+ switch (where_) {
+ case ACCUMULATOR:
+ __ push(eax);
+ break;
+ case STACK:
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Move(MacroAssembler* masm, Value* value) {
+ switch (where_) {
+ case ACCUMULATOR:
+ value->Get(masm, eax);
+ break;
+ case STACK:
+ value->Push(masm);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ switch (where_) {
+ case ACCUMULATOR:
+ __ mov(ToOperand(loc), eax);
+ break;
+ case STACK:
+ __ pop(ToOperand(loc));
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+#undef __
+
+} } // namespace v8::internal
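
ToOperand() in the new cfg-ia32.cc maps slot locations to ebp-relative operands. A minimal sketch (not part of this change) of that offset arithmetic; the local0 base offset used here is an illustrative assumption consistent with the saved ebp, esi and edi pushed by EntryNode::Compile:

constexpr int kPointerSize = 4;

// Parameters live above the saved ebp and the return address; parameter 0 is
// pushed first and is therefore deepest, so its offset grows with the count.
inline int ParameterOffsetFromEbp(int num_parameters, int index) {
  return (1 + num_parameters - index) * kPointerSize;
}

// Locals live below ebp, starting at a fixed negative base offset
// (JavaScriptFrameConstants::kLocal0Offset in the real code).
inline int LocalOffsetFromEbp(int index,
                              int local0_offset = -3 * kPointerSize) {
  return local0_offset - index * kPointerSize;
}
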
diff --git a/V8Binding/v8/src/ia32/codegen-ia32.cc b/V8Binding/v8/src/ia32/codegen-ia32.cc
index 457b22f..9542b16 100644
--- a/V8Binding/v8/src/ia32/codegen-ia32.cc
+++ b/V8Binding/v8/src/ia32/codegen-ia32.cc
@@ -754,9 +754,9 @@ class FloatingPointHelper : public AllStatic {
public:
// Code pattern for loading a floating point value. Input value must
// be either a smi or a heap number object (fp value). Requirements:
- // operand on TOS+1. Returns operand as floating point number on FPU
- // stack.
- static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
+ // operand in register number. Returns operand as floating point number
+ // on FPU stack.
+ static void LoadFloatOperand(MacroAssembler* masm, Register number);
// Code pattern for loading floating point values. Input values must
// be either smi or heap number objects (fp values). Requirements:
// operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
@@ -778,57 +778,6 @@ class FloatingPointHelper : public AllStatic {
};
-// Flag that indicates whether or not the code that handles smi arguments
-// should be placed in the stub, inlined, or omitted entirely.
-enum GenericBinaryFlags {
- SMI_CODE_IN_STUB,
- SMI_CODE_INLINED
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags)
- : op_(op), mode_(mode), flags_(flags) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
- Token::String(op_),
- static_cast<int>(mode_),
- static_cast<int>(flags_));
- }
-#endif
-
- // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 13> {};
- class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_);
- }
- void Generate(MacroAssembler* masm);
-};
-
-
const char* GenericBinaryOpStub::GetName() {
switch (op_) {
case Token::ADD: return "GenericBinaryOpStub_ADD";
@@ -5154,11 +5103,10 @@ void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
+ ASSERT(kSmiTag == 0); // EBP value is aligned, so it should look like Smi.
Result ebp_as_smi = allocator_->Allocate();
ASSERT(ebp_as_smi.is_valid());
__ mov(ebp_as_smi.reg(), Operand(ebp));
- __ shr(ebp_as_smi.reg(), kSmiTagSize);
frame_->Push(&ebp_as_smi);
}
@@ -5216,8 +5164,11 @@ void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
}
// Go slow case if argument to operation is out of range.
+ Result eax_reg = allocator_->Allocate(eax);
+ ASSERT(eax_reg.is_valid());
__ fnstsw_ax();
__ sahf();
+ eax_reg.Unuse();
call_runtime.Branch(parity_even, not_taken);
// Allocate heap number for result if possible.
@@ -6297,8 +6248,8 @@ void Reference::GetValue(TypeofState typeof_state) {
__ mov(elements.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset));
__ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
- deferred->Branch(equal);
+ Immediate(Factory::fixed_array_map()));
+ deferred->Branch(not_equal);
// Shift the key to get the actual index value and check that
// it is within bounds.
@@ -7016,19 +6967,19 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
- Register scratch) {
+ Register number) {
Label load_smi, done;
- __ test(scratch, Immediate(kSmiTagMask));
+ __ test(number, Immediate(kSmiTagMask));
__ j(zero, &load_smi, not_taken);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+ __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi);
- __ sar(scratch, kSmiTagSize);
- __ push(scratch);
+ __ sar(number, kSmiTagSize);
+ __ push(number);
__ fild_s(Operand(esp, 0));
- __ pop(scratch);
+ __ pop(number);
__ bind(&done);
}
@@ -7786,7 +7737,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
__ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ j(NegateCondition(equal), &not_outermost_js);
+ __ j(not_equal, &not_outermost_js);
__ mov(Operand::StaticVariable(js_entry_sp), ebp);
__ bind(&not_outermost_js);
#endif
@@ -7837,7 +7788,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// If current EBP value is the same as js_entry_sp value, it means that
// the current function is the outermost.
__ cmp(ebp, Operand::StaticVariable(js_entry_sp));
- __ j(NegateCondition(equal), &not_outermost_js_2);
+ __ j(not_equal, &not_outermost_js_2);
__ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ bind(&not_outermost_js_2);
#endif
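
The GenerateGetFramePointer change drops the explicit shift because, with kSmiTag == 0 and a one-bit tag, any pointer-aligned value already carries a valid Smi tag. A minimal illustration, not part of this change:

#include <cstdint>

constexpr uintptr_t kSmiTag = 0;
constexpr uintptr_t kSmiTagMask = 1;

// ebp is at least 4-byte aligned on ia32, so its low bit is clear and the
// untouched value is already indistinguishable from a Smi.
inline bool LooksLikeSmi(uintptr_t value) {
  return (value & kSmiTagMask) == kSmiTag;
}
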
diff --git a/V8Binding/v8/src/ia32/codegen-ia32.h b/V8Binding/v8/src/ia32/codegen-ia32.h
index 5cd50b8..1d0cc8b 100644
--- a/V8Binding/v8/src/ia32/codegen-ia32.h
+++ b/V8Binding/v8/src/ia32/codegen-ia32.h
@@ -359,7 +359,7 @@ class CodeGenerator: public AstVisitor {
#define DEF_VISIT(type) \
void Visit##type(type* node);
- NODE_LIST(DEF_VISIT)
+ AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Visit a statement and then spill the virtual frame if control flow can
@@ -558,7 +558,7 @@ class CodeGenerator: public AstVisitor {
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Node* node);
+ void CodeForStatementPosition(AstNode* node);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
@@ -603,12 +603,63 @@ class CodeGenerator: public AstVisitor {
friend class Reference;
friend class Result;
- friend class CodeGeneratorPatcher; // Used in test-log-ia32.cc
+ friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
+// Flag that indicates whether or not the code that handles smi arguments
+// should be placed in the stub, inlined, or omitted entirely.
+enum GenericBinaryFlags {
+ SMI_CODE_IN_STUB,
+ SMI_CODE_INLINED
+};
+
+
+class GenericBinaryOpStub: public CodeStub {
+ public:
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ GenericBinaryFlags flags)
+ : op_(op), mode_(mode), flags_(flags) {
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ GenericBinaryFlags flags_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+ Token::String(op_),
+ static_cast<int>(mode_),
+ static_cast<int>(flags_));
+ }
+#endif
+
+ // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 13> {};
+ class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | FlagBits::encode(flags_);
+ }
+ void Generate(MacroAssembler* masm);
+};
+
+
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
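
GenericBinaryOpStub moves from codegen-ia32.cc into this header unchanged; its MinorKey packs the stub parameters into 16 bits, with the mode in bits 0-1, the token in bits 2-14, and the smi-code flag in bit 15. A small sketch of that encoding, not part of this change:

#include <cstdint>

// Layout FOOOOOOOOOOOOOMM: ModeBits<0,2>, OpBits<2,13>, FlagBits<15,1>.
inline uint16_t EncodeMinorKeySketch(unsigned op, unsigned mode,
                                     unsigned flag) {
  return static_cast<uint16_t>((mode & 0x3u) |
                               ((op & 0x1fffu) << 2) |
                               ((flag & 0x1u) << 15));
}
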
diff --git a/V8Binding/v8/src/ia32/ic-ia32.cc b/V8Binding/v8/src/ia32/ic-ia32.cc
index d64dee1..08ffe2f 100644
--- a/V8Binding/v8/src/ia32/ic-ia32.cc
+++ b/V8Binding/v8/src/ia32/ic-ia32.cc
@@ -234,11 +234,11 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- esp[4] : name
// -- esp[8] : receiver
// -----------------------------------
- Label slow, fast, check_string, index_int, index_string;
+ Label slow, check_string, index_int, index_string, check_pixel_array;
// Load name and receiver.
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
// Check that the object isn't a smi.
__ test(ecx, Immediate(kSmiTagMask));
@@ -269,11 +269,36 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
- __ j(equal, &slow, not_taken);
+ Immediate(Factory::fixed_array_map()));
+ __ j(not_equal, &check_pixel_array);
// Check that the key (index) is within bounds.
__ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ j(below, &fast, taken);
+ __ j(above_equal, &slow);
+ // Fast case: Do the load.
+ __ mov(eax,
+ Operand(ecx, eax, times_4, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ j(equal, &slow);
+ __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+ __ ret(0);
+
+ // Check whether the elements is a pixel array.
+ // eax: untagged index
+ // ecx: elements array
+ __ bind(&check_pixel_array);
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ Immediate(Factory::pixel_array_map()));
+ __ j(not_equal, &slow);
+ __ cmp(eax, FieldOperand(ecx, PixelArray::kLengthOffset));
+ __ j(above_equal, &slow);
+ __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
+ __ movzx_b(eax, Operand(ecx, eax, times_1, 0));
+ __ shl(eax, kSmiTagSize);
+ __ ret(0);
+
+
// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
@@ -315,16 +340,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ and_(eax, (1 << String::kShortLengthShift) - 1);
__ shr(eax, String::kLongLengthShift);
__ jmp(&index_int);
- // Fast case: Do the load.
- __ bind(&fast);
- __ mov(eax,
- Operand(ecx, eax, times_4, FixedArray::kHeaderSize - kHeapObjectTag));
- __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ j(equal, &slow, not_taken);
- __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
- __ ret(0);
}
@@ -335,7 +350,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// -- esp[4] : key
// -- esp[8] : receiver
// -----------------------------------
- Label slow, fast, array, extra;
+ Label slow, fast, array, extra, check_pixel_array;
// Get the receiver from the stack.
__ mov(edx, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, key
@@ -370,8 +385,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
- __ j(equal, &slow, not_taken);
+ Immediate(Factory::fixed_array_map()));
+ __ j(not_equal, &check_pixel_array, not_taken);
// Untag the key (for checking against untagged length in the fixed array).
__ mov(edx, Operand(ebx));
__ sar(edx, kSmiTagSize); // untag the index and use it for the comparison
@@ -381,7 +396,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ebx: index (as a smi)
__ j(below, &fast, taken);
-
// Slow case: Push extra copies of the arguments (3).
__ bind(&slow);
__ pop(ecx);
@@ -392,6 +406,37 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Do tail-call to runtime routine.
__ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3);
+ // Check whether the elements is a pixel array.
+ // eax: value
+ // ecx: elements array
+ // ebx: index (as a smi)
+ __ bind(&check_pixel_array);
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ Immediate(Factory::pixel_array_map()));
+ __ j(not_equal, &slow);
+ // Check that the value is a smi. If a conversion is needed call into the
+ // runtime to convert and clamp.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow);
+ __ sar(ebx, kSmiTagSize); // Untag the index.
+ __ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
+ __ j(above_equal, &slow);
+ __ sar(eax, kSmiTagSize); // Untag the value.
+ { // Clamp the value to [0..255].
+ Label done, check_255;
+ __ cmp(eax, 0);
+ __ j(greater_equal, &check_255);
+ __ mov(eax, Immediate(0));
+ __ jmp(&done);
+ __ bind(&check_255);
+ __ cmp(eax, 255);
+ __ j(less_equal, &done);
+ __ mov(eax, Immediate(255));
+ __ bind(&done);
+ }
+ __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
+ __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+ __ ret(0);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -422,15 +467,14 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ebx: index (as a smi)
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
- __ j(equal, &slow, not_taken);
+ Immediate(Factory::fixed_array_map()));
+ __ j(not_equal, &check_pixel_array);
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
__ cmp(ebx, FieldOperand(edx, JSArray::kLengthOffset));
__ j(above_equal, &extra, not_taken);
-
// Fast case: Do the store.
__ bind(&fast);
// eax: value
@@ -749,12 +793,10 @@ void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
// -----------------------------------
__ mov(eax, Operand(esp, kPointerSize));
-
- // Move the return address below the arguments.
__ pop(ebx);
- __ push(eax);
- __ push(ecx);
- __ push(ebx);
+ __ push(eax); // receiver
+ __ push(ecx); // name
+ __ push(ebx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(f, 2);
@@ -797,7 +839,8 @@ void KeyedStoreIC::RestoreInlinedVersion(Address address) {
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// The address of the instruction following the call.
- Address test_instruction_address = address + 4;
+ Address test_instruction_address =
+ address + Assembler::kTargetAddrToReturnAddrDist;
// If the instruction following the call is not a test eax, nothing
// was inlined.
if (*test_instruction_address != kTestEaxByte) return false;
@@ -823,7 +866,8 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
static bool PatchInlinedMapCheck(Address address, Object* map) {
- Address test_instruction_address = address + 4; // 4 = stub address
+ Address test_instruction_address =
+ address + Assembler::kTargetAddrToReturnAddrDist;
// The keyed load has a fast inlined case if the IC call instruction
// is immediately followed by a test instruction.
if (*test_instruction_address != kTestEaxByte) return false;
@@ -877,12 +921,10 @@ void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
__ mov(eax, Operand(esp, kPointerSize));
__ mov(ecx, Operand(esp, 2 * kPointerSize));
-
- // Move the return address below the arguments.
__ pop(ebx);
- __ push(ecx);
- __ push(eax);
- __ push(ebx);
+ __ push(ecx); // receiver
+ __ push(eax); // name
+ __ push(ebx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(f, 2);
@@ -917,12 +959,12 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
// -- esp[4] : receiver
// -----------------------------------
- // Move the return address below the arguments.
__ pop(ebx);
- __ push(Operand(esp, 0));
- __ push(ecx);
- __ push(eax);
- __ push(ebx);
+ __ push(Operand(esp, 0)); // receiver
+ __ push(ecx); // transition map
+ __ push(eax); // value
+ __ push(ebx); // return address
+
// Perform tail call to the entry.
__ TailCallRuntime(
ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);
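
The new pixel-array path in KeyedStoreIC::GenerateGeneric clamps the untagged value to the byte range before storing it through the external pointer. A minimal sketch of that clamp-and-store, not part of this change:

#include <cstdint>

inline uint8_t ClampToByte(int value) {
  if (value < 0) return 0;      // the below-zero branch in the stub
  if (value > 255) return 255;  // the check_255 branch
  return static_cast<uint8_t>(value);
}

// Store one byte at external_pointer[index], mirroring the final mov_b.
inline void StorePixel(uint8_t* external_pointer, int index,
                       int untagged_value) {
  external_pointer[index] = ClampToByte(untagged_value);
}
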
diff --git a/V8Binding/v8/src/ia32/macro-assembler-ia32.cc b/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
index fae1525..7782aa9 100644
--- a/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
+++ b/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
@@ -54,41 +54,47 @@ static void RecordWriteHelper(MacroAssembler* masm,
Register scratch) {
Label fast;
- // Compute the page address from the heap object pointer, leave it
- // in 'object'.
+ // Compute the page start address from the heap object pointer, and reuse
+ // the 'object' register for it.
masm->and_(object, ~Page::kPageAlignmentMask);
+ Register page_start = object;
- // Compute the bit addr in the remembered set, leave it in "addr".
- masm->sub(addr, Operand(object));
+ // Compute the bit address in the remembered set, i.e. the index of the
+ // pointer within the page. Reuse 'addr' as pointer_offset.
+ masm->sub(addr, Operand(page_start));
masm->shr(addr, kObjectAlignmentBits);
+ Register pointer_offset = addr;
// If the bit offset lies beyond the normal remembered set range, it is in
// the extra remembered set area of a large object.
- masm->cmp(addr, Page::kPageSize / kPointerSize);
+ masm->cmp(pointer_offset, Page::kPageSize / kPointerSize);
masm->j(less, &fast);
- // Adjust 'addr' to be relative to the start of the extra remembered set
- // and the page address in 'object' to be the address of the extra
- // remembered set.
- masm->sub(Operand(addr), Immediate(Page::kPageSize / kPointerSize));
- // Load the array length into 'scratch' and multiply by four to get the
- // size in bytes of the elements.
- masm->mov(scratch, Operand(object, Page::kObjectStartOffset
- + FixedArray::kLengthOffset));
- masm->shl(scratch, kObjectAlignmentBits);
- // Add the page header, array header, and array body size to the page
- // address.
- masm->add(Operand(object), Immediate(Page::kObjectStartOffset
- + FixedArray::kHeaderSize));
- masm->add(object, Operand(scratch));
-
+ // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
+ // extra remembered set after the large object.
+
+ // Find the length of the large object (FixedArray).
+ masm->mov(scratch, Operand(page_start, Page::kObjectStartOffset
+ + FixedArray::kLengthOffset));
+ Register array_length = scratch;
+
+ // Extra remembered set starts right after the large object (a FixedArray), at
+ // page_start + kObjectStartOffset + objectSize
+ // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
+ // Add the delta between the end of the normal RSet and the start of the
+ // extra RSet to 'object', so that addressing the bit using 'pointer_offset'
+ // hits the extra RSet words.
+ masm->lea(page_start,
+ Operand(page_start, array_length, times_pointer_size,
+ Page::kObjectStartOffset + FixedArray::kHeaderSize
+ - Page::kRSetEndOffset));
// NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
masm->bind(&fast);
- masm->bts(Operand(object, 0), addr);
+ masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
}
@@ -146,43 +152,30 @@ void MacroAssembler::RecordWrite(Register object, int offset,
// for the remembered set bits.
Label done;
- // This optimization cannot survive serialization and deserialization,
- // so we disable as long as serialization can take place.
- int32_t new_space_start =
- reinterpret_cast<int32_t>(ExternalReference::new_space_start().address());
- if (Serializer::enabled() || new_space_start < 0) {
- // Cannot do smart bit-twiddling. Need to do two consecutive checks.
- // Check for Smi first.
- test(value, Immediate(kSmiTagMask));
- j(zero, &done);
- // Test that the object address is not in the new space. We cannot
- // set remembered set bits in the new space.
+ // Skip barrier if writing a smi.
+ ASSERT_EQ(0, kSmiTag);
+ test(value, Immediate(kSmiTagMask));
+ j(zero, &done);
+
+ if (Serializer::enabled()) {
+ // Can't do arithmetic on external references if it might get serialized.
mov(value, Operand(object));
and_(value, Heap::NewSpaceMask());
cmp(Operand(value), Immediate(ExternalReference::new_space_start()));
j(equal, &done);
} else {
- // move the value SmiTag into the sign bit
- shl(value, 31);
- // combine the object with value SmiTag
- or_(value, Operand(object));
- // remove the uninteresing bits inside the page
- and_(value, Heap::NewSpaceMask() | (1 << 31));
- // xor has two effects:
- // - if the value was a smi, then the result will be negative
- // - if the object is pointing into new space area the page bits will
- // all be zero
- xor_(value, new_space_start | (1 << 31));
- // Check for both conditions in one branch
- j(less_equal, &done);
+ int32_t new_space_start = reinterpret_cast<int32_t>(
+ ExternalReference::new_space_start().address());
+ lea(value, Operand(object, -new_space_start));
+ and_(value, Heap::NewSpaceMask());
+ j(equal, &done);
}
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
// Compute the bit offset in the remembered set, leave it in 'value'.
- mov(value, Operand(object));
+ lea(value, Operand(object, offset));
and_(value, Page::kPageAlignmentMask);
- add(Operand(value), Immediate(offset));
- shr(value, kObjectAlignmentBits);
+ shr(value, kPointerSizeLog2);
// Compute the page address from the heap object pointer, leave it in
// 'object'.
@@ -192,7 +185,7 @@ void MacroAssembler::RecordWrite(Register object, int offset,
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
- bts(Operand(object, 0), value);
+ bts(Operand(object, Page::kRSetOffset), value);
} else {
Register dst = scratch;
if (offset != 0) {
@@ -201,7 +194,9 @@ void MacroAssembler::RecordWrite(Register object, int offset,
// array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
// into an array of words.
- lea(dst, Operand(object, dst, times_2,
+ ASSERT_EQ(1, kSmiTagSize);
+ ASSERT_EQ(0, kSmiTag);
+ lea(dst, Operand(object, dst, times_half_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
}
// If we are already generating a shared stub, not inlining the
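
RecordWriteHelper and RecordWrite now address the remembered set explicitly via Page::kRSetOffset. A minimal sketch (not part of this change) of the indexing they implement; the page size and constants here are illustrative assumptions:

#include <cstdint>

constexpr uintptr_t kPageSize = 8 * 1024;
constexpr int kPointerSizeLog2 = 2;

// Bit index of a pointer slot inside its page's remembered set: the slot's
// offset within the page, divided by the pointer size.
inline uint32_t RSetBitIndex(uintptr_t slot_address) {
  uintptr_t page_start = slot_address & ~(kPageSize - 1);
  return static_cast<uint32_t>((slot_address - page_start) >>
                               kPointerSizeLog2);
}

// Indices past kPageSize / kPointerSize belong to a large FixedArray; those
// bits live in an "extra" remembered set placed right after the array, which
// is where the lea() in RecordWriteHelper points page_start.
constexpr uint32_t kNormalRSetBits = kPageSize >> kPointerSizeLog2;
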
diff --git a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc
index c5d7c05..a49c1f5 100644
--- a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -634,11 +634,9 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(Immediate(0)); // Make room for "input start - 1" constant.
// Check if we have space on the stack for registers.
- Label retry_stack_check;
Label stack_limit_hit;
Label stack_ok;
- __ bind(&retry_stack_check);
ExternalReference stack_guard_limit =
ExternalReference::address_of_stack_guard_limit();
__ mov(ecx, esp);
@@ -658,10 +656,7 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
CallCheckStackGuardState(ebx);
__ or_(eax, Operand(eax));
// If returned value is non-zero, we exit with the returned value as result.
- // Otherwise it was a preemption and we just check the limit again.
- __ j(equal, &retry_stack_check);
- // Return value was non-zero. Exit with exception or retry.
- __ jmp(&exit_label_);
+ __ j(not_zero, &exit_label_);
__ bind(&stack_ok);
@@ -757,24 +752,16 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Preempt-code
if (check_preempt_label_.is_linked()) {
- __ bind(&check_preempt_label_);
+ SafeCallTarget(&check_preempt_label_);
__ push(backtrack_stackpointer());
__ push(edi);
- Label retry;
-
- __ bind(&retry);
CallCheckStackGuardState(ebx);
__ or_(eax, Operand(eax));
// If returning non-zero, we should end execution with the given
// result as return value.
__ j(not_zero, &exit_label_);
- // Check if we are still preempted.
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
- __ j(below_equal, &retry);
__ pop(edi);
__ pop(backtrack_stackpointer());
@@ -785,7 +772,7 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Backtrack stack overflow code.
if (stack_overflow_label_.is_linked()) {
- __ bind(&stack_overflow_label_);
+ SafeCallTarget(&stack_overflow_label_);
// Reached if the backtrack-stack limit has been hit.
Label grow_failed;
@@ -1262,17 +1249,19 @@ void RegExpMacroAssemblerIA32::BranchOrBacktrack(Condition condition,
void RegExpMacroAssemblerIA32::SafeCall(Label* to) {
- Label return_to;
- __ push(Immediate::CodeRelativeOffset(&return_to));
- __ jmp(to);
- __ bind(&return_to);
+ __ call(to);
}
void RegExpMacroAssemblerIA32::SafeReturn() {
- __ pop(ebx);
- __ add(Operand(ebx), Immediate(masm_->CodeObject()));
- __ jmp(Operand(ebx));
+ __ add(Operand(esp, 0), Immediate(masm_->CodeObject()));
+ __ ret(0);
+}
+
+
+void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) {
+ __ bind(name);
+ __ sub(Operand(esp, 0), Immediate(masm_->CodeObject()));
}
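
SafeCall now uses a real call, and SafeCallTarget/SafeReturn keep the saved return address GC-safe by storing it on the stack as an offset from the code object and rebuilding the absolute address on return. A minimal sketch of the two conversions, not part of this change, with illustrative names:

#include <cstdint>

// At the called label (SafeCallTarget): strip the code object's base so the
// stack holds only a relocatable offset.
inline uintptr_t ToCodeRelative(uintptr_t return_address,
                                uintptr_t code_object) {
  return return_address - code_object;
}

// Just before returning (SafeReturn): add the code object's current base
// back; if the GC moved the code in the meantime, the result still points at
// the right instruction.
inline uintptr_t ToAbsolute(uintptr_t offset, uintptr_t current_code_object) {
  return current_code_object + offset;
}
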
diff --git a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h
index a06700a..c3d9155 100644
--- a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -227,6 +227,7 @@ class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
// is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
inline void SafeCall(Label* to);
inline void SafeReturn();
+ inline void SafeCallTarget(Label* name);
// Pushes the value of a register on the backtrack stack. Decrements the
// stack pointer (ecx) by a word size and stores the register's value there.
diff --git a/V8Binding/v8/src/ia32/stub-cache-ia32.cc b/V8Binding/v8/src/ia32/stub-cache-ia32.cc
index e47ad1c..a626377 100644
--- a/V8Binding/v8/src/ia32/stub-cache-ia32.cc
+++ b/V8Binding/v8/src/ia32/stub-cache-ia32.cc
@@ -152,6 +152,22 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
}
+template <typename Pushable>
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Pushable name,
+ JSObject* holder_obj) {
+ __ push(receiver);
+ __ push(holder);
+ __ push(name);
+ InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+ __ mov(receiver, Immediate(Handle<Object>(interceptor)));
+ __ push(receiver);
+ __ push(FieldOperand(receiver, InterceptorInfo::kDataOffset));
+}
+
+
void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
@@ -273,6 +289,322 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
}
+template <class Pushable>
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Pushable name,
+ JSObject* holder_obj) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
+ __ mov(eax, Immediate(5));
+ __ mov(ebx, Immediate(ref));
+
+ CEntryStub stub;
+ __ CallStub(&stub);
+}
+
+
+template <class Compiler>
+static void CompileLoadInterceptor(Compiler* compiler,
+ StubCompiler* stub_compiler,
+ MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss, not_taken);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ stub_compiler->CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, name, miss);
+
+ if (lookup->IsValid() && lookup->IsCacheable()) {
+ compiler->CompileCacheable(masm,
+ stub_compiler,
+ receiver,
+ reg,
+ scratch1,
+ scratch2,
+ holder,
+ lookup,
+ name,
+ miss);
+ } else {
+ compiler->CompileRegular(masm,
+ receiver,
+ reg,
+ scratch2,
+ holder,
+ miss);
+ }
+}
+
+
+static void LookupPostInterceptor(JSObject* holder,
+ String* name,
+ LookupResult* lookup) {
+ holder->LocalLookupRealNamedProperty(name, lookup);
+ if (lookup->IsNotFound()) {
+ Object* proto = holder->GetPrototype();
+ if (proto != Heap::null_value()) {
+ proto->Lookup(name, lookup);
+ }
+ }
+}
+
+
+class LoadInterceptorCompiler BASE_EMBEDDED {
+ public:
+ explicit LoadInterceptorCompiler(Register name) : name_(name) {}
+
+ void CompileCacheable(MacroAssembler* masm,
+ StubCompiler* stub_compiler,
+ Register receiver,
+ Register holder,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ Label* miss_label) {
+ AccessorInfo* callback = 0;
+ bool optimize = false;
+ // So far the most popular follow-ups for interceptor loads are FIELD
+ // and CALLBACKS, so only those are inlined; other cases may be added
+ // later.
+ if (lookup->type() == FIELD) {
+ optimize = true;
+ } else if (lookup->type() == CALLBACKS) {
+ Object* callback_object = lookup->GetCallbackObject();
+ if (callback_object->IsAccessorInfo()) {
+ callback = AccessorInfo::cast(callback_object);
+ optimize = callback->getter() != NULL;
+ }
+ }
+
+ if (!optimize) {
+ CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+ return;
+ }
+
+ // Note: starting a frame here makes GC aware of pointers pushed below.
+ __ EnterInternalFrame();
+
+ if (lookup->type() == CALLBACKS) {
+ __ push(receiver);
+ }
+ __ push(holder);
+ __ push(name_);
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ Label interceptor_failed;
+ __ cmp(eax, Factory::no_interceptor_result_sentinel());
+ __ j(equal, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ ret(0);
+
+ __ bind(&interceptor_failed);
+ __ pop(name_);
+ __ pop(holder);
+ if (lookup->type() == CALLBACKS) {
+ __ pop(receiver);
+ }
+
+ __ LeaveInternalFrame();
+
+ if (lookup->type() == FIELD) {
+ holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ miss_label);
+ stub_compiler->GenerateFastPropertyLoad(masm, eax,
+ holder, lookup->holder(),
+ lookup->GetFieldIndex());
+ __ ret(0);
+ } else {
+ ASSERT(lookup->type() == CALLBACKS);
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+ ASSERT(callback != NULL);
+ ASSERT(callback->getter() != NULL);
+
+ Label cleanup;
+ __ pop(scratch2);
+ __ push(receiver);
+ __ push(scratch2);
+
+ holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ &cleanup);
+
+ __ pop(scratch2); // save old return address
+ __ push(holder);
+ __ mov(holder, Immediate(Handle<AccessorInfo>(callback)));
+ __ push(holder);
+ __ push(FieldOperand(holder, AccessorInfo::kDataOffset));
+ __ push(name_);
+ __ push(scratch2); // restore old return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallRuntime(ref, 5);
+
+ __ bind(&cleanup);
+ __ pop(scratch1);
+ __ pop(scratch2);
+ __ push(scratch1);
+ }
+ }
+
+
+ void CompileRegular(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register scratch,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ __ pop(scratch); // save old return address
+ PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+ __ push(scratch); // restore old return address
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ __ TailCallRuntime(ref, 5);
+ }
+
+ private:
+ Register name_;
+};
+
+
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+ explicit CallInterceptorCompiler(const ParameterCount& arguments)
+ : arguments_(arguments), argc_(arguments.immediate()) {}
+
+ void CompileCacheable(MacroAssembler* masm,
+ StubCompiler* stub_compiler,
+ Register receiver,
+ Register holder,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ Label* miss_label) {
+ JSFunction* function = 0;
+ bool optimize = false;
+ // So far the most common case after a failed interceptor lookup is a
+ // CONSTANT_FUNCTION sitting below it.
+ if (lookup->type() == CONSTANT_FUNCTION) {
+ function = lookup->GetConstantFunction();
+ // A JSArray holder is a special case when calling a constant function
+ // (see the corresponding code).
+ if (function->is_compiled() && !holder_obj->IsJSArray()) {
+ optimize = true;
+ }
+ }
+
+ if (!optimize) {
+ CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+ return;
+ }
+
+ __ EnterInternalFrame();
+ __ push(holder); // save the holder
+
+ CompileCallLoadPropertyWithInterceptor(
+ masm,
+ receiver,
+ holder,
+ // With the internal frame entered, this operand refers to the name argument.
+ Operand(ebp, (argc_ + 3) * kPointerSize),
+ holder_obj);
+
+ __ pop(receiver); // restore holder
+ __ LeaveInternalFrame();
+
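+ // If the interceptor supplied a result (anything but the sentinel),
+ // skip the constant-function fast path below.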
+ __ cmp(eax, Factory::no_interceptor_result_sentinel());
+ Label invoke;
+ __ j(not_equal, &invoke);
+
+ stub_compiler->CheckPrototypes(holder_obj, receiver,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ miss_label);
+ if (lookup->holder()->IsGlobalObject()) {
+ __ mov(edx, Operand(esp, (argc_ + 1) * kPointerSize));
+ __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+ __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edx);
+ }
+
+ ASSERT(function->is_compiled());
+ // Get the function and setup the context.
+ __ mov(edi, Immediate(Handle<JSFunction>(function)));
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // Jump to the cached code (tail call).
+ ASSERT(function->is_compiled());
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ __ InvokeCode(code, expected, arguments_,
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+ __ bind(&invoke);
+ }
+
+ void CompileRegular(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register scratch,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ __ EnterInternalFrame();
+
+ PushInterceptorArguments(masm,
+ receiver,
+ holder,
+ Operand(ebp, (argc_ + 3) * kPointerSize),
+ holder_obj);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
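+ // Call the runtime through CEntryStub: eax holds the argument count,
+ // ebx the runtime function to invoke.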
+ __ mov(eax, Immediate(5));
+ __ mov(ebx, Immediate(ref));
+
+ CEntryStub stub;
+ __ CallStub(&stub);
+
+ __ LeaveInternalFrame();
+ }
+
+ private:
+ const ParameterCount& arguments_;
+ int argc_;
+};
+
+
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
Code* code = NULL;
@@ -447,15 +779,17 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
// Push the arguments on the JS stack of the caller.
__ pop(scratch2); // remove return address
__ push(receiver); // receiver
- __ push(Immediate(Handle<AccessorInfo>(callback))); // callback data
- __ push(name_reg); // name
__ push(reg); // holder
+ __ mov(reg, Immediate(Handle<AccessorInfo>(callback))); // callback object
+ __ push(reg);
+ __ push(FieldOperand(reg, AccessorInfo::kDataOffset));
+ __ push(name_reg); // name
__ push(scratch2); // restore return address
// Do tail-call to the runtime system.
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallRuntime(load_callback_property, 4);
+ __ TailCallRuntime(load_callback_property, 5);
}
@@ -484,36 +818,25 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
JSObject* holder,
- Smi* lookup_hint,
+ LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
String* name,
Label* miss) {
- // Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
-
- // Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
-
- // Push the arguments on the JS stack of the caller.
- __ pop(scratch2); // remove return address
- __ push(receiver); // receiver
- __ push(reg); // holder
- __ push(name_reg); // name
- // TODO(367): Maybe don't push lookup_hint for LOOKUP_IN_HOLDER and/or
- // LOOKUP_IN_PROTOTYPE, but use a special version of lookup method?
- __ push(Immediate(lookup_hint));
- __ push(scratch2); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference load_ic_property =
- ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
- __ TailCallRuntime(load_ic_property, 4);
+ LoadInterceptorCompiler compiler(name_reg);
+ CompileLoadInterceptor(&compiler,
+ this,
+ masm(),
+ object,
+ holder,
+ name,
+ lookup,
+ receiver,
+ scratch1,
+ scratch2,
+ miss);
}
@@ -678,13 +1001,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
CheckPrototypes(JSObject::cast(object), edx, holder,
ebx, ecx, name, &miss);
- // Make sure object->elements()->map() != Heap::dictionary_array_map()
+ // Make sure object->HasFastElements().
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
- __ j(equal, &miss, not_taken);
+ Immediate(Factory::fixed_array_map()));
+ __ j(not_equal, &miss, not_taken);
break;
default:
@@ -726,47 +1049,32 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
// Get the number of arguments.
const int argc = arguments().immediate();
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+
// Get the receiver from the stack.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- // Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
-
- // Check that maps have not changed and compute the holder register.
- Register reg =
- CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, ecx, name, &miss);
-
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Push arguments on the expression stack.
- __ push(edx); // receiver
- __ push(reg); // holder
- __ push(Operand(ebp, (argc + 3) * kPointerSize)); // name
- __ push(Immediate(holder->InterceptorPropertyLookupHint(name)));
-
- // Perform call.
- ExternalReference load_interceptor =
- ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
- __ mov(eax, Immediate(4));
- __ mov(ebx, Immediate(load_interceptor));
-
- CEntryStub stub;
- __ CallStub(&stub);
-
- // Move result to edi and restore receiver.
- __ mov(edi, eax);
- __ mov(edx, Operand(ebp, (argc + 2) * kPointerSize)); // receiver
-
- // Exit frame.
- __ LeaveInternalFrame();
+ CallInterceptorCompiler compiler(arguments());
+ CompileLoadInterceptor(&compiler,
+ this,
+ masm(),
+ JSObject::cast(object),
+ holder,
+ name,
+ &lookup,
+ edx,
+ ebx,
+ ecx,
+ &miss);
+
+ // Restore receiver.
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the function really is a function.
- __ test(edi, Immediate(kSmiTagMask));
+ __ test(eax, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
__ j(not_equal, &miss, not_taken);
// Patch the receiver on the stack with the global proxy if
@@ -777,6 +1085,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
}
// Invoke the function.
+ __ mov(edi, eax);
__ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
// Handle load cache miss.
@@ -798,8 +1107,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::call_global_inline, 1);
-
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -835,6 +1142,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
+ __ IncrementCounter(&Counters::call_global_inline, 1);
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -843,7 +1151,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
- __ DecrementCounter(&Counters::call_global_inline, 1);
__ IncrementCounter(&Counters::call_global_inline_miss, 1);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ jmp(ic, RelocInfo::CODE_TARGET);
@@ -1007,10 +1314,8 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::named_store_global_inline, 1);
-
// Check that the map of the global has not changed.
- __ mov(ebx, (Operand(esp, kPointerSize)));
+ __ mov(ebx, Operand(esp, kPointerSize));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Handle<Map>(object->map())));
__ j(not_equal, &miss, not_taken);
@@ -1020,11 +1325,11 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
__ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
// Return the value (register eax).
+ __ IncrementCounter(&Counters::named_store_global_inline, 1);
__ ret(0);
// Handle store cache miss.
__ bind(&miss);
- __ DecrementCounter(&Counters::named_store_global_inline, 1);
__ IncrementCounter(&Counters::named_store_global_inline_miss, 1);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ jmp(ic, RelocInfo::CODE_TARGET);
@@ -1089,7 +1394,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
GenerateLoadField(object, holder, eax, ebx, edx, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1110,7 +1415,7 @@ Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
callback, name, &miss);
__ bind(&miss);
@@ -1132,7 +1437,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
GenerateLoadConstant(object, holder, eax, ebx, edx, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1152,12 +1457,15 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+
+ __ mov(eax, Operand(esp, kPointerSize));
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
GenerateLoadInterceptor(receiver,
holder,
- holder->InterceptorPropertyLookupHint(name),
+ &lookup,
eax,
ecx,
edx,
@@ -1185,10 +1493,8 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::named_load_global_inline, 1);
-
// Get the receiver from the stack.
- __ mov(eax, (Operand(esp, kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
// If the object is the holder then we know that it's a global
// object which can only happen for contextual loads. In this case,
@@ -1214,10 +1520,10 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
+ __ IncrementCounter(&Counters::named_load_global_inline, 1);
__ ret(0);
__ bind(&miss);
- __ DecrementCounter(&Counters::named_load_global_inline, 1);
__ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1237,8 +1543,8 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_field, 1);
// Check that the name has not changed.
@@ -1267,8 +1573,8 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_callback, 1);
// Check that the name has not changed.
@@ -1297,8 +1603,8 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_constant_function, 1);
// Check that the name has not changed.
@@ -1326,17 +1632,19 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_interceptor, 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
GenerateLoadInterceptor(receiver,
holder,
- Smi::FromInt(JSObject::kLookupInHolder),
+ &lookup,
ecx,
eax,
edx,
@@ -1362,8 +1670,8 @@ Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_array_length, 1);
// Check that the name has not changed.
@@ -1388,8 +1696,8 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_string_length, 1);
// Check that the name has not changed.
@@ -1414,8 +1722,8 @@ Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
// Check that the name has not changed.
diff --git a/V8Binding/v8/src/ic.cc b/V8Binding/v8/src/ic.cc
index 090d7a3..f4d74c9 100644
--- a/V8Binding/v8/src/ic.cc
+++ b/V8Binding/v8/src/ic.cc
@@ -737,9 +737,7 @@ Object* KeyedLoadIC::Load(State state,
return TypeError("non_object_property_load", object, name);
}
- // TODO(X64): Enable specialized stubs for length and prototype lookup.
-#ifndef V8_TARGET_ARCH_X64
- if (false && FLAG_use_ic) {
+ if (FLAG_use_ic) {
// Use specialized code for getting the length of strings.
if (object->IsString() && name->Equals(Heap::length_symbol())) {
Handle<String> string = Handle<String>::cast(object);
@@ -778,7 +776,6 @@ Object* KeyedLoadIC::Load(State state,
return Accessors::FunctionGetPrototype(*object, 0);
}
}
-#endif // !V8_TARGET_ARCH_X64
// Check if the name is trivially convertible to an index and get
// the element or char if so.
@@ -801,13 +798,9 @@ Object* KeyedLoadIC::Load(State state,
}
}
- // TODO(X64): Enable inline caching for load.
-#ifndef V8_TARGET_ARCH_X64
- // Update the inline cache.
if (FLAG_use_ic && lookup.IsLoaded()) {
UpdateCaches(&lookup, state, object, name);
}
-#endif
PropertyAttributes attr;
if (lookup.IsValid() && lookup.type() == INTERCEPTOR) {
@@ -978,10 +971,6 @@ Object* StoreIC::Store(State state,
return *value;
}
- // TODO(X64): Enable inline cache for StoreIC.
-#ifdef V8_TARGET_ARCH_X64
- USE(&LookupForWrite); // The compiler complains otherwise.
-#else
// Lookup the property locally in the receiver.
if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
LookupResult lookup;
@@ -989,7 +978,6 @@ Object* StoreIC::Store(State state,
UpdateCaches(&lookup, state, receiver, name, value);
}
}
-#endif
// Set the property.
return receiver->SetProperty(*name, *value, NONE);
@@ -1108,13 +1096,10 @@ Object* KeyedStoreIC::Store(State state,
LookupResult lookup;
receiver->LocalLookup(*name, &lookup);
- // TODO(X64): Enable inline cache for KeyedStoreIC.
-#ifndef V8_TARGET_ARCH_X64
// Update inline cache and stub cache.
if (FLAG_use_ic && lookup.IsLoaded()) {
UpdateCaches(&lookup, state, receiver, name, value);
}
-#endif
// Set the property.
return receiver->SetProperty(*name, *value, NONE);
diff --git a/V8Binding/v8/src/ic.h b/V8Binding/v8/src/ic.h
index 593519b..860b7e6 100644
--- a/V8Binding/v8/src/ic.h
+++ b/V8Binding/v8/src/ic.h
@@ -35,17 +35,19 @@ namespace internal {
// IC_UTIL_LIST defines all utility functions called from generated
// inline caching code. The argument for the macro, ICU, is the function name.
-#define IC_UTIL_LIST(ICU) \
- ICU(LoadIC_Miss) \
- ICU(KeyedLoadIC_Miss) \
- ICU(CallIC_Miss) \
- ICU(StoreIC_Miss) \
- ICU(SharedStoreIC_ExtendStorage) \
- ICU(KeyedStoreIC_Miss) \
- /* Utilities for IC stubs. */ \
- ICU(LoadCallbackProperty) \
- ICU(StoreCallbackProperty) \
- ICU(LoadInterceptorProperty) \
+#define IC_UTIL_LIST(ICU) \
+ ICU(LoadIC_Miss) \
+ ICU(KeyedLoadIC_Miss) \
+ ICU(CallIC_Miss) \
+ ICU(StoreIC_Miss) \
+ ICU(SharedStoreIC_ExtendStorage) \
+ ICU(KeyedStoreIC_Miss) \
+ /* Utilities for IC stubs. */ \
+ ICU(LoadCallbackProperty) \
+ ICU(StoreCallbackProperty) \
+ ICU(LoadPropertyWithInterceptorOnly) \
+ ICU(LoadPropertyWithInterceptorForLoad) \
+ ICU(LoadPropertyWithInterceptorForCall) \
ICU(StoreInterceptorProperty)
//
@@ -387,6 +389,10 @@ class KeyedStoreIC: public IC {
// Support for patching the map that is checked in an inlined
// version of keyed store.
+ // The address is the patch point for the IC call
+ // (Assembler::kTargetAddrToReturnAddrDist before the end of
+ // the call/return address).
+ // The map is the new map that the inlined code should check against.
static bool PatchInlinedStore(Address address, Object* map);
friend class IC;
diff --git a/V8Binding/v8/src/jsregexp-inl.h b/V8Binding/v8/src/jsregexp-inl.h
deleted file mode 100644
index cc90bd1..0000000
--- a/V8Binding/v8/src/jsregexp-inl.h
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JSREGEXP_INL_H_
-#define V8_JSREGEXP_INL_H_
-
-
-#include "jsregexp.h"
-#include "regexp-macro-assembler.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-template <typename C>
-bool ZoneSplayTree<C>::Insert(const Key& key, Locator* locator) {
- if (is_empty()) {
- // If the tree is empty, insert the new node.
- root_ = new Node(key, C::kNoValue);
- } else {
- // Splay on the key to move the last node on the search path
- // for the key to the root of the tree.
- Splay(key);
- // Ignore repeated insertions with the same key.
- int cmp = C::Compare(key, root_->key_);
- if (cmp == 0) {
- locator->bind(root_);
- return false;
- }
- // Insert the new node.
- Node* node = new Node(key, C::kNoValue);
- if (cmp > 0) {
- node->left_ = root_;
- node->right_ = root_->right_;
- root_->right_ = NULL;
- } else {
- node->right_ = root_;
- node->left_ = root_->left_;
- root_->left_ = NULL;
- }
- root_ = node;
- }
- locator->bind(root_);
- return true;
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::Find(const Key& key, Locator* locator) {
- if (is_empty())
- return false;
- Splay(key);
- if (C::Compare(key, root_->key_) == 0) {
- locator->bind(root_);
- return true;
- } else {
- return false;
- }
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::FindGreatestLessThan(const Key& key,
- Locator* locator) {
- if (is_empty())
- return false;
- // Splay on the key to move the node with the given key or the last
- // node on the search path to the top of the tree.
- Splay(key);
- // Now the result is either the root node or the greatest node in
- // the left subtree.
- int cmp = C::Compare(root_->key_, key);
- if (cmp <= 0) {
- locator->bind(root_);
- return true;
- } else {
- Node* temp = root_;
- root_ = root_->left_;
- bool result = FindGreatest(locator);
- root_ = temp;
- return result;
- }
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::FindLeastGreaterThan(const Key& key,
- Locator* locator) {
- if (is_empty())
- return false;
- // Splay on the key to move the node with the given key or the last
- // node on the search path to the top of the tree.
- Splay(key);
- // Now the result is either the root node or the least node in
- // the right subtree.
- int cmp = C::Compare(root_->key_, key);
- if (cmp >= 0) {
- locator->bind(root_);
- return true;
- } else {
- Node* temp = root_;
- root_ = root_->right_;
- bool result = FindLeast(locator);
- root_ = temp;
- return result;
- }
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::FindGreatest(Locator* locator) {
- if (is_empty())
- return false;
- Node* current = root_;
- while (current->right_ != NULL)
- current = current->right_;
- locator->bind(current);
- return true;
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::FindLeast(Locator* locator) {
- if (is_empty())
- return false;
- Node* current = root_;
- while (current->left_ != NULL)
- current = current->left_;
- locator->bind(current);
- return true;
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::Remove(const Key& key) {
- // Bail if the tree is empty
- if (is_empty())
- return false;
- // Splay on the key to move the node with the given key to the top.
- Splay(key);
- // Bail if the key is not in the tree
- if (C::Compare(key, root_->key_) != 0)
- return false;
- if (root_->left_ == NULL) {
- // No left child, so the new tree is just the right child.
- root_ = root_->right_;
- } else {
- // Left child exists.
- Node* right = root_->right_;
- // Make the original left child the new root.
- root_ = root_->left_;
- // Splay to make sure that the new root has an empty right child.
- Splay(key);
- // Insert the original right child as the right child of the new
- // root.
- root_->right_ = right;
- }
- return true;
-}
-
-
-template <typename C>
-void ZoneSplayTree<C>::Splay(const Key& key) {
- if (is_empty())
- return;
- Node dummy_node(C::kNoKey, C::kNoValue);
- // Create a dummy node. The use of the dummy node is a bit
- // counter-intuitive: The right child of the dummy node will hold
- // the L tree of the algorithm. The left child of the dummy node
- // will hold the R tree of the algorithm. Using a dummy node, left
- // and right will always be nodes and we avoid special cases.
- Node* dummy = &dummy_node;
- Node* left = dummy;
- Node* right = dummy;
- Node* current = root_;
- while (true) {
- int cmp = C::Compare(key, current->key_);
- if (cmp < 0) {
- if (current->left_ == NULL)
- break;
- if (C::Compare(key, current->left_->key_) < 0) {
- // Rotate right.
- Node* temp = current->left_;
- current->left_ = temp->right_;
- temp->right_ = current;
- current = temp;
- if (current->left_ == NULL)
- break;
- }
- // Link right.
- right->left_ = current;
- right = current;
- current = current->left_;
- } else if (cmp > 0) {
- if (current->right_ == NULL)
- break;
- if (C::Compare(key, current->right_->key_) > 0) {
- // Rotate left.
- Node* temp = current->right_;
- current->right_ = temp->left_;
- temp->left_ = current;
- current = temp;
- if (current->right_ == NULL)
- break;
- }
- // Link left.
- left->right_ = current;
- left = current;
- current = current->right_;
- } else {
- break;
- }
- }
- // Assemble.
- left->right_ = current->left_;
- right->left_ = current->right_;
- current->left_ = dummy->right_;
- current->right_ = dummy->left_;
- root_ = current;
-}
-
-
-template <typename Node, class Callback>
-static void DoForEach(Node* node, Callback* callback) {
- if (node == NULL) return;
- DoForEach<Node, Callback>(node->left(), callback);
- callback->Call(node->key(), node->value());
- DoForEach<Node, Callback>(node->right(), callback);
-}
-
-
-}} // namespace v8::internal
-
-
-#endif // V8_JSREGEXP_INL_H_
diff --git a/V8Binding/v8/src/jsregexp.cc b/V8Binding/v8/src/jsregexp.cc
index 852d431..bd51102 100644
--- a/V8Binding/v8/src/jsregexp.cc
+++ b/V8Binding/v8/src/jsregexp.cc
@@ -31,7 +31,7 @@
#include "compiler.h"
#include "execution.h"
#include "factory.h"
-#include "jsregexp-inl.h"
+#include "jsregexp.h"
#include "platform.h"
#include "runtime.h"
#include "top.h"
@@ -254,7 +254,7 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
{
NoHandleAllocation no_handles;
- FixedArray* array = last_match_info->elements();
+ FixedArray* array = FixedArray::cast(last_match_info->elements());
SetAtomLastCapture(array, *subject, value, value + needle->length());
}
return last_match_info;
@@ -442,7 +442,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
if (res != RegExpMacroAssemblerIA32::SUCCESS) return Factory::null_value();
- array = Handle<FixedArray>(last_match_info->elements());
+ array = Handle<FixedArray>(FixedArray::cast(last_match_info->elements()));
ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
// The captures come in (start, end+1) pairs.
for (int i = 0; i < number_of_capture_registers; i += 2) {
@@ -475,7 +475,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
return Factory::null_value();
}
- array = Handle<FixedArray>(last_match_info->elements());
+ array = Handle<FixedArray>(FixedArray::cast(last_match_info->elements()));
ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
// The captures come in (start, end+1) pairs.
for (int i = 0; i < number_of_capture_registers; i += 2) {
diff --git a/V8Binding/v8/src/jsregexp.h b/V8Binding/v8/src/jsregexp.h
index 0e7965c..3bc30b6 100644
--- a/V8Binding/v8/src/jsregexp.h
+++ b/V8Binding/v8/src/jsregexp.h
@@ -214,108 +214,6 @@ class CharacterRange {
};
-template <typename Node, class Callback>
-static void DoForEach(Node* node, Callback* callback);
-
-
-// A zone splay tree. The config type parameter encapsulates the
-// different configurations of a concrete splay tree:
-//
-// typedef Key: the key type
-// typedef Value: the value type
-// static const kNoKey: the dummy key used when no key is set
-// static const kNoValue: the dummy value used to initialize nodes
-// int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
-//
-template <typename Config>
-class ZoneSplayTree : public ZoneObject {
- public:
- typedef typename Config::Key Key;
- typedef typename Config::Value Value;
-
- class Locator;
-
- ZoneSplayTree() : root_(NULL) { }
-
- // Inserts the given key in this tree with the given value. Returns
- // true if a node was inserted, otherwise false. If found the locator
- // is enabled and provides access to the mapping for the key.
- bool Insert(const Key& key, Locator* locator);
-
- // Looks up the key in this tree and returns true if it was found,
- // otherwise false. If the node is found the locator is enabled and
- // provides access to the mapping for the key.
- bool Find(const Key& key, Locator* locator);
-
- // Finds the mapping with the greatest key less than or equal to the
- // given key.
- bool FindGreatestLessThan(const Key& key, Locator* locator);
-
- // Find the mapping with the greatest key in this tree.
- bool FindGreatest(Locator* locator);
-
- // Finds the mapping with the least key greater than or equal to the
- // given key.
- bool FindLeastGreaterThan(const Key& key, Locator* locator);
-
- // Find the mapping with the least key in this tree.
- bool FindLeast(Locator* locator);
-
- // Remove the node with the given key from the tree.
- bool Remove(const Key& key);
-
- bool is_empty() { return root_ == NULL; }
-
- // Perform the splay operation for the given key. Moves the node with
- // the given key to the top of the tree. If no node has the given
- // key, the last node on the search path is moved to the top of the
- // tree.
- void Splay(const Key& key);
-
- class Node : public ZoneObject {
- public:
- Node(const Key& key, const Value& value)
- : key_(key),
- value_(value),
- left_(NULL),
- right_(NULL) { }
- Key key() { return key_; }
- Value value() { return value_; }
- Node* left() { return left_; }
- Node* right() { return right_; }
- private:
- friend class ZoneSplayTree;
- friend class Locator;
- Key key_;
- Value value_;
- Node* left_;
- Node* right_;
- };
-
- // A locator provides access to a node in the tree without actually
- // exposing the node.
- class Locator {
- public:
- explicit Locator(Node* node) : node_(node) { }
- Locator() : node_(NULL) { }
- const Key& key() { return node_->key_; }
- Value& value() { return node_->value_; }
- void set_value(const Value& value) { node_->value_ = value; }
- inline void bind(Node* node) { node_ = node; }
- private:
- Node* node_;
- };
-
- template <class Callback>
- void ForEach(Callback* c) {
- DoForEach<typename ZoneSplayTree<Config>::Node, Callback>(root_, c);
- }
-
- private:
- Node* root_;
-};
-
-
// A set of unsigned integers that behaves especially well on small
// integers (< 32). May do zone-allocation.
class OutSet: public ZoneObject {
diff --git a/V8Binding/v8/src/log.cc b/V8Binding/v8/src/log.cc
index 33cf8e2..5680820 100644
--- a/V8Binding/v8/src/log.cc
+++ b/V8Binding/v8/src/log.cc
@@ -884,6 +884,21 @@ void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
}
+void Logger::HeapSampleJSConstructorEvent(const char* constructor,
+ int number, int bytes) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (!Log::IsEnabled() || !FLAG_log_gc) return;
+ LogMessageBuilder msg;
+ msg.Append("heap-js-cons-item,%s,%d,%d\n",
+ constructor != NULL ?
+ (constructor[0] != '\0' ? constructor : "(anonymous)") :
+ "(no_constructor)",
+ number, bytes);
+ msg.WriteToLogFile();
+#endif
+}
+
+
void Logger::DebugTag(const char* call_site_tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log) return;
@@ -942,38 +957,63 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
}
-bool Logger::IsProfilerPaused() {
- return profiler_->paused();
+int Logger::GetActiveProfilerModules() {
+ int result = PROFILER_MODULE_NONE;
+ if (!profiler_->paused()) {
+ result |= PROFILER_MODULE_CPU;
+ }
+ if (FLAG_log_gc) {
+ result |= PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS;
+ }
+ return result;
}
-void Logger::PauseProfiler() {
- if (profiler_->paused()) {
- return;
+void Logger::PauseProfiler(int flags) {
+ if (!Log::IsEnabled()) return;
+ const int active_modules = GetActiveProfilerModules();
+ const int modules_to_disable = active_modules & flags;
+ if (modules_to_disable == PROFILER_MODULE_NONE) return;
+
+ if (modules_to_disable & PROFILER_MODULE_CPU) {
+ profiler_->pause();
+ if (FLAG_prof_lazy) {
+ if (!FLAG_sliding_state_window) ticker_->Stop();
+ FLAG_log_code = false;
+ // Must be the same message as Log::kDynamicBufferSeal.
+ LOG(UncheckedStringEvent("profiler", "pause"));
+ }
}
- profiler_->pause();
- if (FLAG_prof_lazy) {
- if (!FLAG_sliding_state_window) ticker_->Stop();
- FLAG_log_code = false;
- // Must be the same message as Log::kDynamicBufferSeal.
- LOG(UncheckedStringEvent("profiler", "pause"));
+ if (modules_to_disable &
+ (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
+ FLAG_log_gc = false;
+ }
+ // Turn off logging if no active modules remain.
+ if ((active_modules & ~flags) == PROFILER_MODULE_NONE) {
+ is_logging_ = false;
}
- is_logging_ = false;
}
-void Logger::ResumeProfiler() {
- if (!profiler_->paused() || !Log::IsEnabled()) {
- return;
+void Logger::ResumeProfiler(int flags) {
+ if (!Log::IsEnabled()) return;
+ const int modules_to_enable = ~GetActiveProfilerModules() & flags;
+ if (modules_to_enable != PROFILER_MODULE_NONE) {
+ is_logging_ = true;
}
- is_logging_ = true;
- if (FLAG_prof_lazy) {
- LOG(UncheckedStringEvent("profiler", "resume"));
- FLAG_log_code = true;
- LogCompiledFunctions();
- if (!FLAG_sliding_state_window) ticker_->Start();
+ if (modules_to_enable & PROFILER_MODULE_CPU) {
+ if (FLAG_prof_lazy) {
+ LOG(UncheckedStringEvent("profiler", "resume"));
+ FLAG_log_code = true;
+ LogCompiledFunctions();
+ if (!FLAG_sliding_state_window) ticker_->Start();
+ }
+ profiler_->resume();
+ }
+ if (modules_to_enable &
+ (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
+ FLAG_log_gc = true;
}
- profiler_->resume();
}
@@ -981,7 +1021,7 @@ void Logger::ResumeProfiler() {
// either from main or Profiler's thread.
void Logger::StopLoggingAndProfiling() {
Log::stop();
- PauseProfiler();
+ PauseProfiler(PROFILER_MODULE_CPU);
}
@@ -1111,8 +1151,8 @@ bool Logger::Setup() {
break;
case 't': {
// %t expands to the current time in milliseconds.
- uint32_t time = static_cast<uint32_t>(OS::TimeCurrentMillis());
- stream.Add("%u", time);
+ double time = OS::TimeCurrentMillis();
+ stream.Add("%.0f", FmtElm(time));
break;
}
case '%':
diff --git a/V8Binding/v8/src/log.h b/V8Binding/v8/src/log.h
index 95c9cde..89f6cdb 100644
--- a/V8Binding/v8/src/log.h
+++ b/V8Binding/v8/src/log.h
@@ -219,6 +219,8 @@ class Logger {
static void HeapSampleBeginEvent(const char* space, const char* kind);
static void HeapSampleEndEvent(const char* space, const char* kind);
static void HeapSampleItemEvent(const char* type, int number, int bytes);
+ static void HeapSampleJSConstructorEvent(const char* constructor,
+ int number, int bytes);
static void HeapSampleStats(const char* space, const char* kind,
int capacity, int used);
@@ -247,11 +249,11 @@ class Logger {
}
// Pause/Resume collection of profiling data.
- // When data collection is paused, Tick events are discarded until
+ // When data collection is paused, CPU Tick events are discarded until
// data collection is Resumed.
- static bool IsProfilerPaused();
- static void PauseProfiler();
- static void ResumeProfiler();
+ static void PauseProfiler(int flags);
+ static void ResumeProfiler(int flags);
+ static int GetActiveProfilerModules();
// If logging is performed into a memory buffer, allows to
// retrieve previously written messages. See v8.h.
diff --git a/V8Binding/v8/src/messages.js b/V8Binding/v8/src/messages.js
index 870c969..fd505ff 100644
--- a/V8Binding/v8/src/messages.js
+++ b/V8Binding/v8/src/messages.js
@@ -561,20 +561,24 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) {
var kAddMessageAccessorsMarker = { };
// Defines accessors for a property that is calculated the first time
-// the property is read and then replaces the accessor with the value.
-// Also, setting the property causes the accessors to be deleted.
+// the property is read.
function DefineOneShotAccessor(obj, name, fun) {
// Note that the accessors consistently operate on 'obj', not 'this'.
// Since the object may occur in someone else's prototype chain we
// can't rely on 'this' being the same as 'obj'.
+ var hasBeenSet = false;
+ var value;
obj.__defineGetter__(name, function () {
- var value = fun(obj);
- obj[name] = value;
+ if (hasBeenSet) {
+ return value;
+ }
+ hasBeenSet = true;
+ value = fun(obj);
return value;
});
obj.__defineSetter__(name, function (v) {
- delete obj[name];
- obj[name] = v;
+ hasBeenSet = true;
+ value = v;
});
}
@@ -833,22 +837,25 @@ function DefineError(f) {
} else if (!IS_UNDEFINED(m)) {
this.message = ToString(m);
}
- var stackTraceLimit = $Error.stackTraceLimit;
- if (stackTraceLimit) {
- // Cap the limit to avoid extremely big traces
- if (stackTraceLimit < 0 || stackTraceLimit > 10000)
- stackTraceLimit = 10000;
- var raw_stack = %CollectStackTrace(f, stackTraceLimit);
- DefineOneShotAccessor(this, 'stack', function (obj) {
- return FormatRawStackTrace(obj, raw_stack);
- });
- }
+ captureStackTrace(this, f);
} else {
return new f(m);
}
});
}
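+// Installs a lazily-formatted 'stack' property on |obj|. The raw stack is
+// collected immediately, capped at $Error.stackTraceLimit frames; the trace
+// starts at the caller of |cons_opt| (or of this function if none is given).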
+function captureStackTrace(obj, cons_opt) {
+ var stackTraceLimit = $Error.stackTraceLimit;
+ if (!stackTraceLimit) return;
+ if (stackTraceLimit < 0 || stackTraceLimit > 10000)
+ stackTraceLimit = 10000;
+ var raw_stack = %CollectStackTrace(cons_opt ? cons_opt : captureStackTrace,
+ stackTraceLimit);
+ DefineOneShotAccessor(obj, 'stack', function (obj) {
+ return FormatRawStackTrace(obj, raw_stack);
+ });
+}
+
$Math.__proto__ = global.Object.prototype;
DefineError(function Error() { });
@@ -859,6 +866,8 @@ DefineError(function ReferenceError() { });
DefineError(function EvalError() { });
DefineError(function URIError() { });
+$Error.captureStackTrace = captureStackTrace;
+
// Setup extra properties of the Error.prototype object.
$Error.prototype.message = '';
diff --git a/V8Binding/v8/src/objects-debug.cc b/V8Binding/v8/src/objects-debug.cc
index 8c57afd..40001f9 100644
--- a/V8Binding/v8/src/objects-debug.cc
+++ b/V8Binding/v8/src/objects-debug.cc
@@ -115,6 +115,9 @@ void HeapObject::HeapObjectPrint() {
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayPrint();
break;
+ case PIXEL_ARRAY_TYPE:
+ PixelArray::cast(this)->PixelArrayPrint();
+ break;
case FILLER_TYPE:
PrintF("filler");
break;
@@ -191,6 +194,9 @@ void HeapObject::HeapObjectVerify() {
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayVerify();
break;
+ case PIXEL_ARRAY_TYPE:
+ PixelArray::cast(this)->PixelArrayVerify();
+ break;
case CODE_TYPE:
Code::cast(this)->CodeVerify();
break;
@@ -264,11 +270,21 @@ void ByteArray::ByteArrayPrint() {
}
+void PixelArray::PixelArrayPrint() {
+ PrintF("pixel array");
+}
+
+
void ByteArray::ByteArrayVerify() {
ASSERT(IsByteArray());
}
+void PixelArray::PixelArrayVerify() {
+ ASSERT(IsPixelArray());
+}
+
+
void JSObject::PrintProperties() {
if (HasFastProperties()) {
DescriptorArray* descs = map()->instance_descriptors();
@@ -312,15 +328,30 @@ void JSObject::PrintProperties() {
void JSObject::PrintElements() {
- if (HasFastElements()) {
- FixedArray* p = FixedArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- PrintF(" %d: ", i);
- p->get(i)->ShortPrint();
- PrintF("\n");
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS: {
+ // Print in array notation for non-sparse arrays.
+ FixedArray* p = FixedArray::cast(elements());
+ for (int i = 0; i < p->length(); i++) {
+ PrintF(" %d: ", i);
+ p->get(i)->ShortPrint();
+ PrintF("\n");
+ }
+ break;
}
- } else {
- elements()->Print();
+ case PIXEL_ELEMENTS: {
+ PixelArray* p = PixelArray::cast(elements());
+ for (int i = 0; i < p->length(); i++) {
+ PrintF(" %d: %d\n", i, p->get(i));
+ }
+ break;
+ }
+ case DICTIONARY_ELEMENTS:
+ elements()->Print();
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
}
@@ -402,6 +433,7 @@ static const char* TypeToString(InstanceType type) {
case LONG_EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
+ case PIXEL_ARRAY_TYPE: return "PIXEL_ARRAY";
case FILLER_TYPE: return "FILLER";
case JS_OBJECT_TYPE: return "JS_OBJECT";
case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";
@@ -666,7 +698,7 @@ void Oddball::OddballVerify() {
} else {
ASSERT(number->IsSmi());
int value = Smi::cast(number)->value();
- ASSERT(value == 0 || value == 1 || value == -1);
+ ASSERT(value == 0 || value == 1 || value == -1 || value == -2);
}
}
@@ -1015,21 +1047,35 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
dict->Capacity() - dict->NumberOfElements();
}
// Indexed properties
- if (HasFastElements()) {
- info->number_of_objects_with_fast_elements_++;
- int holes = 0;
- FixedArray* e = FixedArray::cast(elements());
- int len = e->length();
- for (int i = 0; i < len; i++) {
- if (e->get(i) == Heap::the_hole_value()) holes++;
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS: {
+ info->number_of_objects_with_fast_elements_++;
+ int holes = 0;
+ FixedArray* e = FixedArray::cast(elements());
+ int len = e->length();
+ for (int i = 0; i < len; i++) {
+ if (e->get(i) == Heap::the_hole_value()) holes++;
+ }
+ info->number_of_fast_used_elements_ += len - holes;
+ info->number_of_fast_unused_elements_ += holes;
+ break;
}
- info->number_of_fast_used_elements_ += len - holes;
- info->number_of_fast_unused_elements_ += holes;
- } else {
- NumberDictionary* dict = element_dictionary();
- info->number_of_slow_used_elements_ += dict->NumberOfElements();
- info->number_of_slow_unused_elements_ +=
- dict->Capacity() - dict->NumberOfElements();
+ case PIXEL_ELEMENTS: {
+ info->number_of_objects_with_fast_elements_++;
+ PixelArray* e = PixelArray::cast(elements());
+ info->number_of_fast_used_elements_ += e->length();
+ break;
+ }
+ case DICTIONARY_ELEMENTS: {
+ NumberDictionary* dict = element_dictionary();
+ info->number_of_slow_used_elements_ += dict->NumberOfElements();
+ info->number_of_slow_unused_elements_ +=
+ dict->Capacity() - dict->NumberOfElements();
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
}
diff --git a/V8Binding/v8/src/objects-inl.h b/V8Binding/v8/src/objects-inl.h
index 7abc7c3..a3bd3ce 100644
--- a/V8Binding/v8/src/objects-inl.h
+++ b/V8Binding/v8/src/objects-inl.h
@@ -100,6 +100,25 @@ PropertyDetails PropertyDetails::AsDeleted() {
}
+bool Object::IsInstanceOf(FunctionTemplateInfo* expected) {
+ // There is a constraint on the object; check.
+ if (!this->IsJSObject()) return false;
+ // Fetch the constructor function of the object.
+ Object* cons_obj = JSObject::cast(this)->map()->constructor();
+ if (!cons_obj->IsJSFunction()) return false;
+ JSFunction* fun = JSFunction::cast(cons_obj);
+ // Iterate through the chain of inheriting function templates to
+ // see if the required one occurs.
+ for (Object* type = fun->shared()->function_data();
+ type->IsFunctionTemplateInfo();
+ type = FunctionTemplateInfo::cast(type)->parent_template()) {
+ if (type == expected) return true;
+ }
+ // Didn't find the required type in the inheritance chain.
+ return false;
+}
+
+
bool Object::IsSmi() {
return HAS_SMI_TAG(this);
}
@@ -321,6 +340,12 @@ bool Object::IsByteArray() {
}
+bool Object::IsPixelArray() {
+ return Object::IsHeapObject() &&
+ HeapObject::cast(this)->map()->instance_type() == PIXEL_ARRAY_TYPE;
+}
+
+
bool Object::IsFailure() {
return HAS_FAILURE_TAG(this);
}
@@ -1043,7 +1068,22 @@ void HeapNumber::set_value(double value) {
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
-ACCESSORS(JSObject, elements, FixedArray, kElementsOffset)
+
+
+Array* JSObject::elements() {
+ Object* array = READ_FIELD(this, kElementsOffset);
+ // In the assert below Dictionary is covered under FixedArray.
+ ASSERT(array->IsFixedArray() || array->IsPixelArray());
+ return reinterpret_cast<Array*>(array);
+}
+
+
+void JSObject::set_elements(Array* value, WriteBarrierMode mode) {
+ // In the assert below Dictionary is covered under FixedArray.
+ ASSERT(value->IsFixedArray() || value->IsPixelArray());
+ WRITE_FIELD(this, kElementsOffset, value);
+ CONDITIONAL_WRITE_BARRIER(this, kElementsOffset, mode);
+}
void JSObject::initialize_properties() {
@@ -1502,6 +1542,7 @@ CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSRegExp)
CAST_ACCESSOR(Proxy)
CAST_ACCESSOR(ByteArray)
+CAST_ACCESSOR(PixelArray)
CAST_ACCESSOR(Struct)
@@ -1860,6 +1901,32 @@ Address ByteArray::GetDataStartAddress() {
}
+uint8_t* PixelArray::external_pointer() {
+ intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
+ return reinterpret_cast<uint8_t*>(ptr);
+}
+
+
+void PixelArray::set_external_pointer(uint8_t* value, WriteBarrierMode mode) {
+ intptr_t ptr = reinterpret_cast<intptr_t>(value);
+ WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
+}
+
+
+uint8_t PixelArray::get(int index) {
+ ASSERT((index >= 0) && (index < this->length()));
+ uint8_t* ptr = external_pointer();
+ return ptr[index];
+}
+
+
+void PixelArray::set(int index, uint8_t value) {
+ ASSERT((index >= 0) && (index < this->length()));
+ uint8_t* ptr = external_pointer();
+ ptr[index] = value;
+}
+
+
int Map::instance_size() {
return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
}
@@ -2293,6 +2360,11 @@ bool JSFunction::IsBoilerplate() {
}
+bool JSFunction::IsBuiltin() {
+ return context()->global()->IsJSBuiltinsObject();
+}
+
+
bool JSObject::IsLoaded() {
return !map()->needs_loading();
}
@@ -2523,8 +2595,33 @@ void JSRegExp::SetDataAt(int index, Object* value) {
}
+JSObject::ElementsKind JSObject::GetElementsKind() {
+ Array* array = elements();
+ if (array->IsFixedArray()) {
+ // Both FAST_ELEMENTS and DICTIONARY_ELEMENTS are backed by a FixedArray.
+ if (array->map() == Heap::fixed_array_map()) {
+ return FAST_ELEMENTS;
+ }
+ ASSERT(array->IsDictionary());
+ return DICTIONARY_ELEMENTS;
+ }
+ ASSERT(array->IsPixelArray());
+ return PIXEL_ELEMENTS;
+}
+
+
bool JSObject::HasFastElements() {
- return !elements()->IsDictionary();
+ return GetElementsKind() == FAST_ELEMENTS;
+}
+
+
+bool JSObject::HasDictionaryElements() {
+ return GetElementsKind() == DICTIONARY_ELEMENTS;
+}
+
+
+bool JSObject::HasPixelElements() {
+ return GetElementsKind() == PIXEL_ELEMENTS;
}
@@ -2545,7 +2642,7 @@ StringDictionary* JSObject::property_dictionary() {
NumberDictionary* JSObject::element_dictionary() {
- ASSERT(!HasFastElements());
+ ASSERT(HasDictionaryElements());
return NumberDictionary::cast(elements());
}
@@ -2651,24 +2748,6 @@ bool JSObject::HasElement(uint32_t index) {
}
-Smi* JSObject::InterceptorPropertyLookupHint(String* name) {
- // TODO(antonm): Do we want to do any shortcuts for global object?
- if (HasFastProperties()) {
- LookupResult lookup;
- LocalLookupRealNamedProperty(name, &lookup);
- if (lookup.IsValid()) {
- if (lookup.type() == FIELD && lookup.IsCacheable()) {
- return Smi::FromInt(lookup.GetFieldIndex());
- }
- } else {
- return Smi::FromInt(kLookupInPrototype);
- }
- }
-
- return Smi::FromInt(kLookupInHolder);
-}
-
-
bool AccessorInfo::all_can_read() {
return BooleanBit::get(flag(), kAllCanReadBit);
}
diff --git a/V8Binding/v8/src/objects.cc b/V8Binding/v8/src/objects.cc
index 72412c1..c3051b8 100644
--- a/V8Binding/v8/src/objects.cc
+++ b/V8Binding/v8/src/objects.cc
@@ -50,24 +50,6 @@ namespace internal {
const int kGetterIndex = 0;
const int kSetterIndex = 1;
-bool Object::IsInstanceOf(FunctionTemplateInfo* expected) {
- // There is a constraint on the object; check
- if (!this->IsJSObject()) return false;
- // Fetch the constructor function of the object
- Object* cons_obj = JSObject::cast(this)->map()->constructor();
- if (!cons_obj->IsJSFunction()) return false;
- JSFunction* fun = JSFunction::cast(cons_obj);
- // Iterate through the chain of inheriting function templates to
- // see if the required one occurs.
- for (Object* type = fun->shared()->function_data();
- type->IsFunctionTemplateInfo();
- type = FunctionTemplateInfo::cast(type)->parent_template()) {
- if (type == expected) return true;
- }
- // Didn't find the required type in the inheritance chain.
- return false;
-}
-
static Object* CreateJSValue(JSFunction* constructor, Object* value) {
Object* result = Heap::AllocateJSObject(constructor);
@@ -1006,6 +988,9 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
case BYTE_ARRAY_TYPE:
accumulator->Add("<ByteArray[%u]>", ByteArray::cast(this)->length());
break;
+ case PIXEL_ARRAY_TYPE:
+ accumulator->Add("<PixelArray[%u]>", PixelArray::cast(this)->length());
+ break;
case SHARED_FUNCTION_INFO_TYPE:
accumulator->Add("<SharedFunctionInfo>");
break;
@@ -1147,6 +1132,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case HEAP_NUMBER_TYPE:
case FILLER_TYPE:
case BYTE_ARRAY_TYPE:
+ case PIXEL_ARRAY_TYPE:
break;
case SHARED_FUNCTION_INFO_TYPE: {
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(this);
@@ -1240,7 +1226,7 @@ Object* JSObject::AddFastProperty(String* name,
// hidden symbols) and is not a real identifier.
StringInputBuffer buffer(name);
if (!Scanner::IsIdentifier(&buffer) && name != Heap::hidden_symbol()) {
- Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
+ Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (obj->IsFailure()) return obj;
return AddSlowProperty(name, value, attributes);
}
@@ -1278,7 +1264,7 @@ Object* JSObject::AddFastProperty(String* name,
if (map()->unused_property_fields() == 0) {
if (properties()->length() > kMaxFastProperties) {
- Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
+ Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (obj->IsFailure()) return obj;
return AddSlowProperty(name, value, attributes);
}
@@ -1399,7 +1385,7 @@ Object* JSObject::AddProperty(String* name,
} else {
// Normalize the object to prevent very large instance descriptors.
// This eliminates unwanted N^2 allocation and lookup behavior.
- Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
+ Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (obj->IsFailure()) return obj;
}
}
@@ -1469,7 +1455,7 @@ Object* JSObject::ConvertDescriptorToField(String* name,
PropertyAttributes attributes) {
if (map()->unused_property_fields() == 0 &&
properties()->length() > kMaxFastProperties) {
- Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
+ Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (obj->IsFailure()) return obj;
return ReplaceSlowProperty(name, new_value, attributes);
}
@@ -1669,7 +1655,9 @@ Object* JSObject::LookupCallbackSetterInPrototypes(uint32_t index) {
for (Object* pt = GetPrototype();
pt != Heap::null_value();
pt = pt->GetPrototype()) {
- if (JSObject::cast(pt)->HasFastElements()) continue;
+ if (!JSObject::cast(pt)->HasDictionaryElements()) {
+ continue;
+ }
NumberDictionary* dictionary = JSObject::cast(pt)->element_dictionary();
int entry = dictionary->FindEntry(index);
if (entry != NumberDictionary::kNotFound) {
@@ -2118,12 +2106,22 @@ PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) {
}
-Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) {
+Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
+ int expected_additional_properties) {
if (!HasFastProperties()) return this;
- // Allocate new content
+ // The global object is always normalized.
+ ASSERT(!IsGlobalObject());
+
+ // Allocate new content.
+ int property_count = map()->NumberOfDescribedProperties();
+ if (expected_additional_properties > 0) {
+ property_count += expected_additional_properties;
+ } else {
+ property_count += 2; // Make space for two more properties.
+ }
Object* obj =
- StringDictionary::Allocate(map()->NumberOfDescribedProperties() * 2 + 4);
+ StringDictionary::Allocate(property_count * 2);
if (obj->IsFailure()) return obj;
StringDictionary* dictionary = StringDictionary::cast(obj);
@@ -2135,10 +2133,6 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) {
PropertyDetails d =
PropertyDetails(details.attributes(), NORMAL, details.index());
Object* value = descs->GetConstantFunction(i);
- if (IsGlobalObject()) {
- value = Heap::AllocateJSGlobalPropertyCell(value);
- if (value->IsFailure()) return value;
- }
Object* result = dictionary->Add(descs->GetKey(i), value, d);
if (result->IsFailure()) return result;
dictionary = StringDictionary::cast(result);
@@ -2148,10 +2142,6 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) {
PropertyDetails d =
PropertyDetails(details.attributes(), NORMAL, details.index());
Object* value = FastPropertyAt(descs->GetFieldIndex(i));
- if (IsGlobalObject()) {
- value = Heap::AllocateJSGlobalPropertyCell(value);
- if (value->IsFailure()) return value;
- }
Object* result = dictionary->Add(descs->GetKey(i), value, d);
if (result->IsFailure()) return result;
dictionary = StringDictionary::cast(result);
@@ -2161,10 +2151,6 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) {
PropertyDetails d =
PropertyDetails(details.attributes(), CALLBACKS, details.index());
Object* value = descs->GetCallbacksObject(i);
- if (IsGlobalObject()) {
- value = Heap::AllocateJSGlobalPropertyCell(value);
- if (value->IsFailure()) return value;
- }
Object* result = dictionary->Add(descs->GetKey(i), value, d);
if (result->IsFailure()) return result;
dictionary = StringDictionary::cast(result);
@@ -2176,9 +2162,7 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) {
case INTERCEPTOR:
break;
default:
- case NORMAL:
UNREACHABLE();
- break;
}
}
@@ -2231,7 +2215,8 @@ Object* JSObject::TransformToFastProperties(int unused_property_fields) {
Object* JSObject::NormalizeElements() {
- if (!HasFastElements()) return this;
+ ASSERT(!HasPixelElements());
+ if (HasDictionaryElements()) return this;
// Get number of entries.
FixedArray* array = FixedArray::cast(elements());
@@ -2276,7 +2261,7 @@ Object* JSObject::DeletePropertyPostInterceptor(String* name, DeleteMode mode) {
if (!result.IsValid()) return Heap::true_value();
// Normalize object if needed.
- Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
+ Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (obj->IsFailure()) return obj;
return DeleteNormalizedProperty(name, mode);
@@ -2317,20 +2302,28 @@ Object* JSObject::DeletePropertyWithInterceptor(String* name) {
Object* JSObject::DeleteElementPostInterceptor(uint32_t index,
DeleteMode mode) {
- if (HasFastElements()) {
- uint32_t length = IsJSArray() ?
+ ASSERT(!HasPixelElements());
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS: {
+ uint32_t length = IsJSArray() ?
static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if (index < length) {
- FixedArray::cast(elements())->set_the_hole(index);
+ if (index < length) {
+ FixedArray::cast(elements())->set_the_hole(index);
+ }
+ break;
}
- return Heap::true_value();
- }
- ASSERT(!HasFastElements());
- NumberDictionary* dictionary = element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- return dictionary->DeleteProperty(entry, mode);
+ case DICTIONARY_ELEMENTS: {
+ NumberDictionary* dictionary = element_dictionary();
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
+ return dictionary->DeleteProperty(entry, mode);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
return Heap::true_value();
}
@@ -2392,20 +2385,31 @@ Object* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
return DeleteElementWithInterceptor(index);
}
- if (HasFastElements()) {
- uint32_t length = IsJSArray() ?
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS: {
+ uint32_t length = IsJSArray() ?
static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if (index < length) {
- FixedArray::cast(elements())->set_the_hole(index);
+ if (index < length) {
+ FixedArray::cast(elements())->set_the_hole(index);
+ }
+ break;
}
- return Heap::true_value();
- } else {
- NumberDictionary* dictionary = element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- return dictionary->DeleteProperty(entry, mode);
+ case PIXEL_ELEMENTS: {
+ // Pixel elements cannot be deleted. Just silently ignore here.
+ break;
}
+ case DICTIONARY_ELEMENTS: {
+ NumberDictionary* dictionary = element_dictionary();
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
+ return dictionary->DeleteProperty(entry, mode);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
return Heap::true_value();
}
@@ -2454,7 +2458,7 @@ Object* JSObject::DeleteProperty(String* name, DeleteMode mode) {
mode);
}
// Normalize object if needed.
- Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
+ Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (obj->IsFailure()) return obj;
// Make sure the properties are normalized before removing the entry.
return DeleteNormalizedProperty(name, mode);
@@ -2483,21 +2487,32 @@ bool JSObject::ReferencesObject(Object* obj) {
}
// Check if the object is among the indexed properties.
- if (HasFastElements()) {
- int length = IsJSArray()
- ? Smi::cast(JSArray::cast(this)->length())->value()
- : FixedArray::cast(elements())->length();
- for (int i = 0; i < length; i++) {
- Object* element = FixedArray::cast(elements())->get(i);
- if (!element->IsTheHole() && element == obj) {
- return true;
+ switch (GetElementsKind()) {
+ case PIXEL_ELEMENTS:
+ // Raw pixels do not reference other objects.
+ break;
+ case FAST_ELEMENTS: {
+ int length = IsJSArray() ?
+ Smi::cast(JSArray::cast(this)->length())->value() :
+ FixedArray::cast(elements())->length();
+ for (int i = 0; i < length; i++) {
+ Object* element = FixedArray::cast(elements())->get(i);
+ if (!element->IsTheHole() && element == obj) {
+ return true;
+ }
}
+ break;
}
- } else {
- key = element_dictionary()->SlowReverseLookup(obj);
- if (key != Heap::undefined_value()) {
- return true;
+ case DICTIONARY_ELEMENTS: {
+ key = element_dictionary()->SlowReverseLookup(obj);
+ if (key != Heap::undefined_value()) {
+ return true;
+ }
+ break;
}
+ default:
+ UNREACHABLE();
+ break;
}
// For functions check the context. Boilerplate functions do
@@ -2715,20 +2730,31 @@ Object* JSObject::DefineGetterSetter(String* name,
if (is_element && IsJSArray()) return Heap::undefined_value();
if (is_element) {
- // Lookup the index.
- if (!HasFastElements()) {
- NumberDictionary* dictionary = element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* result = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.IsReadOnly()) return Heap::undefined_value();
- if (details.type() == CALLBACKS) {
- // Only accessors allowed as elements.
- ASSERT(result->IsFixedArray());
- return result;
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS:
+ break;
+ case PIXEL_ELEMENTS:
+ // Ignore getters and setters on pixel elements.
+ return Heap::undefined_value();
+ case DICTIONARY_ELEMENTS: {
+ // Lookup the index.
+ NumberDictionary* dictionary = element_dictionary();
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
+ Object* result = dictionary->ValueAt(entry);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ if (details.IsReadOnly()) return Heap::undefined_value();
+ if (details.type() == CALLBACKS) {
+ // Only accessors allowed as elements.
+ ASSERT(result->IsFixedArray());
+ return result;
+ }
}
+ break;
}
+ default:
+ UNREACHABLE();
+ break;
}
} else {
// Lookup the name.
@@ -2765,7 +2791,7 @@ Object* JSObject::DefineGetterSetter(String* name,
set_elements(NumberDictionary::cast(dict));
} else {
// Normalize object to make this operation simple.
- Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
+ Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (ok->IsFailure()) return ok;
// For the global object allocate a new map to invalidate the global inline
@@ -2827,9 +2853,9 @@ Object* JSObject::LookupAccessor(String* name, bool is_getter) {
for (Object* obj = this;
obj != Heap::null_value();
obj = JSObject::cast(obj)->GetPrototype()) {
- JSObject* jsObject = JSObject::cast(obj);
- if (!jsObject->HasFastElements()) {
- NumberDictionary* dictionary = jsObject->element_dictionary();
+ JSObject* js_object = JSObject::cast(obj);
+ if (js_object->HasDictionaryElements()) {
+ NumberDictionary* dictionary = js_object->element_dictionary();
int entry = dictionary->FindEntry(index);
if (entry != NumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
@@ -3029,28 +3055,35 @@ static bool HasKey(FixedArray* array, Object* key) {
Object* FixedArray::AddKeysFromJSArray(JSArray* array) {
- if (array->HasFastElements()) {
- return UnionOfKeys(array->elements());
- }
- ASSERT(!array->HasFastElements());
- NumberDictionary* dict = array->element_dictionary();
- int size = dict->NumberOfElements();
-
- // Allocate a temporary fixed array.
- Object* object = Heap::AllocateFixedArray(size);
- if (object->IsFailure()) return object;
- FixedArray* key_array = FixedArray::cast(object);
-
- int capacity = dict->Capacity();
- int pos = 0;
- // Copy the elements from the JSArray to the temporary fixed array.
- for (int i = 0; i < capacity; i++) {
- if (dict->IsKey(dict->KeyAt(i))) {
- key_array->set(pos++, dict->ValueAt(i));
+ ASSERT(!array->HasPixelElements());
+ switch (array->GetElementsKind()) {
+ case JSObject::FAST_ELEMENTS:
+ return UnionOfKeys(FixedArray::cast(array->elements()));
+ case JSObject::DICTIONARY_ELEMENTS: {
+ NumberDictionary* dict = array->element_dictionary();
+ int size = dict->NumberOfElements();
+
+ // Allocate a temporary fixed array.
+ Object* object = Heap::AllocateFixedArray(size);
+ if (object->IsFailure()) return object;
+ FixedArray* key_array = FixedArray::cast(object);
+
+ int capacity = dict->Capacity();
+ int pos = 0;
+ // Copy the elements from the JSArray to the temporary fixed array.
+ for (int i = 0; i < capacity; i++) {
+ if (dict->IsKey(dict->KeyAt(i))) {
+ key_array->set(pos++, dict->ValueAt(i));
+ }
+ }
+ // Compute the union of this and the temporary fixed array.
+ return UnionOfKeys(key_array);
}
+ default:
+ UNREACHABLE();
}
- // Compute the union of this and the temporary fixed array.
- return UnionOfKeys(key_array);
+ UNREACHABLE();
+ return Heap::null_value(); // Failure case needs to "return" a value.
}
@@ -5089,54 +5122,74 @@ void Code::Disassemble(const char* name) {
void JSObject::SetFastElements(FixedArray* elems) {
+ // We should never end in here with a pixel array.
+ ASSERT(!HasPixelElements());
#ifdef DEBUG
// Check the provided array is filled with the_hole.
uint32_t len = static_cast<uint32_t>(elems->length());
for (uint32_t i = 0; i < len; i++) ASSERT(elems->get(i)->IsTheHole());
#endif
WriteBarrierMode mode = elems->GetWriteBarrierMode();
- if (HasFastElements()) {
- FixedArray* old_elements = FixedArray::cast(elements());
- uint32_t old_length = static_cast<uint32_t>(old_elements->length());
- // Fill out the new array with this content and array holes.
- for (uint32_t i = 0; i < old_length; i++) {
- elems->set(i, old_elements->get(i), mode);
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS: {
+ FixedArray* old_elements = FixedArray::cast(elements());
+ uint32_t old_length = static_cast<uint32_t>(old_elements->length());
+ // Fill out the new array with this content and array holes.
+ for (uint32_t i = 0; i < old_length; i++) {
+ elems->set(i, old_elements->get(i), mode);
+ }
+ break;
}
- } else {
- NumberDictionary* dictionary = NumberDictionary::cast(elements());
- for (int i = 0; i < dictionary->Capacity(); i++) {
- Object* key = dictionary->KeyAt(i);
- if (key->IsNumber()) {
- uint32_t entry = static_cast<uint32_t>(key->Number());
- elems->set(entry, dictionary->ValueAt(i), mode);
+ case DICTIONARY_ELEMENTS: {
+ NumberDictionary* dictionary = NumberDictionary::cast(elements());
+ for (int i = 0; i < dictionary->Capacity(); i++) {
+ Object* key = dictionary->KeyAt(i);
+ if (key->IsNumber()) {
+ uint32_t entry = static_cast<uint32_t>(key->Number());
+ elems->set(entry, dictionary->ValueAt(i), mode);
+ }
}
+ break;
}
+ default:
+ UNREACHABLE();
+ break;
}
set_elements(elems);
}
Object* JSObject::SetSlowElements(Object* len) {
+ // We should never end in here with a pixel array.
+ ASSERT(!HasPixelElements());
+
uint32_t new_length = static_cast<uint32_t>(len->Number());
- if (!HasFastElements()) {
- if (IsJSArray()) {
- uint32_t old_length =
- static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
- element_dictionary()->RemoveNumberEntries(new_length, old_length),
- JSArray::cast(this)->set_length(len);
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS: {
+ // Make sure we never try to shrink dense arrays into sparse arrays.
+ ASSERT(static_cast<uint32_t>(FixedArray::cast(elements())->length()) <=
+ new_length);
+ Object* obj = NormalizeElements();
+ if (obj->IsFailure()) return obj;
+
+ // Update length for JSArrays.
+ if (IsJSArray()) JSArray::cast(this)->set_length(len);
+ break;
}
- return this;
+ case DICTIONARY_ELEMENTS: {
+ if (IsJSArray()) {
+ uint32_t old_length =
+ static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
+ element_dictionary()->RemoveNumberEntries(new_length, old_length);
+ JSArray::cast(this)->set_length(len);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
-
- // Make sure we never try to shrink dense arrays into sparse arrays.
- ASSERT(static_cast<uint32_t>(FixedArray::cast(elements())->length()) <=
- new_length);
- Object* obj = NormalizeElements();
- if (obj->IsFailure()) return obj;
-
- // Update length for JSArrays.
- if (IsJSArray()) JSArray::cast(this)->set_length(len);
return this;
}
@@ -5159,7 +5212,7 @@ Object* JSArray::Initialize(int capacity) {
void JSArray::Expand(int required_size) {
Handle<JSArray> self(this);
- Handle<FixedArray> old_backing(elements());
+ Handle<FixedArray> old_backing(FixedArray::cast(elements()));
int old_size = old_backing->length();
// Doubling in size would be overkill, but leave some slack to avoid
// constantly growing.
@@ -5186,52 +5239,62 @@ static Object* ArrayLengthRangeError() {
Object* JSObject::SetElementsLength(Object* len) {
+ // We should never end in here with a pixel array.
+ ASSERT(!HasPixelElements());
+
Object* smi_length = len->ToSmi();
if (smi_length->IsSmi()) {
int value = Smi::cast(smi_length)->value();
if (value < 0) return ArrayLengthRangeError();
- if (HasFastElements()) {
- int old_capacity = FixedArray::cast(elements())->length();
- if (value <= old_capacity) {
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS: {
+ int old_capacity = FixedArray::cast(elements())->length();
+ if (value <= old_capacity) {
+ if (IsJSArray()) {
+ int old_length = FastD2I(JSArray::cast(this)->length()->Number());
+ // NOTE: We may be able to optimize this by removing the
+ // last part of the elements backing storage array and
+ // setting the capacity to the new size.
+ for (int i = value; i < old_length; i++) {
+ FixedArray::cast(elements())->set_the_hole(i);
+ }
+ JSArray::cast(this)->set_length(smi_length, SKIP_WRITE_BARRIER);
+ }
+ return this;
+ }
+ int min = NewElementsCapacity(old_capacity);
+ int new_capacity = value > min ? value : min;
+ if (new_capacity <= kMaxFastElementsLength ||
+ !ShouldConvertToSlowElements(new_capacity)) {
+ Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity);
+ if (obj->IsFailure()) return obj;
+ if (IsJSArray()) JSArray::cast(this)->set_length(smi_length,
+ SKIP_WRITE_BARRIER);
+ SetFastElements(FixedArray::cast(obj));
+ return this;
+ }
+ break;
+ }
+ case DICTIONARY_ELEMENTS: {
if (IsJSArray()) {
- int old_length = FastD2I(JSArray::cast(this)->length()->Number());
- // NOTE: We may be able to optimize this by removing the
- // last part of the elements backing storage array and
- // setting the capacity to the new size.
- for (int i = value; i < old_length; i++) {
- FixedArray::cast(elements())->set_the_hole(i);
+ if (value == 0) {
+ // If the length of a slow array is reset to zero, we clear
+ // the array and flush backing storage. This has the added
+ // benefit that the array returns to fast mode.
+ initialize_elements();
+ } else {
+ // Remove deleted elements.
+ uint32_t old_length =
+ static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
+ element_dictionary()->RemoveNumberEntries(value, old_length);
}
JSArray::cast(this)->set_length(smi_length, SKIP_WRITE_BARRIER);
}
return this;
}
- int min = NewElementsCapacity(old_capacity);
- int new_capacity = value > min ? value : min;
- if (new_capacity <= kMaxFastElementsLength ||
- !ShouldConvertToSlowElements(new_capacity)) {
- Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity);
- if (obj->IsFailure()) return obj;
- if (IsJSArray()) JSArray::cast(this)->set_length(smi_length,
- SKIP_WRITE_BARRIER);
- SetFastElements(FixedArray::cast(obj));
- return this;
- }
- } else {
- if (IsJSArray()) {
- if (value == 0) {
- // If the length of a slow array is reset to zero, we clear
- // the array and flush backing storage. This has the added
- // benefit that the array returns to fast mode.
- initialize_elements();
- } else {
- // Remove deleted elements.
- uint32_t old_length =
- static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
- element_dictionary()->RemoveNumberEntries(value, old_length);
- }
- JSArray::cast(this)->set_length(smi_length, SKIP_WRITE_BARRIER);
- }
- return this;
+ default:
+ UNREACHABLE();
+ break;
}
}
@@ -5258,20 +5321,36 @@ Object* JSObject::SetElementsLength(Object* len) {
bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
- if (HasFastElements()) {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(
- Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if ((index < length) &&
- !FixedArray::cast(elements())->get(index)->IsTheHole()) {
- return true;
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS: {
+ uint32_t length = IsJSArray() ?
+ static_cast<uint32_t>
+ (Smi::cast(JSArray::cast(this)->length())->value()) :
+ static_cast<uint32_t>(FixedArray::cast(elements())->length());
+ if ((index < length) &&
+ !FixedArray::cast(elements())->get(index)->IsTheHole()) {
+ return true;
+ }
+ break;
}
- } else {
- if (element_dictionary()->FindEntry(index)
- != NumberDictionary::kNotFound) {
- return true;
+ case PIXEL_ELEMENTS: {
+ // TODO(iposva): Add testcase.
+ PixelArray* pixels = PixelArray::cast(elements());
+ if (index < static_cast<uint32_t>(pixels->length())) {
+ return true;
+ }
+ break;
+ }
+ case DICTIONARY_ELEMENTS: {
+ if (element_dictionary()->FindEntry(index)
+ != NumberDictionary::kNotFound) {
+ return true;
+ }
+ break;
}
+ default:
+ UNREACHABLE();
+ break;
}
// Handle [] on String objects.
@@ -5338,17 +5417,29 @@ bool JSObject::HasLocalElement(uint32_t index) {
// Handle [] on String objects.
if (this->IsStringObjectWithCharacterAt(index)) return true;
- if (HasFastElements()) {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(
- Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- return (index < length) &&
- !FixedArray::cast(elements())->get(index)->IsTheHole();
- } else {
- return element_dictionary()->FindEntry(index)
- != NumberDictionary::kNotFound;
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS: {
+ uint32_t length = IsJSArray() ?
+ static_cast<uint32_t>
+ (Smi::cast(JSArray::cast(this)->length())->value()) :
+ static_cast<uint32_t>(FixedArray::cast(elements())->length());
+ return (index < length) &&
+ !FixedArray::cast(elements())->get(index)->IsTheHole();
+ }
+ case PIXEL_ELEMENTS: {
+ PixelArray* pixels = PixelArray::cast(elements());
+ return (index < static_cast<uint32_t>(pixels->length()));
+ }
+ case DICTIONARY_ELEMENTS: {
+ return element_dictionary()->FindEntry(index)
+ != NumberDictionary::kNotFound;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
+ UNREACHABLE();
+ return Heap::null_value();
}
@@ -5365,18 +5456,33 @@ bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
return HasElementWithInterceptor(receiver, index);
}
- if (HasFastElements()) {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(
- Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if ((index < length) &&
- !FixedArray::cast(elements())->get(index)->IsTheHole()) return true;
- } else {
- if (element_dictionary()->FindEntry(index)
- != NumberDictionary::kNotFound) {
- return true;
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS: {
+ uint32_t length = IsJSArray() ?
+ static_cast<uint32_t>
+ (Smi::cast(JSArray::cast(this)->length())->value()) :
+ static_cast<uint32_t>(FixedArray::cast(elements())->length());
+ if ((index < length) &&
+ !FixedArray::cast(elements())->get(index)->IsTheHole()) return true;
+ break;
+ }
+ case PIXEL_ELEMENTS: {
+ PixelArray* pixels = PixelArray::cast(elements());
+ if (index < static_cast<uint32_t>(pixels->length())) {
+ return true;
+ }
+ break;
+ }
+ case DICTIONARY_ELEMENTS: {
+ if (element_dictionary()->FindEntry(index)
+ != NumberDictionary::kNotFound) {
+ return true;
+ }
+ break;
}
+ default:
+ UNREACHABLE();
+ break;
}
// Handle [] on String objects.
@@ -5472,7 +5578,7 @@ Object* JSObject::SetFastElement(uint32_t index, Object* value) {
// Otherwise default to slow case.
Object* obj = NormalizeElements();
if (obj->IsFailure()) return obj;
- ASSERT(!HasFastElements());
+ ASSERT(HasDictionaryElements());
return SetElement(index, value);
}
@@ -5501,80 +5607,95 @@ Object* JSObject::SetElement(uint32_t index, Object* value) {
Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
- // Fast case.
- if (HasFastElements()) return SetFastElement(index, value);
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS:
+ // Fast case.
+ return SetFastElement(index, value);
+ case PIXEL_ELEMENTS: {
+ PixelArray* pixels = PixelArray::cast(elements());
+ return pixels->SetValue(index, value);
+ }
+ case DICTIONARY_ELEMENTS: {
+ // Insert element in the dictionary.
+ FixedArray* elms = FixedArray::cast(elements());
+ NumberDictionary* dictionary = NumberDictionary::cast(elms);
- // Dictionary case.
- ASSERT(!HasFastElements());
-
- // Insert element in the dictionary.
- FixedArray* elms = FixedArray::cast(elements());
- NumberDictionary* dictionary = NumberDictionary::cast(elms);
-
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS) {
- // Only accessors allowed as elements.
- FixedArray* structure = FixedArray::cast(element);
- if (structure->get(kSetterIndex)->IsJSFunction()) {
- JSFunction* setter = JSFunction::cast(structure->get(kSetterIndex));
- return SetPropertyWithDefinedSetter(setter, value);
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
+ Object* element = dictionary->ValueAt(entry);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ if (details.type() == CALLBACKS) {
+ // Only accessors allowed as elements.
+ FixedArray* structure = FixedArray::cast(element);
+ if (structure->get(kSetterIndex)->IsJSFunction()) {
+ JSFunction* setter = JSFunction::cast(structure->get(kSetterIndex));
+ return SetPropertyWithDefinedSetter(setter, value);
+ } else {
+ Handle<Object> self(this);
+ Handle<Object> key(Factory::NewNumberFromUint(index));
+ Handle<Object> args[2] = { key, self };
+ return Top::Throw(*Factory::NewTypeError("no_setter_in_callback",
+ HandleVector(args, 2)));
+ }
+ } else {
+ dictionary->UpdateMaxNumberKey(index);
+ dictionary->ValueAtPut(entry, value);
+ }
} else {
- Handle<Object> self(this);
- Handle<Object> key(Factory::NewNumberFromUint(index));
- Handle<Object> args[2] = { key, self };
- return Top::Throw(*Factory::NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
- }
- } else {
- dictionary->UpdateMaxNumberKey(index);
- dictionary->ValueAtPut(entry, value);
- }
- } else {
- // Index not already used. Look for an accessor in the prototype chain.
- if (!IsJSArray()) {
- Object* setter = LookupCallbackSetterInPrototypes(index);
- if (setter->IsJSFunction()) {
- return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
+ // Index not already used. Look for an accessor in the prototype chain.
+ if (!IsJSArray()) {
+ Object* setter = LookupCallbackSetterInPrototypes(index);
+ if (setter->IsJSFunction()) {
+ return SetPropertyWithDefinedSetter(JSFunction::cast(setter),
+ value);
+ }
+ }
+ Object* result = dictionary->AtNumberPut(index, value);
+ if (result->IsFailure()) return result;
+ if (elms != FixedArray::cast(result)) {
+ set_elements(FixedArray::cast(result));
+ }
}
- }
- Object* result = dictionary->AtNumberPut(index, value);
- if (result->IsFailure()) return result;
- if (elms != FixedArray::cast(result)) {
- set_elements(FixedArray::cast(result));
- }
- }
- // Update the array length if this JSObject is an array.
- if (IsJSArray()) {
- JSArray* array = JSArray::cast(this);
- Object* return_value = array->JSArrayUpdateLengthFromIndex(index, value);
- if (return_value->IsFailure()) return return_value;
- }
+ // Update the array length if this JSObject is an array.
+ if (IsJSArray()) {
+ JSArray* array = JSArray::cast(this);
+ Object* return_value = array->JSArrayUpdateLengthFromIndex(index,
+ value);
+ if (return_value->IsFailure()) return return_value;
+ }
- // Attempt to put this object back in fast case.
- if (ShouldConvertToFastElements()) {
- uint32_t new_length = 0;
- if (IsJSArray()) {
- CHECK(Array::IndexFromObject(JSArray::cast(this)->length(), &new_length));
- JSArray::cast(this)->set_length(Smi::FromInt(new_length));
- } else {
- new_length = NumberDictionary::cast(elements())->max_number_key() + 1;
- }
- Object* obj = Heap::AllocateFixedArrayWithHoles(new_length);
- if (obj->IsFailure()) return obj;
- SetFastElements(FixedArray::cast(obj));
+ // Attempt to put this object back in fast case.
+ if (ShouldConvertToFastElements()) {
+ uint32_t new_length = 0;
+ if (IsJSArray()) {
+ CHECK(Array::IndexFromObject(JSArray::cast(this)->length(),
+ &new_length));
+ JSArray::cast(this)->set_length(Smi::FromInt(new_length));
+ } else {
+ new_length = NumberDictionary::cast(elements())->max_number_key() + 1;
+ }
+ Object* obj = Heap::AllocateFixedArrayWithHoles(new_length);
+ if (obj->IsFailure()) return obj;
+ SetFastElements(FixedArray::cast(obj));
#ifdef DEBUG
- if (FLAG_trace_normalization) {
- PrintF("Object elements are fast case again:\n");
- Print();
- }
+ if (FLAG_trace_normalization) {
+ PrintF("Object elements are fast case again:\n");
+ Print();
+ }
#endif
- }
+ }
- return value;
+ return value;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ // All possible cases have been handled above. Add a return to silence
+ // the compiler.
+ UNREACHABLE();
+ return Heap::null_value();
}
@@ -5597,32 +5718,45 @@ Object* JSObject::GetElementPostInterceptor(JSObject* receiver,
uint32_t index) {
// Get element works for both JSObject and JSArray since
// JSArray::length cannot change.
- if (HasFastElements()) {
- FixedArray* elms = FixedArray::cast(elements());
- if (index < static_cast<uint32_t>(elms->length())) {
- Object* value = elms->get(index);
- if (!value->IsTheHole()) return value;
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS: {
+ FixedArray* elms = FixedArray::cast(elements());
+ if (index < static_cast<uint32_t>(elms->length())) {
+ Object* value = elms->get(index);
+ if (!value->IsTheHole()) return value;
+ }
+ break;
}
- } else {
- NumberDictionary* dictionary = element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS) {
- // Only accessors allowed as elements.
- FixedArray* structure = FixedArray::cast(element);
- Object* getter = structure->get(kGetterIndex);
- if (getter->IsJSFunction()) {
- return GetPropertyWithDefinedGetter(receiver,
- JSFunction::cast(getter));
- } else {
- // Getter is not a function.
- return Heap::undefined_value();
+ case PIXEL_ELEMENTS: {
+ // TODO(iposva): Add testcase and implement.
+ UNIMPLEMENTED();
+ break;
+ }
+ case DICTIONARY_ELEMENTS: {
+ NumberDictionary* dictionary = element_dictionary();
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
+ Object* element = dictionary->ValueAt(entry);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ if (details.type() == CALLBACKS) {
+ // Only accessors allowed as elements.
+ FixedArray* structure = FixedArray::cast(element);
+ Object* getter = structure->get(kGetterIndex);
+ if (getter->IsJSFunction()) {
+ return GetPropertyWithDefinedGetter(receiver,
+ JSFunction::cast(getter));
+ } else {
+ // Getter is not a function.
+ return Heap::undefined_value();
+ }
}
+ return element;
}
- return element;
+ break;
}
+ default:
+ UNREACHABLE();
+ break;
}
// Continue searching via the prototype chain.
@@ -5681,31 +5815,44 @@ Object* JSObject::GetElementWithReceiver(JSObject* receiver, uint32_t index) {
// Get element works for both JSObject and JSArray since
// JSArray::length cannot change.
- if (HasFastElements()) {
- FixedArray* elms = FixedArray::cast(elements());
- if (index < static_cast<uint32_t>(elms->length())) {
- Object* value = elms->get(index);
- if (!value->IsTheHole()) return value;
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS: {
+ FixedArray* elms = FixedArray::cast(elements());
+ if (index < static_cast<uint32_t>(elms->length())) {
+ Object* value = elms->get(index);
+ if (!value->IsTheHole()) return value;
+ }
+ break;
}
- } else {
- NumberDictionary* dictionary = element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS) {
- // Only accessors allowed as elements.
- FixedArray* structure = FixedArray::cast(element);
- Object* getter = structure->get(kGetterIndex);
- if (getter->IsJSFunction()) {
- return GetPropertyWithDefinedGetter(receiver,
- JSFunction::cast(getter));
- } else {
- // Getter is not a function.
- return Heap::undefined_value();
+ case PIXEL_ELEMENTS: {
+ PixelArray* pixels = PixelArray::cast(elements());
+ if (index < static_cast<uint32_t>(pixels->length())) {
+ uint8_t value = pixels->get(index);
+ return Smi::FromInt(value);
+ }
+ break;
+ }
+ case DICTIONARY_ELEMENTS: {
+ NumberDictionary* dictionary = element_dictionary();
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
+ Object* element = dictionary->ValueAt(entry);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ if (details.type() == CALLBACKS) {
+ // Only accessors allowed as elements.
+ FixedArray* structure = FixedArray::cast(element);
+ Object* getter = structure->get(kGetterIndex);
+ if (getter->IsJSFunction()) {
+ return GetPropertyWithDefinedGetter(receiver,
+ JSFunction::cast(getter));
+ } else {
+ // Getter is not a function.
+ return Heap::undefined_value();
+ }
}
+ return element;
}
- return element;
+ break;
}
}
@@ -5719,16 +5866,27 @@ bool JSObject::HasDenseElements() {
int capacity = 0;
int number_of_elements = 0;
- if (HasFastElements()) {
- FixedArray* elms = FixedArray::cast(elements());
- capacity = elms->length();
- for (int i = 0; i < capacity; i++) {
- if (!elms->get(i)->IsTheHole()) number_of_elements++;
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS: {
+ FixedArray* elms = FixedArray::cast(elements());
+ capacity = elms->length();
+ for (int i = 0; i < capacity; i++) {
+ if (!elms->get(i)->IsTheHole()) number_of_elements++;
+ }
+ break;
}
- } else {
- NumberDictionary* dictionary = NumberDictionary::cast(elements());
- capacity = dictionary->Capacity();
- number_of_elements = dictionary->NumberOfElements();
+ case PIXEL_ELEMENTS: {
+ return true;
+ }
+ case DICTIONARY_ELEMENTS: {
+ NumberDictionary* dictionary = NumberDictionary::cast(elements());
+ capacity = dictionary->Capacity();
+ number_of_elements = dictionary->NumberOfElements();
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
if (capacity == 0) return true;
@@ -5747,7 +5905,7 @@ bool JSObject::ShouldConvertToSlowElements(int new_capacity) {
bool JSObject::ShouldConvertToFastElements() {
- ASSERT(!HasFastElements());
+ ASSERT(HasDictionaryElements());
NumberDictionary* dictionary = NumberDictionary::cast(elements());
// If the elements are sparse, we should not go back to fast case.
if (!HasDenseElements()) return false;
@@ -5848,12 +6006,12 @@ Object* JSObject::GetPropertyPostInterceptor(JSObject* receiver,
}
-Object* JSObject::GetPropertyWithInterceptorProper(
+Object* JSObject::GetPropertyWithInterceptor(
JSObject* receiver,
String* name,
PropertyAttributes* attributes) {
+ InterceptorInfo* interceptor = GetNamedInterceptor();
HandleScope scope;
- Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(name);
@@ -5872,85 +6030,14 @@ Object* JSObject::GetPropertyWithInterceptorProper(
VMState state(EXTERNAL);
result = getter(v8::Utils::ToLocal(name_handle), info);
}
- if (!Top::has_scheduled_exception() && !result.IsEmpty()) {
+ RETURN_IF_SCHEDULED_EXCEPTION();
+ if (!result.IsEmpty()) {
*attributes = NONE;
return *v8::Utils::OpenHandle(*result);
}
}
- *attributes = ABSENT;
- return Heap::undefined_value();
-}
-
-
-Object* JSObject::GetInterceptorPropertyWithLookupHint(
- JSObject* receiver,
- Smi* lookup_hint,
- String* name,
- PropertyAttributes* attributes) {
- HandleScope scope;
- Handle<JSObject> receiver_handle(receiver);
- Handle<JSObject> holder_handle(this);
- Handle<String> name_handle(name);
-
- Object* result = GetPropertyWithInterceptorProper(receiver,
- name,
- attributes);
- if (*attributes != ABSENT) {
- return result;
- }
- RETURN_IF_SCHEDULED_EXCEPTION();
-
- int property_index = lookup_hint->value();
- if (property_index >= 0) {
- result = holder_handle->FastPropertyAt(property_index);
- } else {
- switch (property_index) {
- case kLookupInPrototype: {
- Object* pt = holder_handle->GetPrototype();
- *attributes = ABSENT;
- if (pt == Heap::null_value()) return Heap::undefined_value();
- result = pt->GetPropertyWithReceiver(
- *receiver_handle,
- *name_handle,
- attributes);
- RETURN_IF_SCHEDULED_EXCEPTION();
- }
- break;
-
- case kLookupInHolder:
- result = holder_handle->GetPropertyPostInterceptor(
- *receiver_handle,
- *name_handle,
- attributes);
- RETURN_IF_SCHEDULED_EXCEPTION();
- break;
-
- default:
- UNREACHABLE();
- }
- }
-
- return result;
-}
-
-
-Object* JSObject::GetPropertyWithInterceptor(
- JSObject* receiver,
- String* name,
- PropertyAttributes* attributes) {
- HandleScope scope;
- Handle<JSObject> receiver_handle(receiver);
- Handle<JSObject> holder_handle(this);
- Handle<String> name_handle(name);
-
- Object* result = GetPropertyWithInterceptorProper(receiver, name, attributes);
- if (*attributes != ABSENT) {
- return result;
- }
- RETURN_IF_SCHEDULED_EXCEPTION();
-
- result = holder_handle->GetPropertyPostInterceptor(
+ Object* result = holder_handle->GetPropertyPostInterceptor(
*receiver_handle,
*name_handle,
attributes);
@@ -6001,16 +6088,30 @@ bool JSObject::HasRealElementProperty(uint32_t index) {
// Handle [] on String objects.
if (this->IsStringObjectWithCharacterAt(index)) return true;
- if (HasFastElements()) {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(
- Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- return (index < length) &&
- !FixedArray::cast(elements())->get(index)->IsTheHole();
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS: {
+ uint32_t length = IsJSArray() ?
+ static_cast<uint32_t>(
+ Smi::cast(JSArray::cast(this)->length())->value()) :
+ static_cast<uint32_t>(FixedArray::cast(elements())->length());
+ return (index < length) &&
+ !FixedArray::cast(elements())->get(index)->IsTheHole();
+ }
+ case PIXEL_ELEMENTS: {
+ PixelArray* pixels = PixelArray::cast(elements());
+ return index < static_cast<uint32_t>(pixels->length());
+ }
+ case DICTIONARY_ELEMENTS: {
+ return element_dictionary()->FindEntry(index)
+ != NumberDictionary::kNotFound;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
- return element_dictionary()->FindEntry(index)
- != NumberDictionary::kNotFound;
+ // All possibilities have been handled above already.
+ UNREACHABLE();
+ return Heap::null_value();
}
@@ -6193,24 +6294,43 @@ int JSObject::NumberOfEnumElements() {
int JSObject::GetLocalElementKeys(FixedArray* storage,
PropertyAttributes filter) {
int counter = 0;
- if (HasFastElements()) {
- int length = IsJSArray()
- ? Smi::cast(JSArray::cast(this)->length())->value()
- : FixedArray::cast(elements())->length();
- for (int i = 0; i < length; i++) {
- if (!FixedArray::cast(elements())->get(i)->IsTheHole()) {
- if (storage) {
- storage->set(counter, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS: {
+ int length = IsJSArray() ?
+ Smi::cast(JSArray::cast(this)->length())->value() :
+ FixedArray::cast(elements())->length();
+ for (int i = 0; i < length; i++) {
+ if (!FixedArray::cast(elements())->get(i)->IsTheHole()) {
+ if (storage != NULL) {
+ storage->set(counter, Smi::FromInt(i), SKIP_WRITE_BARRIER);
+ }
+ counter++;
+ }
+ }
+ ASSERT(!storage || storage->length() >= counter);
+ break;
+ }
+ case PIXEL_ELEMENTS: {
+ int length = PixelArray::cast(elements())->length();
+ while (counter < length) {
+ if (storage != NULL) {
+ storage->set(counter, Smi::FromInt(counter), SKIP_WRITE_BARRIER);
}
counter++;
}
+ ASSERT(!storage || storage->length() >= counter);
+ break;
}
- ASSERT(!storage || storage->length() >= counter);
- } else {
- if (storage) {
- element_dictionary()->CopyKeysTo(storage, filter);
+ case DICTIONARY_ELEMENTS: {
+ if (storage != NULL) {
+ element_dictionary()->CopyKeysTo(storage, filter);
+ }
+ counter = element_dictionary()->NumberOfElementsFilterAttributes(filter);
+ break;
}
- counter = element_dictionary()->NumberOfElementsFilterAttributes(filter);
+ default:
+ UNREACHABLE();
+ break;
}
if (this->IsJSValue()) {
@@ -6669,7 +6789,7 @@ int Dictionary<StringDictionaryShape, String*>::NumberOfEnumElements();
// Collates undefined and unexisting elements below limit from position
// zero of the elements. The object stays in Dictionary mode.
Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
- ASSERT(!HasFastElements());
+ ASSERT(HasDictionaryElements());
// Must stay in dictionary mode, either because of requires_slow_elements,
// or because we are not going to sort (and therefore compact) all of the
// elements.
@@ -6743,7 +6863,9 @@ Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
// If the object is in dictionary mode, it is converted to fast elements
// mode.
Object* JSObject::PrepareElementsForSort(uint32_t limit) {
- if (!HasFastElements()) {
+ ASSERT(!HasPixelElements());
+
+ if (HasDictionaryElements()) {
// Convert to fast elements containing only the existing properties.
// Ordering is irrelevant, since we are going to sort anyway.
NumberDictionary* dict = element_dictionary();
@@ -6768,7 +6890,7 @@ Object* JSObject::PrepareElementsForSort(uint32_t limit) {
// Collect holes at the end, undefined before that and the rest at the
// start, and return the number of non-hole, non-undefined values.
- FixedArray* elements = this->elements();
+ FixedArray* elements = FixedArray::cast(this->elements());
uint32_t elements_length = static_cast<uint32_t>(elements->length());
if (limit > elements_length) {
limit = elements_length ;
@@ -6838,6 +6960,41 @@ Object* JSObject::PrepareElementsForSort(uint32_t limit) {
}
+Object* PixelArray::SetValue(uint32_t index, Object* value) {
+ uint8_t clamped_value = 0;
+ if (index < static_cast<uint32_t>(length())) {
+ if (value->IsSmi()) {
+ int int_value = Smi::cast(value)->value();
+ if (int_value < 0) {
+ clamped_value = 0;
+ } else if (int_value > 255) {
+ clamped_value = 255;
+ } else {
+ clamped_value = static_cast<uint8_t>(int_value);
+ }
+ } else if (value->IsHeapNumber()) {
+ double double_value = HeapNumber::cast(value)->value();
+ if (!(double_value > 0)) {
+ // NaN and less than zero clamp to zero.
+ clamped_value = 0;
+ } else if (double_value > 255) {
+ // Greater than 255 clamp to 255.
+ clamped_value = 255;
+ } else {
+ // Other doubles are rounded to the nearest integer.
+ clamped_value = static_cast<uint8_t>(double_value + 0.5);
+ }
+ } else {
+ // Clamp undefined to zero (default). All other types have been
+ // converted to a number type further up in the call chain.
+ ASSERT(value->IsUndefined());
+ }
+ set(index, clamped_value);
+ }
+ return Smi::FromInt(clamped_value);
+}
+
+
Object* GlobalObject::GetPropertyCell(LookupResult* result) {
ASSERT(!HasFastProperties());
Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
diff --git a/V8Binding/v8/src/objects.h b/V8Binding/v8/src/objects.h
index 5e5eb6b..03f0f3d 100644
--- a/V8Binding/v8/src/objects.h
+++ b/V8Binding/v8/src/objects.h
@@ -52,6 +52,7 @@
// - JSValue
// - Array
// - ByteArray
+// - PixelArray
// - FixedArray
// - DescriptorArray
// - HashTable
@@ -95,7 +96,6 @@
// HeapObject: [32 bit direct pointer] (4 byte aligned) | 01
// Failure: [30 bit signed int] 11
-
// Ecma-262 3rd 8.6.1
enum PropertyAttributes {
NONE = v8::None,
@@ -270,6 +270,7 @@ enum PropertyNormalizationMode {
V(ODDBALL_TYPE) \
V(PROXY_TYPE) \
V(BYTE_ARRAY_TYPE) \
+ V(PIXEL_ARRAY_TYPE) \
V(FILLER_TYPE) \
\
V(ACCESSOR_INFO_TYPE) \
@@ -659,6 +660,7 @@ enum InstanceType {
JS_GLOBAL_PROPERTY_CELL_TYPE,
PROXY_TYPE,
BYTE_ARRAY_TYPE,
+ PIXEL_ARRAY_TYPE,
FILLER_TYPE,
SMI_TYPE,
@@ -760,6 +762,7 @@ class Object BASE_EMBEDDED {
inline bool IsNumber();
inline bool IsByteArray();
+ inline bool IsPixelArray();
inline bool IsFailure();
inline bool IsRetryAfterGC();
inline bool IsOutOfMemoryFailure();
@@ -798,7 +801,7 @@ class Object BASE_EMBEDDED {
// Returns true if this object is an instance of the specified
// function template.
- bool IsInstanceOf(FunctionTemplateInfo* type);
+ inline bool IsInstanceOf(FunctionTemplateInfo* type);
inline bool IsStruct();
#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) inline bool Is##Name();
@@ -1302,6 +1305,11 @@ class HeapNumber: public HeapObject {
class JSObject: public HeapObject {
public:
enum DeleteMode { NORMAL_DELETION, FORCE_DELETION };
+ enum ElementsKind {
+ FAST_ELEMENTS,
+ DICTIONARY_ELEMENTS,
+ PIXEL_ELEMENTS
+ };
// [properties]: Backing storage for properties.
// properties is a FixedArray in the fast case, and a Dictionary in the
@@ -1313,10 +1321,13 @@ class JSObject: public HeapObject {
// [elements]: The elements (properties with names that are integers).
// elements is a FixedArray in the fast case, and a Dictionary in the slow
- // case.
- DECL_ACCESSORS(elements, FixedArray) // Get and set fast elements.
+ // case or a PixelArray in a special case.
+ DECL_ACCESSORS(elements, Array) // Get and set fast elements.
inline void initialize_elements();
+ inline ElementsKind GetElementsKind();
inline bool HasFastElements();
+ inline bool HasDictionaryElements();
+ inline bool HasPixelElements();
inline NumberDictionary* element_dictionary(); // Gets slow elements.
// Collects elements starting at index 0.
@@ -1496,14 +1507,6 @@ class JSObject: public HeapObject {
Object* LookupCallbackSetterInPrototypes(uint32_t index);
void LookupCallback(String* name, LookupResult* result);
- inline Smi* InterceptorPropertyLookupHint(String* name);
- Object* GetInterceptorPropertyWithLookupHint(JSObject* receiver,
- Smi* lookup_hint,
- String* name,
- PropertyAttributes* attributes);
- static const int kLookupInHolder = -1;
- static const int kLookupInPrototype = -2;
-
// Returns the number of properties on this object filtering out properties
// with the specified attributes (ignoring interceptors).
int NumberOfLocalProperties(PropertyAttributes filter);
@@ -1581,8 +1584,11 @@ class JSObject: public HeapObject {
PropertyAttributes attributes);
// Convert the object to use the canonical dictionary
- // representation.
- Object* NormalizeProperties(PropertyNormalizationMode mode);
+ // representation. If the object is expected to have additional properties
+ // added, that number can be passed so the backing store is allocated with
+ // enough initial capacity to hold them.
+ Object* NormalizeProperties(PropertyNormalizationMode mode,
+ int expected_additional_properties);
Object* NormalizeElements();
// Transform slow named properties to fast variants.
@@ -1695,12 +1701,6 @@ class JSObject: public HeapObject {
void LookupInDescriptor(String* name, LookupResult* result);
- // Attempts to get property with a named interceptor getter.
- // Sets |attributes| to ABSENT if interceptor didn't return anything
- Object* GetPropertyWithInterceptorProper(JSObject* receiver,
- String* name,
- PropertyAttributes* attributes);
-
DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
};
@@ -2440,6 +2440,45 @@ class ByteArray: public Array {
};
+// A PixelArray represents a fixed-size byte array with special semantics
+// used for implementing the CanvasPixelArray object. Please see the
+// specification at:
+// http://www.whatwg.org/specs/web-apps/current-work/
+// multipage/the-canvas-element.html#canvaspixelarray
+// In particular, a write clamps the value to 0 or 255 if it falls outside
+// this range.
+class PixelArray: public Array {
+ public:
+ // [external_pointer]: The pointer to the external memory area backing this
+ // pixel array.
+ DECL_ACCESSORS(external_pointer, uint8_t) // Pointer to the data store.
+
+ // Setter and getter.
+ inline uint8_t get(int index);
+ inline void set(int index, uint8_t value);
+
+ // This accessor applies the correct conversion from Smi, HeapNumber and
+ // undefined and clamps the converted value between 0 and 255.
+ Object* SetValue(uint32_t index, Object* value);
+
+ // Casting.
+ static inline PixelArray* cast(Object* obj);
+
+#ifdef DEBUG
+ void PixelArrayPrint();
+ void PixelArrayVerify();
+#endif // DEBUG
+
+ // PixelArray headers are not quadword aligned.
+ static const int kExternalPointerOffset = Array::kAlignedSize;
+ static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
+ static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PixelArray);
+};
+
+
// Code describes objects with on-the-fly generated machine code.
class Code: public HeapObject {
public:
@@ -3137,6 +3176,9 @@ class JSFunction: public JSObject {
// function.
inline bool IsBoilerplate();
+ // Tells whether this function is builtin.
+ inline bool IsBuiltin();
+
// [literals]: Fixed array holding the materialized literals.
//
// If the function contains object, regexp or array literals, the
@@ -3996,8 +4038,8 @@ class SlicedString: public String {
// Layout description
#if V8_HOST_ARCH_64_BIT
// Optimizations expect buffer to be located at same offset as a ConsString's
- // first substring. In 64 bit mode we have room for the size before the
- // buffer.
+ // first substring. In 64 bit mode we have room for the start offset before
+ // the buffer.
static const int kStartOffset = String::kSize;
static const int kBufferOffset = kStartOffset + kIntSize;
static const int kSize = kBufferOffset + kPointerSize;
diff --git a/V8Binding/v8/src/parser.cc b/V8Binding/v8/src/parser.cc
index da2b286..348c12a 100644
--- a/V8Binding/v8/src/parser.cc
+++ b/V8Binding/v8/src/parser.cc
@@ -1059,7 +1059,7 @@ VariableProxy* PreParser::Declare(Handle<String> name, Variable::Mode mode,
class Target BASE_EMBEDDED {
public:
- Target(Parser* parser, Node* node)
+ Target(Parser* parser, AstNode* node)
: parser_(parser), node_(node), previous_(parser_->target_stack_) {
parser_->target_stack_ = this;
}
@@ -1069,11 +1069,11 @@ class Target BASE_EMBEDDED {
}
Target* previous() { return previous_; }
- Node* node() { return node_; }
+ AstNode* node() { return node_; }
private:
Parser* parser_;
- Node* node_;
+ AstNode* node_;
Target* previous_;
};
diff --git a/V8Binding/v8/src/platform-linux.cc b/V8Binding/v8/src/platform-linux.cc
index bccf9e6..6ec5070 100644
--- a/V8Binding/v8/src/platform-linux.cc
+++ b/V8Binding/v8/src/platform-linux.cc
@@ -223,62 +223,63 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
+void OS::LogSharedLibraryAddresses() {
#ifdef ENABLE_LOGGING_AND_PROFILING
-static uintptr_t StringToULong(char* buffer) {
- return strtoul(buffer, NULL, 16); // NOLINT
-}
-#endif
+ // This function assumes that the layout of the file is as follows:
+ // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+ // If we encounter an unexpected situation, we abort scanning further entries.
+ FILE *fp = fopen("/proc/self/maps", "r");
+ if (fp == NULL) return;
+ // Allocate enough room to be able to store a full file name.
+ const int kLibNameLen = FILENAME_MAX + 1;
+ char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
-void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- static const int MAP_LENGTH = 1024;
- int fd = open("/proc/self/maps", O_RDONLY);
- if (fd < 0) return;
+ // This loop will terminate once the scanning hits an EOF.
while (true) {
- char addr_buffer[11];
- addr_buffer[0] = '0';
- addr_buffer[1] = 'x';
- addr_buffer[10] = 0;
- int result = read(fd, addr_buffer + 2, 8);
- if (result < 8) break;
- uintptr_t start = StringToULong(addr_buffer);
- result = read(fd, addr_buffer + 2, 1);
- if (result < 1) break;
- if (addr_buffer[2] != '-') break;
- result = read(fd, addr_buffer + 2, 8);
- if (result < 8) break;
- uintptr_t end = StringToULong(addr_buffer);
- char buffer[MAP_LENGTH];
- int bytes_read = -1;
- do {
- bytes_read++;
- if (bytes_read >= MAP_LENGTH - 1)
- break;
- result = read(fd, buffer + bytes_read, 1);
- if (result < 1) break;
- } while (buffer[bytes_read] != '\n');
- buffer[bytes_read] = 0;
- // Ignore mappings that are not executable.
- if (buffer[3] != 'x') continue;
- char* start_of_path = index(buffer, '/');
- // If there is no filename for this line then log it as an anonymous
- // mapping and use the address as its name.
- if (start_of_path == NULL) {
- // 40 is enough to print a 64 bit address range.
- ASSERT(sizeof(buffer) > 40);
- snprintf(buffer,
- sizeof(buffer),
- "%08" V8PRIxPTR "-%08" V8PRIxPTR,
- start,
- end);
- LOG(SharedLibraryEvent(buffer, start, end));
+ uintptr_t start, end;
+ char attr_r, attr_w, attr_x, attr_p;
+ // Parse the addresses and permission bits at the beginning of the line.
+ if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+ if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+ int c;
+ if (attr_r == 'r' && attr_x == 'x') {
+ // Found a readable and executable entry. Skip characters until we reach
+ // the beginning of the filename or the end of the line.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n') && (c != '/'));
+ if (c == EOF) break; // EOF: Was unexpected, just exit.
+
+ // Process the filename if found.
+ if (c == '/') {
+ ungetc(c, fp); // Push the '/' back into the stream to be read below.
+
+ // Read to the end of the line. Exit if the read fails.
+ if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+ // Drop the newline character read by fgets. We do not need to check
+ // for a zero-length string because we know that we at least read the
+ // '/' character.
+ lib_name[strlen(lib_name) - 1] = '\0';
+ } else {
+ // No library name found, just record the raw address range.
+ snprintf(lib_name, kLibNameLen,
+ "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+ }
+ LOG(SharedLibraryEvent(lib_name, start, end));
} else {
- buffer[bytes_read] = 0;
- LOG(SharedLibraryEvent(start_of_path, start, end));
+ // Entry does not describe executable data. Skip to the end of the line
+ // to set up reading of the next entry.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n'));
+ if (c == EOF) break;
}
}
- close(fd);
+ free(lib_name);
+ fclose(fp);
#endif
}
diff --git a/V8Binding/v8/src/platform-macos.cc b/V8Binding/v8/src/platform-macos.cc
index b5a57e1..c081064 100644
--- a/V8Binding/v8/src/platform-macos.cc
+++ b/V8Binding/v8/src/platform-macos.cc
@@ -424,14 +424,10 @@ class MacOSMutex : public Mutex {
public:
MacOSMutex() {
- // For some reason the compiler doesn't allow you to write
- // "this->mutex_ = PTHREAD_..." directly on mac.
- pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- pthread_mutex_init(&m, &attr);
- mutex_ = m;
+ pthread_mutex_init(&mutex_, &attr);
}
~MacOSMutex() { pthread_mutex_destroy(&mutex_); }
@@ -519,35 +515,31 @@ class Sampler::PlatformData : public Malloced {
thread_state_flavor_t flavor = x86_THREAD_STATE64;
x86_thread_state64_t state;
mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
+#if __DARWIN_UNIX03
+#define REGISTER_FIELD(name) __r ## name
+#else
+#define REGISTER_FIELD(name) r ## name
+#endif // __DARWIN_UNIX03
#elif V8_HOST_ARCH_IA32
thread_state_flavor_t flavor = i386_THREAD_STATE;
i386_thread_state_t state;
mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
+#if __DARWIN_UNIX03
+#define REGISTER_FIELD(name) __e ## name
+#else
+#define REGISTER_FIELD(name) e ## name
+#endif // __DARWIN_UNIX03
#else
#error Unsupported Mac OS X host architecture.
-#endif // V8_TARGET_ARCH_IA32
+#endif // V8_HOST_ARCH
+
if (thread_get_state(profiled_thread_,
flavor,
reinterpret_cast<natural_t*>(&state),
&count) == KERN_SUCCESS) {
-#if V8_HOST_ARCH_X64
- UNIMPLEMENTED();
- sample.pc = 0;
- sample.sp = 0;
- sample.fp = 0;
-#elif V8_HOST_ARCH_IA32
-#if __DARWIN_UNIX03
- sample.pc = state.__eip;
- sample.sp = state.__esp;
- sample.fp = state.__ebp;
-#else // !__DARWIN_UNIX03
- sample.pc = state.eip;
- sample.sp = state.esp;
- sample.fp = state.ebp;
-#endif // __DARWIN_UNIX03
-#else
-#error Unsupported Mac OS X host architecture.
-#endif // V8_HOST_ARCH_IA32
+ sample.pc = state.REGISTER_FIELD(ip);
+ sample.sp = state.REGISTER_FIELD(sp);
+ sample.fp = state.REGISTER_FIELD(bp);
sampler_->SampleStack(&sample);
}
thread_resume(profiled_thread_);
@@ -564,6 +556,8 @@ class Sampler::PlatformData : public Malloced {
}
};
+#undef REGISTER_FIELD
+
// Entry point for sampler thread.
static void* SamplerEntry(void* arg) {
diff --git a/V8Binding/v8/src/platform-nullos.cc b/V8Binding/v8/src/platform-nullos.cc
index 60ae76d..c0cf7f4 100644
--- a/V8Binding/v8/src/platform-nullos.cc
+++ b/V8Binding/v8/src/platform-nullos.cc
@@ -80,7 +80,7 @@ int64_t OS::Ticks() {
// Returns a string identifying the current timezone taking into
// account daylight saving.
-char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time) {
UNIMPLEMENTED();
return "<none>";
}
diff --git a/V8Binding/v8/src/platform-posix.cc b/V8Binding/v8/src/platform-posix.cc
index 6174522..b8fe967 100644
--- a/V8Binding/v8/src/platform-posix.cc
+++ b/V8Binding/v8/src/platform-posix.cc
@@ -86,16 +86,20 @@ int64_t OS::Ticks() {
}
-char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time) {
+ if (isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
- return const_cast<char*>(t->tm_zone);
+ if (NULL == t) return "";
+ return t->tm_zone;
}
double OS::DaylightSavingsOffset(double time) {
+ if (isnan(time)) return nan_value();
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
+ if (NULL == t) return nan_value();
return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
}
diff --git a/V8Binding/v8/src/platform-win32.cc b/V8Binding/v8/src/platform-win32.cc
index a8a6243..633b2c2 100644
--- a/V8Binding/v8/src/platform-win32.cc
+++ b/V8Binding/v8/src/platform-win32.cc
@@ -603,7 +603,7 @@ int64_t OS::Ticks() {
// Returns a string identifying the current timezone taking into
// account daylight saving.
-char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time) {
return Time(time).LocalTimezone();
}
diff --git a/V8Binding/v8/src/platform.h b/V8Binding/v8/src/platform.h
index 11a1e79..76bf891 100644
--- a/V8Binding/v8/src/platform.h
+++ b/V8Binding/v8/src/platform.h
@@ -143,7 +143,7 @@ class OS {
// Returns a string identifying the current time zone. The
// timestamp is used for determining if DST is in effect.
- static char* LocalTimezone(double time);
+ static const char* LocalTimezone(double time);
// Returns the local time offset in milliseconds east of UTC without
// taking daylight savings time into account.
diff --git a/V8Binding/v8/src/prettyprinter.cc b/V8Binding/v8/src/prettyprinter.cc
index 79f1883..7a8af40 100644
--- a/V8Binding/v8/src/prettyprinter.cc
+++ b/V8Binding/v8/src/prettyprinter.cc
@@ -417,7 +417,7 @@ void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
}
-const char* PrettyPrinter::Print(Node* node) {
+const char* PrettyPrinter::Print(AstNode* node) {
Init();
Visit(node);
return output_;
@@ -441,7 +441,7 @@ const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) {
}
-void PrettyPrinter::PrintOut(Node* node) {
+void PrettyPrinter::PrintOut(AstNode* node) {
PrettyPrinter printer;
PrintF("%s", printer.Print(node));
}
@@ -700,7 +700,7 @@ void AstPrinter::PrintLabelsIndented(const char* info, ZoneStringList* labels) {
}
-void AstPrinter::PrintIndentedVisit(const char* s, Node* node) {
+void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) {
IndentedScope indent(s);
Visit(node);
}
@@ -934,6 +934,9 @@ void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
case ObjectLiteral::Property::COMPUTED:
prop_kind = "PROPERTY - COMPUTED";
break;
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ prop_kind = "PROPERTY - MATERIALIZED_LITERAL";
+ break;
case ObjectLiteral::Property::PROTOTYPE:
prop_kind = "PROPERTY - PROTOTYPE";
break;
@@ -945,7 +948,6 @@ void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
break;
default:
UNREACHABLE();
- break;
}
IndentedScope prop(prop_kind);
PrintIndentedVisit("KEY", node->properties()->at(i)->key());
diff --git a/V8Binding/v8/src/prettyprinter.h b/V8Binding/v8/src/prettyprinter.h
index bfce9b0..8a6d1fb 100644
--- a/V8Binding/v8/src/prettyprinter.h
+++ b/V8Binding/v8/src/prettyprinter.h
@@ -42,17 +42,17 @@ class PrettyPrinter: public AstVisitor {
// The following routines print a node into a string.
// The result string is alive as long as the PrettyPrinter is alive.
- const char* Print(Node* node);
+ const char* Print(AstNode* node);
const char* PrintExpression(FunctionLiteral* program);
const char* PrintProgram(FunctionLiteral* program);
// Print a node to stdout.
- static void PrintOut(Node* node);
+ static void PrintOut(AstNode* node);
// Individual nodes
#define DEF_VISIT(type) \
virtual void Visit##type(type* node);
- NODE_LIST(DEF_VISIT)
+ AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
private:
@@ -87,12 +87,12 @@ class AstPrinter: public PrettyPrinter {
// Individual nodes
#define DEF_VISIT(type) \
virtual void Visit##type(type* node);
- NODE_LIST(DEF_VISIT)
+ AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
private:
friend class IndentedScope;
void PrintIndented(const char* txt);
- void PrintIndentedVisit(const char* s, Node* node);
+ void PrintIndentedVisit(const char* s, AstNode* node);
void PrintStatements(ZoneList<Statement*>* statements);
void PrintDeclarations(ZoneList<Declaration*>* declarations);
diff --git a/V8Binding/v8/src/rewriter.cc b/V8Binding/v8/src/rewriter.cc
index 8a7267a..d6ea68e 100644
--- a/V8Binding/v8/src/rewriter.cc
+++ b/V8Binding/v8/src/rewriter.cc
@@ -59,7 +59,7 @@ class AstOptimizer: public AstVisitor {
// Node visitors.
#define DEF_VISIT(type) \
virtual void Visit##type(type* node);
- NODE_LIST(DEF_VISIT)
+ AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
DISALLOW_COPY_AND_ASSIGN(AstOptimizer);
@@ -557,7 +557,7 @@ class Processor: public AstVisitor {
// Node visitors.
#define DEF_VISIT(type) \
virtual void Visit##type(type* node);
- NODE_LIST(DEF_VISIT)
+ AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
};
diff --git a/V8Binding/v8/src/runtime.cc b/V8Binding/v8/src/runtime.cc
index 350d391..56e9f85 100644
--- a/V8Binding/v8/src/runtime.cc
+++ b/V8Binding/v8/src/runtime.cc
@@ -155,33 +155,43 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
}
// Deep copy local elements.
- if (copy->HasFastElements()) {
- FixedArray* elements = copy->elements();
- WriteBarrierMode mode = elements->GetWriteBarrierMode();
- for (int i = 0; i < elements->length(); i++) {
- Object* value = elements->get(i);
- if (value->IsJSObject()) {
- JSObject* jsObject = JSObject::cast(value);
- result = DeepCopyBoilerplate(jsObject);
- if (result->IsFailure()) return result;
- elements->set(i, result, mode);
- }
- }
- } else {
- NumberDictionary* element_dictionary = copy->element_dictionary();
- int capacity = element_dictionary->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = element_dictionary->KeyAt(i);
- if (element_dictionary->IsKey(k)) {
- Object* value = element_dictionary->ValueAt(i);
+ // Pixel elements cannot be created using an object literal.
+ ASSERT(!copy->HasPixelElements());
+ switch (copy->GetElementsKind()) {
+ case JSObject::FAST_ELEMENTS: {
+ FixedArray* elements = FixedArray::cast(copy->elements());
+ WriteBarrierMode mode = elements->GetWriteBarrierMode();
+ for (int i = 0; i < elements->length(); i++) {
+ Object* value = elements->get(i);
if (value->IsJSObject()) {
JSObject* jsObject = JSObject::cast(value);
result = DeepCopyBoilerplate(jsObject);
if (result->IsFailure()) return result;
- element_dictionary->ValueAtPut(i, result);
+ elements->set(i, result, mode);
}
}
+ break;
}
+ case JSObject::DICTIONARY_ELEMENTS: {
+ NumberDictionary* element_dictionary = copy->element_dictionary();
+ int capacity = element_dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* k = element_dictionary->KeyAt(i);
+ if (element_dictionary->IsKey(k)) {
+ Object* value = element_dictionary->ValueAt(i);
+ if (value->IsJSObject()) {
+ JSObject* jsObject = JSObject::cast(value);
+ result = DeepCopyBoilerplate(jsObject);
+ if (result->IsFailure()) return result;
+ element_dictionary->ValueAtPut(i, result);
+ }
+ }
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
return copy;
}
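The rewrite replaces the old two-way branch on HasFastElements with an exhaustive switch over the elements kind, so a representation that neither arm can handle (pixel elements are asserted away above) now trips UNREACHABLE instead of silently running the wrong loop. A standalone sketch of the same shape, with invented container types standing in for FixedArray and NumberDictionary:

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <vector>

    struct Node {
      enum Kind { FAST, DICTIONARY } kind = FAST;
      std::vector<std::shared_ptr<Node>> fast;         // dense backing store
      std::map<uint32_t, std::shared_ptr<Node>> dict;  // sparse backing store
    };

    // Recursively copies nested objects; the switch handles each kind explicitly.
    std::shared_ptr<Node> DeepCopy(const Node& src) {
      auto copy = std::make_shared<Node>(src);
      switch (copy->kind) {
        case Node::FAST:
          for (auto& child : copy->fast) {
            if (child) child = DeepCopy(*child);
          }
          break;
        case Node::DICTIONARY:
          for (auto& entry : copy->dict) {
            if (entry.second) entry.second = DeepCopy(*entry.second);
          }
          break;
      }
      return copy;
    }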
@@ -258,6 +268,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
{ // Add the constant properties to the boilerplate.
int length = constant_properties->length();
OptimizedObjectForAddingMultipleProperties opt(boilerplate,
+ length / 2,
!is_result_from_cache);
for (int index = 0; index < length; index +=2) {
Handle<Object> key(constant_properties->get(index+0));
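The new length / 2 argument reflects that constant_properties is a flat array of alternating keys and values, so half its length is the number of properties about to be added; sizing the backing store once up front avoids repeated growth. A small sketch of that bookkeeping with ordinary standard-library types:

    #include <cassert>
    #include <cstddef>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Properties arrive flattened as [key0, value0, key1, value1, ...], so the
    // expected property count is length / 2; reserving it avoids rehashing.
    std::unordered_map<std::string, std::string> BuildBoilerplate(
        const std::vector<std::string>& constant_properties) {
      assert(constant_properties.size() % 2 == 0);
      std::unordered_map<std::string, std::string> object;
      object.reserve(constant_properties.size() / 2);
      for (size_t i = 0; i < constant_properties.size(); i += 2) {
        object.emplace(constant_properties[i], constant_properties[i + 1]);
      }
      return object;
    }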
@@ -1637,7 +1648,7 @@ void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
}
case SUBJECT_CAPTURE: {
int capture = part.data;
- FixedArray* match_info = last_match_info->elements();
+ FixedArray* match_info = FixedArray::cast(last_match_info->elements());
int from = RegExpImpl::GetCapture(match_info, capture * 2);
int to = RegExpImpl::GetCapture(match_info, capture * 2 + 1);
if (from >= 0 && to > from) {
@@ -1717,7 +1728,8 @@ static Object* StringReplaceRegExpWithString(String* subject,
int start, end;
{
AssertNoAllocation match_info_array_is_not_in_a_handle;
- FixedArray* match_info_array = last_match_info_handle->elements();
+ FixedArray* match_info_array =
+ FixedArray::cast(last_match_info_handle->elements());
ASSERT_EQ(capture_count * 2 + 2,
RegExpImpl::GetLastCaptureCount(match_info_array));
@@ -2345,7 +2357,7 @@ static Object* Runtime_StringMatch(Arguments args) {
int end;
{
AssertNoAllocation no_alloc;
- FixedArray* elements = regexp_info->elements();
+ FixedArray* elements = FixedArray::cast(regexp_info->elements());
start = Smi::cast(elements->get(RegExpImpl::kFirstCapture))->value();
end = Smi::cast(elements->get(RegExpImpl::kFirstCapture + 1))->value();
}
@@ -3022,7 +3034,7 @@ static Object* Runtime_ToSlowProperties(Arguments args) {
Handle<Object> object = args.at<Object>(0);
if (object->IsJSObject()) {
Handle<JSObject> js_object = Handle<JSObject>::cast(object);
- js_object->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
+ js_object->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
}
return *object;
}
@@ -4885,7 +4897,7 @@ static Object* Runtime_DateParseString(Arguments args) {
AssertNoAllocation no_allocation;
- FixedArray* output_array = output->elements();
+ FixedArray* output_array = FixedArray::cast(output->elements());
RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE);
bool result;
if (str->IsAsciiRepresentation()) {
@@ -4908,7 +4920,7 @@ static Object* Runtime_DateLocalTimezone(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_DOUBLE_CHECKED(x, args[0]);
- char* zone = OS::LocalTimezone(x);
+ const char* zone = OS::LocalTimezone(x);
return Heap::AllocateStringFromUtf8(CStrVector(zone));
}
@@ -5173,37 +5185,62 @@ static uint32_t IterateElements(Handle<JSObject> receiver,
ArrayConcatVisitor* visitor) {
uint32_t num_of_elements = 0;
- if (receiver->HasFastElements()) {
- Handle<FixedArray> elements(FixedArray::cast(receiver->elements()));
- uint32_t len = elements->length();
- if (range < len) len = range;
+ switch (receiver->GetElementsKind()) {
+ case JSObject::FAST_ELEMENTS: {
+ Handle<FixedArray> elements(FixedArray::cast(receiver->elements()));
+ uint32_t len = elements->length();
+ if (range < len) {
+ len = range;
+ }
- for (uint32_t j = 0; j < len; j++) {
- Handle<Object> e(elements->get(j));
- if (!e->IsTheHole()) {
+ for (uint32_t j = 0; j < len; j++) {
+ Handle<Object> e(elements->get(j));
+ if (!e->IsTheHole()) {
+ num_of_elements++;
+ if (visitor) {
+ visitor->visit(j, e);
+ }
+ }
+ }
+ break;
+ }
+ case JSObject::PIXEL_ELEMENTS: {
+ Handle<PixelArray> pixels(PixelArray::cast(receiver->elements()));
+ uint32_t len = pixels->length();
+ if (range < len) {
+ len = range;
+ }
+
+ for (uint32_t j = 0; j < len; j++) {
num_of_elements++;
- if (visitor)
+ if (visitor != NULL) {
+ Handle<Smi> e(Smi::FromInt(pixels->get(j)));
visitor->visit(j, e);
+ }
}
+ break;
}
-
- } else {
- Handle<NumberDictionary> dict(receiver->element_dictionary());
- uint32_t capacity = dict->Capacity();
- for (uint32_t j = 0; j < capacity; j++) {
- Handle<Object> k(dict->KeyAt(j));
- if (dict->IsKey(*k)) {
- ASSERT(k->IsNumber());
- uint32_t index = static_cast<uint32_t>(k->Number());
- if (index < range) {
- num_of_elements++;
- if (visitor) {
- visitor->visit(index,
- Handle<Object>(dict->ValueAt(j)));
+ case JSObject::DICTIONARY_ELEMENTS: {
+ Handle<NumberDictionary> dict(receiver->element_dictionary());
+ uint32_t capacity = dict->Capacity();
+ for (uint32_t j = 0; j < capacity; j++) {
+ Handle<Object> k(dict->KeyAt(j));
+ if (dict->IsKey(*k)) {
+ ASSERT(k->IsNumber());
+ uint32_t index = static_cast<uint32_t>(k->Number());
+ if (index < range) {
+ num_of_elements++;
+ if (visitor) {
+ visitor->visit(index, Handle<Object>(dict->ValueAt(j)));
+ }
}
}
}
+ break;
}
+ default:
+ UNREACHABLE();
+ break;
}
return num_of_elements;
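As in DeepCopyBoilerplate, the branch becomes a switch with one loop per elements kind: dense arrays skip holes, pixel arrays box each raw byte before visiting, and dictionaries filter keys against the range cap. The shared contract is "count everything in range, and visit it only if a visitor was supplied"; a condensed sketch of that contract, collapsed to one dense and one sparse store with hypothetical types:

    #include <cstdint>
    #include <functional>
    #include <map>
    #include <optional>
    #include <vector>

    using Visitor = std::function<void(uint32_t index, int value)>;

    // Counts elements below range; invokes visit for each one if provided.
    uint32_t IterateElements(const std::vector<std::optional<int>>& dense,
                             const std::map<uint32_t, int>& sparse,
                             uint32_t range, const Visitor& visit) {
      uint32_t count = 0;
      uint32_t len = static_cast<uint32_t>(dense.size());
      if (range < len) len = range;
      for (uint32_t j = 0; j < len; ++j) {
        if (!dense[j].has_value()) continue;  // skip holes
        ++count;
        if (visit) visit(j, *dense[j]);
      }
      for (const auto& entry : sparse) {
        if (entry.first >= range) continue;   // honor the range cap
        ++count;
        if (visit) visit(entry.first, entry.second);
      }
      return count;
    }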
@@ -7408,14 +7445,15 @@ static bool ShowFrameInStackTrace(StackFrame* raw_frame, Object* caller,
// Not sure when this can happen but skip it just in case.
if (!raw_fun->IsJSFunction())
return false;
- if ((raw_fun == caller) && !(*seen_caller) && frame->IsConstructor()) {
+ if ((raw_fun == caller) && !(*seen_caller)) {
*seen_caller = true;
return false;
}
- // Skip the most obvious builtin calls. Some builtin calls (such as
- // Number.ADD which is invoked using 'call') are very difficult to
- // recognize so we're leaving them in for now.
- return !frame->receiver()->IsJSBuiltinsObject();
+ // Skip all frames until we've seen the caller. Also, skip the most
+ // obvious builtin calls. Some builtin calls (such as Number.ADD
+ // which is invoked using 'call') are very difficult to recognize
+ // so we're leaving them in for now.
+ return *seen_caller && !frame->receiver()->IsJSBuiltinsObject();
}
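The fix makes the frame that matches the caller set seen_caller unconditionally (previously only construct frames did), and the return value now also requires seen_caller, so everything above and including the caller is suppressed. Reduced to plain data, the predicate looks roughly like this (hypothetical Frame type; in the runtime seen_caller starts out true when no caller function was supplied):

    struct Frame {
      const void* function;      // identity of the function running in the frame
      bool is_builtin_receiver;  // receiver is the builtins object
    };

    // Walked from the top of the stack downwards; seen_caller is carried along.
    static bool ShowFrame(const Frame& frame, const void* caller,
                          bool* seen_caller) {
      if (frame.function == caller && !*seen_caller) {
        *seen_caller = true;   // hide the caller frame itself
        return false;
      }
      // Hide everything until the caller has been passed, plus builtin frames.
      return *seen_caller && !frame.is_builtin_receiver;
    }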
@@ -7424,7 +7462,7 @@ static bool ShowFrameInStackTrace(StackFrame* raw_frame, Object* caller,
// code offset.
static Object* Runtime_CollectStackTrace(Arguments args) {
ASSERT_EQ(args.length(), 2);
- Object* caller = args[0];
+ Handle<Object> caller = args.at<Object>(0);
CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[1]);
HandleScope scope;
@@ -7433,12 +7471,14 @@ static Object* Runtime_CollectStackTrace(Arguments args) {
Handle<JSArray> result = Factory::NewJSArray(initial_size * 3);
StackFrameIterator iter;
- bool seen_caller = false;
+ // If the caller parameter is a function we skip frames until we're
+ // under it before starting to collect.
+ bool seen_caller = !caller->IsJSFunction();
int cursor = 0;
int frames_seen = 0;
while (!iter.done() && frames_seen < limit) {
StackFrame* raw_frame = iter.frame();
- if (ShowFrameInStackTrace(raw_frame, caller, &seen_caller)) {
+ if (ShowFrameInStackTrace(raw_frame, *caller, &seen_caller)) {
frames_seen++;
JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
Object* recv = frame->receiver();
@@ -7446,15 +7486,17 @@ static Object* Runtime_CollectStackTrace(Arguments args) {
Address pc = frame->pc();
Address start = frame->code()->address();
Smi* offset = Smi::FromInt(pc - start);
- FixedArray* elements = result->elements();
+ FixedArray* elements = FixedArray::cast(result->elements());
if (cursor + 2 < elements->length()) {
elements->set(cursor++, recv);
elements->set(cursor++, fun);
elements->set(cursor++, offset, SKIP_WRITE_BARRIER);
} else {
HandleScope scope;
- SetElement(result, cursor++, Handle<Object>(recv));
- SetElement(result, cursor++, Handle<Object>(fun));
+ Handle<Object> recv_handle(recv);
+ Handle<Object> fun_handle(fun);
+ SetElement(result, cursor++, recv_handle);
+ SetElement(result, cursor++, fun_handle);
SetElement(result, cursor++, Handle<Smi>(offset));
}
}
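Each reported frame contributes three slots (receiver, function, pc offset) written at a running cursor; while the preallocated array still has room the elements are stored directly, and only the overflow path takes the slower, allocating route. A sketch of that flat-triple packing with plain containers:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Appends one frame as the triple [receiver, function, pc_offset] at cursor,
    // using the fast path while the fixed-size store has room.
    static void AppendFrame(std::vector<intptr_t>* fixed,
                            std::vector<intptr_t>* overflow, size_t* cursor,
                            intptr_t receiver, intptr_t function,
                            intptr_t pc_offset) {
      if (*cursor + 2 < fixed->size()) {
        (*fixed)[(*cursor)++] = receiver;
        (*fixed)[(*cursor)++] = function;
        (*fixed)[(*cursor)++] = pc_offset;
      } else {
        // Slow path: the preallocated store is full, grow elsewhere.
        overflow->push_back(receiver);
        overflow->push_back(function);
        overflow->push_back(pc_offset);
        *cursor += 3;
      }
    }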
diff --git a/V8Binding/v8/src/spaces-inl.h b/V8Binding/v8/src/spaces-inl.h
index 8b2eab0..da72497 100644
--- a/V8Binding/v8/src/spaces-inl.h
+++ b/V8Binding/v8/src/spaces-inl.h
@@ -103,9 +103,9 @@ void Page::ClearRSet() {
// The address of the rset word containing the bit for this word is computed as:
// page_address + words * 4
// For a 64-bit address, if it is:
-// | page address | quadwords(5) | bit offset(5) | pointer alignment (3) |
+// | page address | words(5) | bit offset(5) | pointer alignment (3) |
// The address of the rset word containing the bit for this word is computed as:
-// page_address + quadwords * 4 + kRSetOffset.
+// page_address + words * 4 + kRSetOffset.
// The rset is accessed as 32-bit words, and bit offsets in a 32-bit word,
// even on the X64 architecture.
@@ -115,7 +115,7 @@ Address Page::ComputeRSetBitPosition(Address address, int offset,
Page* page = Page::FromAddress(address);
uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
- kObjectAlignmentBits);
+ kPointerSizeLog2);
*bitmask = 1 << (bit_offset % kBitsPerInt);
Address rset_address =
@@ -127,20 +127,19 @@ Address Page::ComputeRSetBitPosition(Address address, int offset,
if (rset_address >= page->RSetEnd()) {
// We have a large object page, and the remembered set address is actually
- // past the end of the object. The address of the remembered set in this
- // case is the extra remembered set start address at the address of the
- // end of the object:
+ // past the end of the object.
+
+ // The first part of the remembered set is still located at the start of
+ // the page, but anything after kRSetEndOffset must be relocated to after
+ // the large object, i.e. after
// (page->ObjectAreaStart() + object size)
- // plus the offset of the computed remembered set address from the start
- // of the object:
- // (rset_address - page->ObjectAreaStart()).
- // Ie, we can just add the object size.
- // In the X64 architecture, the remembered set ends before the object start,
- // so we need to add an additional offset, from rset end to object start
+ // We do that by adding the difference between the normal RSet's end and
+ // the object's end.
ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
- rset_address += kObjectStartOffset - kRSetEndOffset +
+ int fixedarray_length =
FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
+ Array::kLengthOffset));
+ rset_address += kObjectStartOffset - kRSetEndOffset + fixedarray_length;
}
return rset_address;
}
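The computation maps an address to a bit in its page's remembered set: shift the page offset right by the pointer-size log to get a bit index, split that into a 32-bit word index plus a bit within the word, and add the page's rset base. Spelled out with the constants assumed from the surrounding code (8K pages, 8-byte pointers on x64, 32-bit rset words, rset base 32 bytes into the page), and ignoring the large-object relocation handled above:

    #include <cstdint>

    const uintptr_t kPageSize = 8192;
    const uintptr_t kPageAlignmentMask = kPageSize - 1;
    const int kPointerSizeLog2 = 3;  // 8-byte pointers (x64)
    const int kBitsPerInt = 32;      // the rset is accessed as 32-bit words
    const int kRSetOffset = 32;      // room for four pointers of page header

    // Returns the address of the 32-bit rset word covering addr, and the mask
    // selecting the corresponding bit within that word.
    uintptr_t RSetWordFor(uintptr_t addr, uint32_t* bitmask) {
      uintptr_t page = addr & ~kPageAlignmentMask;
      uint32_t bit_offset =
          static_cast<uint32_t>((addr & kPageAlignmentMask) >> kPointerSizeLog2);
      *bitmask = 1u << (bit_offset % kBitsPerInt);
      return page + kRSetOffset + (bit_offset / kBitsPerInt) * sizeof(uint32_t);
    }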
diff --git a/V8Binding/v8/src/spaces.h b/V8Binding/v8/src/spaces.h
index 94f7a91..57e7c1f 100644
--- a/V8Binding/v8/src/spaces.h
+++ b/V8Binding/v8/src/spaces.h
@@ -99,8 +99,11 @@ class AllocationInfo;
// its page offset by 32. Therefore, the object area in a page starts at the
// 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
// the first two words (64 bits) in a page can be used for other purposes.
-// TODO(X64): This description only represents the 32-bit layout.
-// On the 64-bit platform, we add an offset to the start of the remembered set.
+//
+// On the 64-bit platform, we add an offset to the start of the remembered set,
+// and pointers are aligned to 8-byte pointer size. This means that we need
+// only 128 bytes for the RSet, and only get two bytes free in the RSet's RSet.
+// For this reason we add an offset to get room for the Page data at the start.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
@@ -118,7 +121,7 @@ class Page {
// from [page_addr .. page_addr + kPageSize[
//
// Note that this function only works for addresses in normal paged
- // spaces and addresses in the first 8K of large object pages (ie,
+ // spaces and addresses in the first 8K of large object pages (i.e.,
// the start of large objects but not necessarily derived pointers
// within them).
INLINE(static Page* FromAddress(Address a)) {
@@ -218,7 +221,7 @@ class Page {
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
- // The offset of the remembered set in a page, in addition to the empty words
+ // The offset of the remembered set in a page, in addition to the empty bytes
// formed as the remembered bits of the remembered set itself.
#ifdef V8_TARGET_ARCH_X64
static const int kRSetOffset = 4 * kPointerSize; // Room for four pointers.
@@ -234,7 +237,7 @@ class Page {
// to align start of rset to a uint32_t address.
static const int kObjectStartOffset = 256;
- // The start offset of the remembered set in a page.
+ // The start offset of the used part of the remembered set in a page.
static const int kRSetStartOffset = kRSetOffset +
kObjectStartOffset / kBitsPerPointer;
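The sizes in the new layout comment can be checked directly: an 8K page holds 1024 pointer-aligned words on a 64-bit target, so the remembered set needs 1024 bits (128 bytes), and the 128 bytes of the rset are themselves covered by only 128 / 8 = 16 bits, i.e. two bytes, matching the "two bytes free" remark. As compile-time arithmetic (constant names are local to this sketch):

    #include <cstddef>

    const size_t kPageSize = 8 * 1024;  // bytes per page
    const size_t kPointerSize = 8;      // 64-bit target
    const size_t kBitsPerByte = 8;

    // One rset bit per pointer-aligned word in the page.
    const size_t kRSetBits = kPageSize / kPointerSize;
    const size_t kRSetBytes = kRSetBits / kBitsPerByte;
    // Part of the rset that would cover the rset itself and is therefore free.
    const size_t kFreeRSetBytes = kRSetBytes / kPointerSize / kBitsPerByte;

    static_assert(kRSetBits == 1024, "8K page, 8-byte words");
    static_assert(kRSetBytes == 128, "the rset occupies 128 bytes");
    static_assert(kFreeRSetBytes == 2, "first two rset bytes map the rset itself");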
@@ -264,16 +267,16 @@ class Page {
// low-order bit for large object pages will be cleared.
int is_normal_page;
- // The following fields overlap with remembered set, they can only
+ // The following fields may overlap with remembered set, they can only
// be used in the mark-compact collector when remembered set is not
// used.
- // The allocation pointer after relocating objects to this page.
- Address mc_relocation_top;
-
// The index of the page in its owner space.
int mc_page_index;
+ // The allocation pointer after relocating objects to this page.
+ Address mc_relocation_top;
+
// The forwarding address of the first live object in this page.
Address mc_first_forwarded;
@@ -931,34 +934,41 @@ class PagedSpace : public Space {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-// HistogramInfo class for recording a single "bar" of a histogram. This
-// class is used for collecting statistics to print to stdout (when compiled
-// with DEBUG) or to the log file (when compiled with
-// ENABLE_LOGGING_AND_PROFILING).
-class HistogramInfo BASE_EMBEDDED {
+class NumberAndSizeInfo BASE_EMBEDDED {
public:
- HistogramInfo() : number_(0), bytes_(0) {}
+ NumberAndSizeInfo() : number_(0), bytes_(0) {}
- const char* name() { return name_; }
- void set_name(const char* name) { name_ = name; }
-
- int number() { return number_; }
+ int number() const { return number_; }
void increment_number(int num) { number_ += num; }
- int bytes() { return bytes_; }
+ int bytes() const { return bytes_; }
void increment_bytes(int size) { bytes_ += size; }
- // Clear the number of objects and size fields, but not the name.
void clear() {
number_ = 0;
bytes_ = 0;
}
private:
- const char* name_;
int number_;
int bytes_;
};
+
+
+// HistogramInfo class for recording a single "bar" of a histogram. This
+// class is used for collecting statistics to print to stdout (when compiled
+// with DEBUG) or to the log file (when compiled with
+// ENABLE_LOGGING_AND_PROFILING).
+class HistogramInfo: public NumberAndSizeInfo {
+ public:
+ HistogramInfo() : NumberAndSizeInfo() {}
+
+ const char* name() { return name_; }
+ void set_name(const char* name) { name_ = name; }
+
+ private:
+ const char* name_;
+};
#endif
@@ -1158,7 +1168,7 @@ class NewSpace : public Space {
// The start address of the space and a bit mask. Anding an address in the
// new space with the mask will result in the start address.
Address start() { return start_; }
- uint32_t mask() { return address_mask_; }
+ uintptr_t mask() { return address_mask_; }
// The allocation top and limit addresses.
Address* allocation_top_address() { return &allocation_info_.top; }
diff --git a/V8Binding/v8/src/string-stream.cc b/V8Binding/v8/src/string-stream.cc
index ee343a5..cec4167 100644
--- a/V8Binding/v8/src/string-stream.cc
+++ b/V8Binding/v8/src/string-stream.cc
@@ -153,7 +153,7 @@ void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
}
break;
}
- case 'i': case 'd': case 'u': case 'x': case 'c': case 'p': case 'X': {
+ case 'i': case 'd': case 'u': case 'x': case 'c': case 'X': {
int value = current.data_.u_int_;
EmbeddedVector<char, 24> formatted;
int length = OS::SNPrintF(formatted, temp.start(), value);
@@ -167,6 +167,13 @@ void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
Add(formatted.start());
break;
}
+ case 'p': {
+ void* value = current.data_.u_pointer_;
+ EmbeddedVector<char, 20> formatted;
+ OS::SNPrintF(formatted, temp.start(), value);
+ Add(formatted.start());
+ break;
+ }
default:
UNREACHABLE();
break;
diff --git a/V8Binding/v8/src/string-stream.h b/V8Binding/v8/src/string-stream.h
index 5732944..6649f18 100644
--- a/V8Binding/v8/src/string-stream.h
+++ b/V8Binding/v8/src/string-stream.h
@@ -90,21 +90,12 @@ class FmtElm {
FmtElm(Handle<Object> value) : type_(HANDLE) { // NOLINT
data_.u_handle_ = value.location();
}
- FmtElm(void* value) : type_(INT) { // NOLINT
-#if V8_HOST_ARCH_64_BIT
- // TODO(x64): FmtElm needs to treat pointers as pointers, and not as
- // ints. This will require adding a pointer type, etc. For now just
- // hack it and truncate the pointer.
- // http://code.google.com/p/v8/issues/detail?id=335
- data_.u_int_ = 0;
- UNIMPLEMENTED();
-#else
- data_.u_int_ = reinterpret_cast<int>(value);
-#endif
+ FmtElm(void* value) : type_(POINTER) { // NOLINT
+ data_.u_pointer_ = value;
}
private:
friend class StringStream;
- enum Type { INT, DOUBLE, C_STR, LC_STR, OBJ, HANDLE };
+ enum Type { INT, DOUBLE, C_STR, LC_STR, OBJ, HANDLE, POINTER };
Type type_;
union {
int u_int_;
@@ -113,6 +104,7 @@ class FmtElm {
const Vector<const uc16>* u_lc_str_;
Object* u_obj_;
Object** u_handle_;
+ void* u_pointer_;
} data_;
};
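Before this change a void* argument was squeezed into the INT member of the union, which cannot work on a 64-bit host (the old branch even hit UNIMPLEMENTED there); giving the discriminated union a real pointer arm keeps the full width. A minimal standalone version of that shape:

    #include <cstdio>

    class FmtArg {
     public:
      explicit FmtArg(int value) : type_(INT) { data_.u_int = value; }
      explicit FmtArg(void* value) : type_(POINTER) { data_.u_pointer = value; }

      void Print() const {
        switch (type_) {
          case INT:     std::printf("%d", data_.u_int); break;
          case POINTER: std::printf("%p", data_.u_pointer); break;  // full width
        }
      }

     private:
      enum Type { INT, POINTER };
      Type type_;
      union {
        int u_int;
        void* u_pointer;  // keeps all 64 bits instead of truncating to int
      } data_;
    };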
diff --git a/V8Binding/v8/src/stub-cache.cc b/V8Binding/v8/src/stub-cache.cc
index 7ca2677..b25f5b4 100644
--- a/V8Binding/v8/src/stub-cache.cc
+++ b/V8Binding/v8/src/stub-cache.cc
@@ -736,22 +736,22 @@ Handle<Code> ComputeCallMiss(int argc) {
Object* LoadCallbackProperty(Arguments args) {
Handle<JSObject> recv = args.at<JSObject>(0);
- AccessorInfo* callback = AccessorInfo::cast(args[1]);
+ Handle<JSObject> holder = args.at<JSObject>(1);
+ AccessorInfo* callback = AccessorInfo::cast(args[2]);
+ Handle<Object> data = args.at<Object>(3);
Address getter_address = v8::ToCData<Address>(callback->getter());
v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
ASSERT(fun != NULL);
- Handle<String> name = args.at<String>(2);
- Handle<JSObject> holder = args.at<JSObject>(3);
- HandleScope scope;
- Handle<Object> data(callback->data());
- LOG(ApiNamedPropertyAccess("load", *recv, *name));
+ Handle<String> name = args.at<String>(4);
// NOTE: If we can align the structure of an AccessorInfo with the
// locations of the arguments to this function maybe we don't have
// to explicitly create the structure but can just pass a pointer
// into the stack.
+ LOG(ApiNamedPropertyAccess("load", *recv, *name));
v8::AccessorInfo info(v8::Utils::ToLocal(recv),
v8::Utils::ToLocal(data),
v8::Utils::ToLocal(holder));
+ HandleScope scope;
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
@@ -787,49 +787,129 @@ Object* StoreCallbackProperty(Arguments args) {
return *value;
}
+/**
+ * Attempts to load a property with an interceptor (which must be present),
+ * but doesn't search the prototype chain.
+ *
+ * Returns |Heap::no_interceptor_result_sentinel()| if the interceptor doesn't
+ * provide any value for the given name.
+ */
+Object* LoadPropertyWithInterceptorOnly(Arguments args) {
+ Handle<JSObject> receiver_handle = args.at<JSObject>(0);
+ Handle<JSObject> holder_handle = args.at<JSObject>(1);
+ Handle<String> name_handle = args.at<String>(2);
+ Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(3);
+ Handle<Object> data_handle = args.at<Object>(4);
+
+ Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
+ v8::NamedPropertyGetter getter =
+ FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
+ ASSERT(getter != NULL);
-Object* LoadInterceptorProperty(Arguments args) {
- JSObject* recv = JSObject::cast(args[0]);
- JSObject* holder = JSObject::cast(args[1]);
- String* name = String::cast(args[2]);
- Smi* lookup_hint = Smi::cast(args[3]);
- ASSERT(holder->HasNamedInterceptor());
- PropertyAttributes attr = NONE;
-
- Object* result = holder->GetInterceptorPropertyWithLookupHint(
- recv, lookup_hint, name, &attr);
- if (result->IsFailure()) return result;
+ {
+ // Use the interceptor getter.
+ v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
+ v8::Utils::ToLocal(data_handle),
+ v8::Utils::ToLocal(holder_handle));
+ HandleScope scope;
+ v8::Handle<v8::Value> r;
+ {
+ // Leaving JavaScript.
+ VMState state(EXTERNAL);
+ r = getter(v8::Utils::ToLocal(name_handle), info);
+ }
+ RETURN_IF_SCHEDULED_EXCEPTION();
+ if (!r.IsEmpty()) {
+ return *v8::Utils::OpenHandle(*r);
+ }
+ }
- // If the property is present, return it.
- if (attr != ABSENT) return result;
+ return Heap::no_interceptor_result_sentinel();
+}
- // If the top frame is an internal frame, this is really a call
- // IC. In this case, we simply return the undefined result which
- // will lead to an exception when trying to invoke the result as a
- // function.
- StackFrameIterator it;
- it.Advance(); // skip exit frame
- if (it.frame()->is_internal()) return result;
+static Object* ThrowReferenceError(String* name) {
// If the load is non-contextual, just return the undefined result.
// Note that both keyed and non-keyed loads may end up here, so we
// can't use either LoadIC or KeyedLoadIC constructors.
IC ic(IC::NO_EXTRA_FRAME);
ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
- if (!ic.is_contextual()) return result;
+ if (!ic.is_contextual()) return Heap::undefined_value();
// Throw a reference error.
+ HandleScope scope;
+ Handle<String> name_handle(name);
+ Handle<Object> error =
+ Factory::NewReferenceError("not_defined",
+ HandleVector(&name_handle, 1));
+ return Top::Throw(*error);
+}
+
+
+static Object* LoadWithInterceptor(Arguments* args,
+ PropertyAttributes* attrs) {
+ Handle<JSObject> receiver_handle = args->at<JSObject>(0);
+ Handle<JSObject> holder_handle = args->at<JSObject>(1);
+ Handle<String> name_handle = args->at<String>(2);
+ Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(3);
+ Handle<Object> data_handle = args->at<Object>(4);
+
+ Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
+ v8::NamedPropertyGetter getter =
+ FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
+ ASSERT(getter != NULL);
+
{
+ // Use the interceptor getter.
+ v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
+ v8::Utils::ToLocal(data_handle),
+ v8::Utils::ToLocal(holder_handle));
HandleScope scope;
- // We cannot use the raw name pointer here since getting the
- // property might cause a GC. However, we can get the name from
- // the stack using the arguments object.
- Handle<String> name_handle = args.at<String>(2);
- Handle<Object> error =
- Factory::NewReferenceError("not_defined",
- HandleVector(&name_handle, 1));
- return Top::Throw(*error);
+ v8::Handle<v8::Value> r;
+ {
+ // Leaving JavaScript.
+ VMState state(EXTERNAL);
+ r = getter(v8::Utils::ToLocal(name_handle), info);
+ }
+ RETURN_IF_SCHEDULED_EXCEPTION();
+ if (!r.IsEmpty()) {
+ *attrs = NONE;
+ return *v8::Utils::OpenHandle(*r);
+ }
}
+
+ Object* result = holder_handle->GetPropertyPostInterceptor(
+ *receiver_handle,
+ *name_handle,
+ attrs);
+ RETURN_IF_SCHEDULED_EXCEPTION();
+ return result;
+}
+
+
+/**
+ * Loads a property with an interceptor performing post interceptor
+ * lookup if interceptor failed.
+ */
+Object* LoadPropertyWithInterceptorForLoad(Arguments args) {
+ PropertyAttributes attr = NONE;
+ Object* result = LoadWithInterceptor(&args, &attr);
+ if (result->IsFailure()) return result;
+
+ // If the property is present, return it.
+ if (attr != ABSENT) return result;
+ return ThrowReferenceError(String::cast(args[2]));
+}
+
+
+Object* LoadPropertyWithInterceptorForCall(Arguments args) {
+ PropertyAttributes attr;
+ Object* result = LoadWithInterceptor(&args, &attr);
+ RETURN_IF_SCHEDULED_EXCEPTION();
+ // This is a call IC. In this case, we simply return the undefined result,
+ // which will lead to an exception when trying to invoke the result as a
+ // function.
+ return result;
}
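The refactoring splits the old single entry point into three: a probe that only asks the interceptor and signals absence through a dedicated sentinel, a load path that falls back to the post-interceptor lookup and turns a missing contextual load into a reference error, and a call path that simply returns what it got so the failure surfaces at the invocation. A plain-C++ sketch of that control flow, with invented names standing in for the runtime pieces and std::nullopt playing the sentinel:

    #include <optional>
    #include <stdexcept>
    #include <string>

    using Value = std::string;
    // Stand-in for the interceptor callback: may or may not produce a value.
    using Interceptor = std::optional<Value> (*)(const std::string& name);

    // Probe only: absence is reported to the caller, not resolved here.
    std::optional<Value> LoadWithInterceptorOnly(Interceptor getter,
                                                 const std::string& name) {
      return getter(name);
    }

    // Load path: a missing contextual load becomes a reference error.
    Value LoadForLoad(Interceptor getter, const std::string& name,
                      bool is_contextual) {
      std::optional<Value> result = getter(name);
      if (result) return *result;
      if (is_contextual) throw std::runtime_error(name + " is not defined");
      return Value();  // non-contextual loads fall back to undefined
    }

    // Call path: return the (possibly undefined) value and let the later
    // invocation produce the error.
    Value LoadForCall(Interceptor getter, const std::string& name) {
      return getter(name).value_or(Value());
    }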
diff --git a/V8Binding/v8/src/stub-cache.h b/V8Binding/v8/src/stub-cache.h
index c6b002b..3b3caad 100644
--- a/V8Binding/v8/src/stub-cache.h
+++ b/V8Binding/v8/src/stub-cache.h
@@ -307,7 +307,9 @@ Object* StoreCallbackProperty(Arguments args);
// Support functions for IC stubs for interceptors.
-Object* LoadInterceptorProperty(Arguments args);
+Object* LoadPropertyWithInterceptorOnly(Arguments args);
+Object* LoadPropertyWithInterceptorForLoad(Arguments args);
+Object* LoadPropertyWithInterceptorForCall(Arguments args);
Object* StoreInterceptorProperty(Arguments args);
Object* CallInterceptorProperty(Arguments args);
@@ -377,13 +379,6 @@ class StubCompiler BASE_EMBEDDED {
Label* miss_label);
static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);
- protected:
- Object* GetCodeWithFlags(Code::Flags flags, const char* name);
- Object* GetCodeWithFlags(Code::Flags flags, String* name);
-
- MacroAssembler* masm() { return &masm_; }
- void set_failure(Failure* failure) { failure_ = failure; }
-
// Check the integrity of the prototype chain to make sure that the
// current IC is still valid.
Register CheckPrototypes(JSObject* object,
@@ -394,6 +389,13 @@ class StubCompiler BASE_EMBEDDED {
String* name,
Label* miss);
+ protected:
+ Object* GetCodeWithFlags(Code::Flags flags, const char* name);
+ Object* GetCodeWithFlags(Code::Flags flags, String* name);
+
+ MacroAssembler* masm() { return &masm_; }
+ void set_failure(Failure* failure) { failure_ = failure; }
+
void GenerateLoadField(JSObject* object,
JSObject* holder,
Register receiver,
@@ -424,7 +426,7 @@ class StubCompiler BASE_EMBEDDED {
void GenerateLoadInterceptor(JSObject* object,
JSObject* holder,
- Smi* lookup_hint,
+ LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
diff --git a/V8Binding/v8/src/usage-analyzer.cc b/V8Binding/v8/src/usage-analyzer.cc
index 36464fa..5514f40 100644
--- a/V8Binding/v8/src/usage-analyzer.cc
+++ b/V8Binding/v8/src/usage-analyzer.cc
@@ -42,7 +42,7 @@ static const int InitialWeight = 100;
class UsageComputer: public AstVisitor {
public:
- static bool Traverse(Node* node);
+ static bool Traverse(AstNode* node);
void VisitBlock(Block* node);
void VisitDeclaration(Declaration* node);
@@ -116,7 +116,7 @@ class WeightScaler BASE_EMBEDDED {
// ----------------------------------------------------------------------------
// Implementation of UsageComputer
-bool UsageComputer::Traverse(Node* node) {
+bool UsageComputer::Traverse(AstNode* node) {
UsageComputer uc(InitialWeight, false);
uc.Visit(node);
return !uc.HasStackOverflow();
diff --git a/V8Binding/v8/src/variables.h b/V8Binding/v8/src/variables.h
index 3f1f11b..c2adb23 100644
--- a/V8Binding/v8/src/variables.h
+++ b/V8Binding/v8/src/variables.h
@@ -162,9 +162,7 @@ class Variable: public ZoneObject {
// be the global scope). scope() is NULL in that case. Currently the
// scope is only used to follow the context chain length.
Scope* scope() const { return scope_; }
- // If this assertion fails it means that some code has tried to
- // treat the special this variable as an ordinary variable with
- // the name "this".
+
Handle<String> name() const { return name_; }
Mode mode() const { return mode_; }
bool is_accessed_from_inner_scope() const {
diff --git a/V8Binding/v8/src/version.cc b/V8Binding/v8/src/version.cc
index 48e43de..c23e2d5 100644
--- a/V8Binding/v8/src/version.cc
+++ b/V8Binding/v8/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1
#define MINOR_VERSION 3
-#define BUILD_NUMBER 0
+#define BUILD_NUMBER 3
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION true
diff --git a/V8Binding/v8/src/x64/assembler-x64.cc b/V8Binding/v8/src/x64/assembler-x64.cc
index e9a6f7f1..b4fd678 100644
--- a/V8Binding/v8/src/x64/assembler-x64.cc
+++ b/V8Binding/v8/src/x64/assembler-x64.cc
@@ -687,6 +687,13 @@ void Assembler::call(const Operand& op) {
}
+void Assembler::cdq() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x99);
+}
+
+
void Assembler::cmovq(Condition cc, Register dst, Register src) {
// No need to check CpuInfo for CMOV support, it's a required part of the
// 64-bit architecture.
@@ -773,6 +780,15 @@ void Assembler::decq(const Operand& dst) {
}
+void Assembler::decl(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+ emit(0xFF);
+ emit_modrm(0x1, dst);
+}
+
+
void Assembler::decl(const Operand& dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -798,7 +814,7 @@ void Assembler::hlt() {
}
-void Assembler::idiv(Register src) {
+void Assembler::idivq(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(src);
@@ -807,6 +823,15 @@ void Assembler::idiv(Register src) {
}
+void Assembler::idivl(Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(src);
+ emit(0xF7);
+ emit_modrm(0x7, src);
+}
+
+
void Assembler::imul(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1115,6 +1140,9 @@ void Assembler::movq(const Operand& dst, Register src) {
void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
+ // This method must not be used with heap object references. The stored
+ // address is not GC safe. Use the handle version instead.
+ ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst);
@@ -1216,6 +1244,26 @@ void Assembler::movzxbq(Register dst, const Operand& src) {
}
+void Assembler::movzxbl(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xB6);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movzxwl(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xB7);
+ emit_operand(dst, src);
+}
+
+
void Assembler::mul(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1521,7 +1569,7 @@ void Assembler::store_rax(ExternalReference ref) {
void Assembler::testb(Register reg, Immediate mask) {
- ASSERT(is_int8(mask.value_));
+ ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
if (reg.is(rax)) {
@@ -1540,7 +1588,7 @@ void Assembler::testb(Register reg, Immediate mask) {
void Assembler::testb(const Operand& op, Immediate mask) {
- ASSERT(is_int8(mask.value_));
+ ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(rax, op);
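Relaxing the assertion lets testb take any mask that fits in a byte, whether it was written as a signed value (-128 to 127) or an unsigned one (128 to 255); both encode to the same eight bits. The intended range check, written out as plain predicates (assumed semantics matching the usual helpers):

    #include <cstdint>

    static inline bool IsInt8(int64_t x) { return x >= -128 && x <= 127; }
    static inline bool IsUint8(int64_t x) { return x >= 0 && x <= 255; }

    // A byte immediate is encodable if either view fits, i.e. the value lies in
    // [-128, 255]; truncation to 8 bits then loses nothing.
    static inline bool FitsInByte(int64_t x) { return IsInt8(x) || IsUint8(x); }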
diff --git a/V8Binding/v8/src/x64/assembler-x64.h b/V8Binding/v8/src/x64/assembler-x64.h
index 1b2a35c..015fa68 100644
--- a/V8Binding/v8/src/x64/assembler-x64.h
+++ b/V8Binding/v8/src/x64/assembler-x64.h
@@ -44,15 +44,25 @@ namespace internal {
// Test whether a 64-bit value is in a specific range.
static inline bool is_uint32(int64_t x) {
- const int64_t kUInt32Mask = V8_INT64_C(0xffffffff);
+ static const int64_t kUInt32Mask = V8_INT64_C(0xffffffff);
return x == (x & kUInt32Mask);
}
static inline bool is_int32(int64_t x) {
- const int64_t kMinIntValue = V8_INT64_C(-0x80000000);
+ static const int64_t kMinIntValue = V8_INT64_C(-0x80000000);
return is_uint32(x - kMinIntValue);
}
+static inline bool uint_is_int32(uint64_t x) {
+ static const uint64_t kMaxIntValue = V8_UINT64_C(0x80000000);
+ return x < kMaxIntValue;
+}
+
+static inline bool is_uint32(uint64_t x) {
+ static const uint64_t kMaxUIntValue = V8_UINT64_C(0x100000000);
+ return x < kMaxUIntValue;
+}
+
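These helpers classify 64-bit values by whether a 32-bit encoding preserves them: is_uint32 requires the upper 32 bits to be clear, is_int32 requires the value to sign-extend from 32 bits, and uint_is_int32 requires an unsigned value to also fit the signed range. Equivalent checks written against explicit bounds, with a few sanity tests:

    #include <cassert>
    #include <cstdint>

    static inline bool IsUint32(int64_t x) {
      return x >= 0 && x <= INT64_C(0xffffffff);
    }

    static inline bool IsInt32(int64_t x) {
      return x >= -INT64_C(0x80000000) && x <= INT64_C(0x7fffffff);
    }

    static inline bool UintIsInt32(uint64_t x) {
      return x < UINT64_C(0x80000000);  // fits the non-negative int32 range
    }

    int main() {
      assert(IsUint32(INT64_C(0xffffffff)) && !IsUint32(-1));
      assert(IsInt32(-INT64_C(0x80000000)) && !IsInt32(INT64_C(0x80000000)));
      assert(UintIsInt32(UINT64_C(0x7fffffff)) && !UintIsInt32(UINT64_C(0x80000000)));
      return 0;
    }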
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -292,6 +302,7 @@ enum ScaleFactor {
times_4 = 2,
times_8 = 3,
times_int_size = times_4,
+ times_half_pointer_size = times_4,
times_pointer_size = times_8
};
@@ -506,6 +517,8 @@ class Assembler : public Malloced {
void movsxlq(Register dst, Register src);
void movsxlq(Register dst, const Operand& src);
void movzxbq(Register dst, const Operand& src);
+ void movzxbl(Register dst, const Operand& src);
+ void movzxwl(Register dst, const Operand& src);
// New x64 instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode);
@@ -627,13 +640,18 @@ class Assembler : public Malloced {
void decq(Register dst);
void decq(const Operand& dst);
+ void decl(Register dst);
void decl(const Operand& dst);
// Sign-extends rax into rdx:rax.
void cqo();
+ // Sign-extends eax into edx:eax.
+ void cdq();
// Divide rdx:rax by src. Quotient in rax, remainder in rdx.
- void idiv(Register src);
+ void idivq(Register src);
+ // Divide edx:eax by lower 32 bits of src. Quotient in eax, rem. in edx.
+ void idivl(Register src);
// Signed multiply instructions.
void imul(Register src); // rdx:rax = rax * src.
@@ -737,6 +755,10 @@ class Assembler : public Malloced {
shift_32(dst, 0x5);
}
+ void shrl(Register dst, Immediate shift_amount) {
+ shift_32(dst, shift_amount, 0x5);
+ }
+
void store_rax(void* dst, RelocInfo::Mode mode);
void store_rax(ExternalReference ref);
diff --git a/V8Binding/v8/src/x64/builtins-x64.cc b/V8Binding/v8/src/x64/builtins-x64.cc
index 459921c..087aaff 100644
--- a/V8Binding/v8/src/x64/builtins-x64.cc
+++ b/V8Binding/v8/src/x64/builtins-x64.cc
@@ -394,9 +394,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// If given receiver is already a JavaScript object then there's no
// reason for converting it.
__ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(less, &call_to_object);
+ __ j(below, &call_to_object);
__ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
- __ j(less_equal, &push_receiver);
+ __ j(below_equal, &push_receiver);
// Convert the receiver to an object.
__ bind(&call_to_object);
@@ -503,13 +503,160 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Try to allocate the object without transitioning into C code. If any of the
// preconditions is not met, the code bails out to the runtime call.
Label rt_call, allocated;
-
- // TODO(x64): Implement inlined allocation.
+ if (FLAG_inline_new) {
+ Label undo_allocation;
+ // TODO(X64): Enable debugger support, using debug_step_in_fp.
+
+ // Verified that the constructor is a JSFunction.
+ // Load the initial map and verify that it is in fact a map.
+ // rdi: constructor
+ __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // The Smi tag check below catches both a NULL value and a Smi.
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &rt_call);
+ // rdi: constructor
+ // rax: initial map (if proven valid below)
+ __ CmpObjectType(rax, MAP_TYPE, rbx);
+ __ j(not_equal, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see comments
+ // in Runtime_NewObject in runtime.cc); in that case the initial map's
+ // instance type would be JS_FUNCTION_TYPE.
+ // rdi: constructor
+ // rax: initial map
+ __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
+ __ j(equal, &rt_call);
+
+ // Now allocate the JSObject on the heap.
+ __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
+ __ shl(rdi, Immediate(kPointerSizeLog2));
+ // rdi: size of new object
+ // Make sure that the maximum heap object size will never cause us
+ // problem here, because it is always greater than the maximum
+ // instance size that can be represented in a byte.
+ ASSERT(Heap::MaxObjectSizeInPagedSpace() >= (1 << kBitsPerByte));
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+ __ movq(kScratchRegister, new_space_allocation_top);
+ __ movq(rbx, Operand(kScratchRegister, 0));
+ __ addq(rdi, rbx); // Calculate new top
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ __ movq(kScratchRegister, new_space_allocation_limit);
+ __ cmpq(rdi, Operand(kScratchRegister, 0));
+ __ j(above_equal, &rt_call);
+ // Allocated the JSObject, now initialize the fields.
+ // rax: initial map
+ // rbx: JSObject (not HeapObject tagged - the actual address).
+ // rdi: start of next object
+ __ movq(Operand(rbx, JSObject::kMapOffset), rax);
+ __ Move(rcx, Factory::empty_fixed_array());
+ __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+ __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
+ // Set extra fields in the newly allocated object.
+ // rax: initial map
+ // rbx: JSObject
+ // rdi: start of next object
+ { Label loop, entry;
+ __ Move(rdx, Factory::undefined_value());
+ __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(Operand(rcx, 0), rdx);
+ __ addq(rcx, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(rcx, rdi);
+ __ j(less, &loop);
+ }
+
+ // Mostly done with the JSObject. Add the heap tag and store the new top, so
+ // that we can continue and jump into the continuation code at any time from
+ // now on. Any failures need to undo the setting of the new top, so that the
+ // heap is in a consistent state and verifiable.
+ // rax: initial map
+ // rbx: JSObject
+ // rdi: start of next object
+ __ or_(rbx, Immediate(kHeapObjectTag));
+ __ movq(kScratchRegister, new_space_allocation_top);
+ __ movq(Operand(kScratchRegister, 0), rdi);
+
+ // Check if a non-empty properties array is needed.
+ // Allocate and initialize a FixedArray if it is.
+ // rax: initial map
+ // rbx: JSObject
+ // rdi: start of next object
+ __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+ __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
+ // Calculate unused properties past the end of the in-object properties.
+ __ subq(rdx, rcx);
+ // Done if no extra properties are to be allocated.
+ __ j(zero, &allocated);
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // rbx: JSObject
+ // rdi: start of next object (will be start of FixedArray)
+ // rdx: number of elements in properties array
+ ASSERT(Heap::MaxObjectSizeInPagedSpace() >
+ (FixedArray::kHeaderSize + 255*kPointerSize));
+ __ lea(rax, Operand(rdi, rdx, times_pointer_size, FixedArray::kHeaderSize));
+ __ movq(kScratchRegister, new_space_allocation_limit);
+ __ cmpq(rax, Operand(kScratchRegister, 0));
+ __ j(above_equal, &undo_allocation);
+ __ store_rax(new_space_allocation_top);
+
+ // Initialize the FixedArray.
+ // rbx: JSObject
+ // rdi: FixedArray
+ // rdx: number of elements
+ // rax: start of next object
+ __ Move(rcx, Factory::fixed_array_map());
+ __ movq(Operand(rdi, JSObject::kMapOffset), rcx); // setup the map
+ __ movl(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
+
+ // Initialize the fields to undefined.
+ // rbx: JSObject
+ // rdi: FixedArray
+ // rax: start of next object
+ // rdx: number of elements
+ { Label loop, entry;
+ __ Move(rdx, Factory::undefined_value());
+ __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(Operand(rcx, 0), rdx);
+ __ addq(rcx, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(rcx, rax);
+ __ j(below, &loop);
+ }
+
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject
+ // rbx: JSObject
+ // rdi: FixedArray
+ __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
+ __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+
+
+ // Continue with JSObject being successfully allocated
+ // rbx: JSObject
+ __ jmp(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated objects unused properties.
+ // rbx: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ xor_(rbx, Immediate(kHeapObjectTag)); // clear the heap tag
+ __ movq(kScratchRegister, new_space_allocation_top);
+ __ movq(Operand(kScratchRegister, 0), rbx);
+ }
// Allocate the new receiver object using the runtime call.
// rdi: function (constructor)
__ bind(&rt_call);
- // Must restore edi (constructor) before calling runtime.
+ // Must restore rdi (constructor) before calling runtime.
__ movq(rdi, Operand(rsp, 0));
__ push(rdi);
__ CallRuntime(Runtime::kNewObject, 1);
@@ -562,7 +709,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// If the type of the result (stored in its map) is less than
// FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(greater_equal, &exit);
+ __ j(above_equal, &exit);
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
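The new fast path is bump-pointer allocation against the new-space top and limit: compute the prospective top, bail to the runtime call if it crosses the limit, fill in the object, and only then publish the new top; when the follow-up allocation of the properties array fails, the top is rolled back (the undo_allocation label) so the heap stays verifiable. A host-side sketch of that protocol, using a hypothetical allocator rather than the V8 heap API:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct NewSpace {
      uint8_t* top;
      uint8_t* limit;
    };

    // Fast path: returns nullptr when the caller must fall back to the runtime.
    void* TryAllocate(NewSpace* space, size_t size) {
      uint8_t* result = space->top;
      uint8_t* new_top = result + size;
      if (new_top > space->limit) return nullptr;  // bail to the slow path
      std::memset(result, 0, size);                // initialize before publishing
      space->top = new_top;                        // publish the new top
      return result;
    }

    // Undo path: only valid for the most recent allocation, mirroring the
    // undo_allocation label above.
    void UndoAllocate(NewSpace* space, void* object, size_t size) {
      if (static_cast<uint8_t*>(object) + size == space->top) {
        space->top = static_cast<uint8_t*>(object);
      }
    }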
diff --git a/V8Binding/v8/src/x64/cfg-x64.cc b/V8Binding/v8/src/x64/cfg-x64.cc
new file mode 100644
index 0000000..8d01ed2
--- /dev/null
+++ b/V8Binding/v8/src/x64/cfg-x64.cc
@@ -0,0 +1,323 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cfg.h"
+#include "codegen-inl.h"
+#include "codegen-x64.h"
+#include "debug.h"
+#include "macro-assembler-x64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void InstructionBlock::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ {
+ Comment cmt(masm, "[ InstructionBlock");
+ for (int i = 0, len = instructions_.length(); i < len; i++) {
+ // If the location of the current instruction is a temp, then the
+ // instruction cannot be in tail position in the block. Allocate the
+ // temp based on peeking ahead to the next instruction.
+ Instruction* instr = instructions_[i];
+ Location* loc = instr->location();
+ if (loc->is_temporary()) {
+ instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
+ }
+ instructions_[i]->Compile(masm);
+ }
+ }
+ successor_->Compile(masm);
+}
+
+
+void EntryNode::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ Label deferred_enter, deferred_exit;
+ {
+ Comment cmnt(masm, "[ EntryNode");
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ push(rsi);
+ __ push(rdi);
+ int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
+ if (count > 0) {
+ __ movq(kScratchRegister, Factory::undefined_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ for (int i = 0; i < count; i++) {
+ __ push(kScratchRegister);
+ }
+ }
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ if (FLAG_check_stack) {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ __ movq(kScratchRegister, stack_limit);
+ __ cmpq(rsp, Operand(kScratchRegister, 0));
+ __ j(below, &deferred_enter);
+ __ bind(&deferred_exit);
+ }
+ }
+ successor_->Compile(masm);
+ if (FLAG_check_stack) {
+ Comment cmnt(masm, "[ Deferred Stack Check");
+ __ bind(&deferred_enter);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ jmp(&deferred_exit);
+ }
+}
+
+
+void ExitNode::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ Comment cmnt(masm, "[ ExitNode");
+ if (FLAG_trace) {
+ __ push(rax);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ __ RecordJSReturn();
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+ int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+ __ ret((count + 1) * kPointerSize);
+ // Add padding that will be overwritten by a debugger breakpoint.
+ // "movq rsp, rbp; pop rbp" has length 5. "ret k" has length 2.
+ const int kPadding = Debug::kX64JSReturnSequenceLength - 5 - 2;
+ for (int i = 0; i < kPadding; ++i) {
+ __ int3();
+ }
+}
+
+
+void PropLoadInstr::Compile(MacroAssembler* masm) {
+ // The key should not be on the stack---if it is a compiler-generated
+ // temporary it is in the accumulator.
+ ASSERT(!key()->is_on_stack());
+
+ Comment cmnt(masm, "[ Load from Property");
+ // If the key is known at compile-time we may be able to use a load IC.
+ bool is_keyed_load = true;
+ if (key()->is_constant()) {
+ // Still use the keyed load IC if the key can be parsed as an integer so
+ // we will get into the case that handles [] on string objects.
+ Handle<Object> key_val = Constant::cast(key())->handle();
+ uint32_t ignored;
+ if (key_val->IsSymbol() &&
+ !String::cast(*key_val)->AsArrayIndex(&ignored)) {
+ is_keyed_load = false;
+ }
+ }
+
+ if (!object()->is_on_stack()) object()->Push(masm);
+ // A test rax instruction after the call indicates to the IC code that it
+ // was inlined. Ensure there is not one after the call below.
+ if (is_keyed_load) {
+ key()->Push(masm);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ pop(rbx); // Discard key.
+ } else {
+ key()->Get(masm, rcx);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ }
+ __ pop(rbx); // Discard receiver.
+ location()->Set(masm, rax);
+}
+
+
+void BinaryOpInstr::Compile(MacroAssembler* masm) {
+ // The right-hand value should not be on the stack---if it is a
+ // compiler-generated temporary it is in the accumulator.
+ ASSERT(!right()->is_on_stack());
+
+ Comment cmnt(masm, "[ BinaryOpInstr");
+ // We can overwrite one of the operands if it is a temporary.
+ OverwriteMode mode = NO_OVERWRITE;
+ if (left()->is_temporary()) {
+ mode = OVERWRITE_LEFT;
+ } else if (right()->is_temporary()) {
+ mode = OVERWRITE_RIGHT;
+ }
+
+ // Push both operands and call the specialized stub.
+ if (!left()->is_on_stack()) left()->Push(masm);
+ right()->Push(masm);
+ GenericBinaryOpStub stub(op(), mode, SMI_CODE_IN_STUB);
+ __ CallStub(&stub);
+ location()->Set(masm, rax);
+}
+
+
+void ReturnInstr::Compile(MacroAssembler* masm) {
+ // The location should be 'Effect'. As a side effect, move the value to
+ // the accumulator.
+ Comment cmnt(masm, "[ ReturnInstr");
+ value()->Get(masm, rax);
+}
+
+
+void Constant::Get(MacroAssembler* masm, Register reg) {
+ __ Move(reg, handle_);
+}
+
+
+void Constant::Push(MacroAssembler* masm) {
+ __ Push(handle_);
+}
+
+
+static Operand ToOperand(SlotLocation* loc) {
+ switch (loc->type()) {
+ case Slot::PARAMETER: {
+ int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+ return Operand(rbp, (1 + count - loc->index()) * kPointerSize);
+ }
+ case Slot::LOCAL: {
+ const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
+ return Operand(rbp, kOffset - loc->index() * kPointerSize);
+ }
+ default:
+ UNREACHABLE();
+ return Operand(rax, 0);
+ }
+}
+
+
+void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ __ Move(ToOperand(loc), handle_);
+}
+
+
+void SlotLocation::Get(MacroAssembler* masm, Register reg) {
+ __ movq(reg, ToOperand(this));
+}
+
+
+void SlotLocation::Set(MacroAssembler* masm, Register reg) {
+ __ movq(ToOperand(this), reg);
+}
+
+
+void SlotLocation::Push(MacroAssembler* masm) {
+ __ push(ToOperand(this));
+}
+
+
+void SlotLocation::Move(MacroAssembler* masm, Value* value) {
+ // We dispatch to the value because in some cases (temp or constant) we
+ // can use special instruction sequences.
+ value->MoveToSlot(masm, this);
+}
+
+
+void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ __ movq(kScratchRegister, ToOperand(this));
+ __ movq(ToOperand(loc), kScratchRegister);
+}
+
+
+void TempLocation::Get(MacroAssembler* masm, Register reg) {
+ switch (where_) {
+ case ACCUMULATOR:
+ if (!reg.is(rax)) __ movq(reg, rax);
+ break;
+ case STACK:
+ __ pop(reg);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Set(MacroAssembler* masm, Register reg) {
+ switch (where_) {
+ case ACCUMULATOR:
+ if (!reg.is(rax)) __ movq(rax, reg);
+ break;
+ case STACK:
+ __ push(reg);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Push(MacroAssembler* masm) {
+ switch (where_) {
+ case ACCUMULATOR:
+ __ push(rax);
+ break;
+ case STACK:
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Move(MacroAssembler* masm, Value* value) {
+ switch (where_) {
+ case ACCUMULATOR:
+ value->Get(masm, rax);
+ break;
+ case STACK:
+ value->Push(masm);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ switch (where_) {
+ case ACCUMULATOR:
+ __ movq(ToOperand(loc), rax);
+ break;
+ case STACK:
+ __ pop(ToOperand(loc));
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+#undef __
+
+} } // namespace v8::internal
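ToOperand in this file turns a slot into an rbp-relative operand: parameters sit above the saved frame pointer (past the return address and whatever the caller pushed), locals sit below it starting at kLocal0Offset. The same arithmetic in isolation; the -3 * kPointerSize value for the first local is an assumption standing in for JavaScriptFrameConstants::kLocal0Offset (saved context and function are pushed between rbp and the locals in EntryNode::Compile):

    const int kPointerSize = 8;
    const int kLocal0Offset = -3 * kPointerSize;  // assumed, see note above

    // Offset from rbp for parameter index out of num_parameters; the last
    // parameter ends up at [rbp + 16], just above the return address.
    int ParameterOffset(int index, int num_parameters) {
      return (1 + num_parameters - index) * kPointerSize;
    }

    // Offset from rbp for stack-allocated local index.
    int LocalOffset(int index) {
      return kLocal0Offset - index * kPointerSize;
    }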
diff --git a/V8Binding/v8/src/x64/codegen-x64.cc b/V8Binding/v8/src/x64/codegen-x64.cc
index 9ed7e74..87f1040 100644
--- a/V8Binding/v8/src/x64/codegen-x64.cc
+++ b/V8Binding/v8/src/x64/codegen-x64.cc
@@ -97,6 +97,137 @@ CodeGenState::~CodeGenState() {
}
+// -------------------------------------------------------------------------
+// Deferred code objects
+//
+// These subclasses of DeferredCode add pieces of code to the end of generated
+// code. They are branched to from the generated code, and
+// keep some slower code out of the main body of the generated code.
+// Many of them call a code stub or a runtime function.
+
+class DeferredInlineSmiAdd: public DeferredCode {
+ public:
+ DeferredInlineSmiAdd(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiAdd");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+class DeferredInlineSmiSub: public DeferredCode {
+ public:
+ DeferredInlineSmiSub(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiSub");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+// Call the appropriate binary operation stub to compute src op value
+// and leave the result in dst.
+class DeferredInlineSmiOperation: public DeferredCode {
+ public:
+ DeferredInlineSmiOperation(Token::Value op,
+ Register dst,
+ Register src,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : op_(op),
+ dst_(dst),
+ src_(src),
+ value_(value),
+ overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiOperation");
+ }
+
+ virtual void Generate();
+
+ private:
+ Token::Value op_;
+ Register dst_;
+ Register src_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+ // Code pattern for loading a floating point value. Input value must
+ // be either a smi or a heap number object (fp value). Requirements:
+ // operand on TOS+1. Returns operand as floating point number on FPU
+ // stack.
+ static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
+
+ // Code pattern for loading a floating point value. Input value must
+ // be either a smi or a heap number object (fp value). Requirements:
+ // operand in src register. Returns operand as floating point number
+ // in XMM register
+ static void LoadFloatOperand(MacroAssembler* masm,
+ Register src,
+ XMMRegister dst);
+
+ // Code pattern for loading floating point values. Input values must
+ // be either smi or heap number objects (fp values). Requirements:
+ // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
+ // floating point numbers in XMM registers.
+ static void LoadFloatOperands(MacroAssembler* masm,
+ XMMRegister dst1,
+ XMMRegister dst2);
+
+ // Code pattern for loading floating point values onto the fp stack.
+ // Input values must be either smi or heap number objects (fp values).
+ // Requirements:
+ // Register version: operands in registers lhs and rhs.
+ // Stack version: operands on TOS+1 and TOS+2.
+ // Returns operands as floating point numbers on fp stack.
+ static void LoadFloatOperands(MacroAssembler* masm);
+ static void LoadFloatOperands(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
+
+ // Code pattern for loading a floating point value and converting it
+ // to a 32-bit integer. Input value must be either a smi or a heap number
+ // object.
+ // Returns the operand as a 32-bit sign-extended integer in a general
+ // purpose register.
+ static void LoadInt32Operand(MacroAssembler* masm,
+ const Operand& src,
+ Register dst);
+
+ // Test if operands are smi or number objects (fp). Requirements:
+ // operand_1 in rax, operand_2 in rdx; falls through on float or smi
+ // operands, jumps to the non_float label otherwise.
+ static void CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float);
+
+ // Allocate a heap number in new space with undefined value.
+ // Returns tagged pointer in result, or jumps to need_gc if new space is full.
+ static void AllocateHeapNumber(MacroAssembler* masm,
+ Label* need_gc,
+ Register scratch,
+ Register result);
+};
+
+
// -----------------------------------------------------------------------------
// CodeGenerator implementation.
@@ -389,6 +520,112 @@ bool CodeGenerator::HasValidEntryRegisters() {
#endif
+class DeferredReferenceGetKeyedValue: public DeferredCode {
+ public:
+ explicit DeferredReferenceGetKeyedValue(Register dst,
+ Register receiver,
+ Register key,
+ bool is_global)
+ : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
+ set_comment("[ DeferredReferenceGetKeyedValue");
+ }
+
+ virtual void Generate();
+
+ Label* patch_site() { return &patch_site_; }
+
+ private:
+ Label patch_site_;
+ Register dst_;
+ Register receiver_;
+ Register key_;
+ bool is_global_;
+};
+
+
+void DeferredReferenceGetKeyedValue::Generate() {
+ __ push(receiver_); // First IC argument.
+ __ push(key_); // Second IC argument.
+
+ // Calculate the delta from the IC call instruction to the map check
+ // movq instruction in the inlined version. This delta is stored in
+ // a test(rax, delta) instruction after the call so that we can find
+ // it in the IC initialization code and patch the movq instruction.
+ // This means that we cannot allow test instructions after calls to
+ // KeyedLoadIC stubs in other places.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ RelocInfo::Mode mode = is_global_
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ __ Call(ic, mode);
+ // The delta from the start of the map-compare instruction to the
+ // test instruction. We use masm_-> directly here instead of the __
+ // macro because the macro sometimes uses macro expansion to turn
+ // into something that can't return a value. This is encountered
+ // when doing generated code coverage tests.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+ // Here we use masm_-> instead of the __ macro because this is the
+ // instruction that gets patched and coverage code gets in the way.
+ // TODO(X64): Consider whether it's worth switching the test to a
+ // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
+ // be generated normally.
+ masm_->testl(rax, Immediate(-delta_to_patch_site));
+ __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
+
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+ __ pop(key_);
+ __ pop(receiver_);
+}
+
+
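The test instruction emitted after the IC call encodes, in its 32-bit immediate, the negated distance back to the inlined map-check movq, so the IC initialization code can locate and patch that movq when the call site misses. The following is a minimal standalone sketch of that lookup, with an assumed opcode layout and hypothetical helper names; it is illustration only, not code from this patch:

#include <cstdint>
#include <cstring>

// 'testl(rax, imm32)' assembles to opcode 0xA9 followed by the immediate.
bool LooksLikeInlinedLoadMarker(const uint8_t* test_instruction) {
  return test_instruction[0] == 0xA9;
}

// The stored immediate is -delta_to_patch_site, so adding it to the address
// of the test instruction walks backwards to the map-check movq.
const uint8_t* InlinedMapCheckSite(const uint8_t* test_instruction) {
  int32_t negated_delta;
  std::memcpy(&negated_delta, test_instruction + 1, sizeof(negated_delta));
  return test_instruction + negated_delta;
}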
+class DeferredReferenceSetKeyedValue: public DeferredCode {
+ public:
+ DeferredReferenceSetKeyedValue(Register value,
+ Register key,
+ Register receiver)
+ : value_(value), key_(key), receiver_(receiver) {
+ set_comment("[ DeferredReferenceSetKeyedValue");
+ }
+
+ virtual void Generate();
+
+ Label* patch_site() { return &patch_site_; }
+
+ private:
+ Register value_;
+ Register key_;
+ Register receiver_;
+ Label patch_site_;
+};
+
+
+void DeferredReferenceSetKeyedValue::Generate() {
+ __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
+ // Push receiver and key arguments on the stack.
+ __ push(receiver_);
+ __ push(key_);
+ // Move the value argument to rax as expected by the IC stub.
+ if (!value_.is(rax)) __ movq(rax, value_);
+ // Call the IC stub.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // The delta from the start of the map-compare instructions (initial movq)
+ // to the test instruction. We use masm_-> directly here instead of the
+ // __ macro because the macro sometimes uses macro expansion to turn
+ // into something that can't return a value. This is encountered
+ // when doing generated code coverage tests.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+ // Here we use masm_-> instead of the __ macro because this is the
+ // instruction that gets patched and coverage code gets in the way.
+ masm_->testl(rax, Immediate(-delta_to_patch_site));
+ // Restore value (returned from store IC), key and receiver
+ // registers.
+ if (!value_.is(rax)) __ movq(value_, rax);
+ __ pop(key_);
+ __ pop(receiver_);
+}
+
+
class DeferredStackCheck: public DeferredCode {
public:
DeferredStackCheck() {
@@ -2193,9 +2430,8 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
// The receiver is the argument to the runtime call. It is the
// first value pushed when the reference was loaded to the
// frame.
- // TODO(X64): Enable this and the switch back to fast, once they work.
- // frame_->PushElementAt(target.size() - 1);
- // Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+ frame_->PushElementAt(target.size() - 1);
+ Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
}
if (node->op() == Token::ASSIGN ||
node->op() == Token::INIT_VAR ||
@@ -2203,20 +2439,18 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
Load(node->value());
} else {
- // Literal* literal = node->value()->AsLiteral();
+ Literal* literal = node->value()->AsLiteral();
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
- // Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
+ Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
// There are two cases where the target is not read in the right hand
// side, that are easy to test for: the right hand side is a literal,
// or the right hand side is a different variable. TakeValue invalidates
// the target, with an implicit promise that it will be written to again
// before it is read.
- // TODO(X64): Implement TakeValue optimization. Check issue 150016.
- if (false) {
- // if (literal != NULL || (right_var != NULL && right_var != var)) {
- // target.TakeValue(NOT_INSIDE_TYPEOF);
+ if (literal != NULL || (right_var != NULL && right_var != var)) {
+ target.TakeValue(NOT_INSIDE_TYPEOF);
} else {
target.GetValue(NOT_INSIDE_TYPEOF);
}
@@ -2247,9 +2481,8 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
// argument to the runtime call is the receiver, which is the
// first value pushed as part of the reference, which is below
// the lhs value.
- // TODO(X64): Enable this once ToFastProperties works.
- // frame_->PushElementAt(target.size());
- // Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+ frame_->PushElementAt(target.size());
+ Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
}
}
}
@@ -3249,10 +3482,161 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
}
-void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* a) {
- // TODO(X64): Implement this function.
- // Ignore arguments and return undefined, to signal failure.
- frame_->Push(Factory::undefined_value());
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateFastCharCodeAt");
+ ASSERT(args->length() == 2);
+
+ Label slow_case;
+ Label end;
+ Label not_a_flat_string;
+ Label a_cons_string;
+ Label try_again_with_new_string;
+ Label ascii_string;
+ Label got_char_code;
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Result index = frame_->Pop();
+ Result object = frame_->Pop();
+
+ // Get register rcx to use as shift amount later.
+ Result shift_amount;
+ if (object.is_register() && object.reg().is(rcx)) {
+ Result fresh = allocator_->Allocate();
+ shift_amount = object;
+ object = fresh;
+ __ movq(object.reg(), rcx);
+ }
+ if (index.is_register() && index.reg().is(rcx)) {
+ Result fresh = allocator_->Allocate();
+ shift_amount = index;
+ index = fresh;
+ __ movq(index.reg(), rcx);
+ }
+ // There could be references to rcx in the frame. Allocating rcx will
+ // spill them; otherwise spill it explicitly.
+ if (shift_amount.is_valid()) {
+ frame_->Spill(rcx);
+ } else {
+ shift_amount = allocator()->Allocate(rcx);
+ }
+ ASSERT(shift_amount.is_register());
+ ASSERT(shift_amount.reg().is(rcx));
+ ASSERT(allocator_->count(rcx) == 1);
+
+ // We will mutate the index register and possibly the object register.
+ // The case where they are somehow the same register is handled
+ // because we only mutate them in the case where the receiver is a
+ // heap object and the index is not.
+ object.ToRegister();
+ index.ToRegister();
+ frame_->Spill(object.reg());
+ frame_->Spill(index.reg());
+
+ // We need a single extra temporary register.
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+
+ // There is no virtual frame effect from here up to the final result
+ // push.
+
+ // If the receiver is a smi trigger the slow case.
+ ASSERT(kSmiTag == 0);
+ __ testl(object.reg(), Immediate(kSmiTagMask));
+ __ j(zero, &slow_case);
+
+ // If the index is negative or non-smi trigger the slow case.
+ ASSERT(kSmiTag == 0);
+ __ testl(index.reg(),
+ Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000U)));
+ __ j(not_zero, &slow_case);
+ // Untag the index.
+ __ sarl(index.reg(), Immediate(kSmiTagSize));
+
+ __ bind(&try_again_with_new_string);
+ // Fetch the instance type of the receiver into rcx.
+ __ movq(rcx, FieldOperand(object.reg(), HeapObject::kMapOffset));
+ __ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the slow case.
+ __ testb(rcx, Immediate(kIsNotStringMask));
+ __ j(not_zero, &slow_case);
+
+ // Here we make assumptions about the tag values and the shifts needed.
+ // See the comment in objects.h.
+ ASSERT(kLongStringTag == 0);
+ ASSERT(kMediumStringTag + String::kLongLengthShift ==
+ String::kMediumLengthShift);
+ ASSERT(kShortStringTag + String::kLongLengthShift ==
+ String::kShortLengthShift);
+ __ and_(rcx, Immediate(kStringSizeMask));
+ __ addq(rcx, Immediate(String::kLongLengthShift));
+ // Fetch the length field into the temporary register.
+ __ movl(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
+ __ shrl(temp.reg()); // The shift amount in rcx (cl) is an implicit operand.
+ // Check for index out of range.
+ __ cmpl(index.reg(), temp.reg());
+ __ j(greater_equal, &slow_case);
+ // Reload the instance type (into the temp register this time).
+ __ movq(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
+ __ movzxbl(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+
+ // We need special handling for non-flat strings.
+ ASSERT(kSeqStringTag == 0);
+ __ testb(temp.reg(), Immediate(kStringRepresentationMask));
+ __ j(not_zero, &not_a_flat_string);
+ // Check for 1-byte or 2-byte string.
+ __ testb(temp.reg(), Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the temp register.
+ __ movzxwl(temp.reg(), FieldOperand(object.reg(),
+ index.reg(),
+ times_2,
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
+
+ // ASCII string.
+ __ bind(&ascii_string);
+ // Load the byte into the temp register.
+ __ movzxbl(temp.reg(), FieldOperand(object.reg(),
+ index.reg(),
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ __ bind(&got_char_code);
+ ASSERT(kSmiTag == 0);
+ __ shl(temp.reg(), Immediate(kSmiTagSize));
+ __ jmp(&end);
+
+ // Handle non-flat strings.
+ __ bind(&not_a_flat_string);
+ __ and_(temp.reg(), Immediate(kStringRepresentationMask));
+ __ cmpb(temp.reg(), Immediate(kConsStringTag));
+ __ j(equal, &a_cons_string);
+ __ cmpb(temp.reg(), Immediate(kSlicedStringTag));
+ __ j(not_equal, &slow_case);
+
+ // SlicedString.
+ // Add the offset to the index and trigger the slow case on overflow.
+ __ addl(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
+ __ j(overflow, &slow_case);
+ // Getting the underlying string is done by running the cons string code.
+
+ // ConsString.
+ __ bind(&a_cons_string);
+ // Get the first of the two strings. Both sliced and cons strings
+ // store their source string at the same offset.
+ ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
+ __ movq(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
+ __ jmp(&try_again_with_new_string);
+
+ __ bind(&slow_case);
+ // Move the undefined value into the result register, which will
+ // trigger the slow case.
+ __ Move(temp.reg(), Factory::undefined_value());
+
+ __ bind(&end);
+ frame_->Push(&temp);
}
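The fast path above leans on the smi representation asserted in the code (kSmiTag == 0 with a one-bit tag): the index is untagged with an arithmetic shift right and the resulting character code is re-tagged with a shift left. A standalone sketch of that arithmetic, for illustration only:

#include <cstdint>

constexpr int kSmiTagSize = 1;
constexpr int kSmiTag = 0;
constexpr int kSmiTagMask = 1;

bool IsSmi(int32_t value)       { return (value & kSmiTagMask) == kSmiTag; }  // testl reg, mask
int32_t UntagSmi(int32_t smi)   { return smi >> kSmiTagSize; }                // sarl reg, 1
int32_t TagAsSmi(int32_t value) { return value << kSmiTagSize; }              // shl reg, 1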
@@ -3319,9 +3703,20 @@ void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+ ASSERT(kSmiTag == 0); // RBP value is aligned, so it should look like a Smi.
+ Result rbp_as_smi = allocator_->Allocate();
+ ASSERT(rbp_as_smi.is_valid());
+ __ movq(rbp_as_smi.reg(), rbp);
+ frame_->Push(&rbp_as_smi);
+}
+
+
void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
frame_->SpillAll();
+ __ push(rsi);
// Make sure the frame is aligned like the OS expects.
static const int kFrameAlignment = OS::ActivationFrameAlignment();
@@ -3334,22 +3729,70 @@ void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
// Call V8::RandomPositiveSmi().
__ Call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
- // Restore stack pointer from callee-saved register edi.
+ // Restore stack pointer from callee-saved register.
if (kFrameAlignment > 0) {
__ movq(rsp, rbx);
}
+ __ pop(rsi);
Result result = allocator_->Allocate(rax);
frame_->Push(&result);
}
void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
- // TODO(X64): Use inline floating point in the fast case.
+ JumpTarget done;
+ JumpTarget call_runtime;
ASSERT(args->length() == 1);
- // Load number.
+ // Load number and duplicate it.
Load(args->at(0));
+ frame_->Dup();
+
+ // Get the number into an unaliased register and load it onto the
+ // floating point stack still leaving one copy on the frame.
+ Result number = frame_->Pop();
+ number.ToRegister();
+ frame_->Spill(number.reg());
+ FloatingPointHelper::LoadFloatOperand(masm_, number.reg());
+ number.Unuse();
+
+ // Perform the operation on the number.
+ switch (op) {
+ case SIN:
+ __ fsin();
+ break;
+ case COS:
+ __ fcos();
+ break;
+ }
+
+ // Go to the slow case if the argument to the operation is out of range.
+ Result eax_reg = allocator()->Allocate(rax);
+ ASSERT(eax_reg.is_valid());
+ __ fnstsw_ax();
+ __ testl(rax, Immediate(0x0400)); // Bit 10 is condition flag C2.
+ eax_reg.Unuse();
+ call_runtime.Branch(not_zero);
+
+ // Allocate heap number for result if possible.
+ Result scratch = allocator()->Allocate();
+ Result heap_number = allocator()->Allocate();
+ FloatingPointHelper::AllocateHeapNumber(masm_,
+ call_runtime.entry_label(),
+ scratch.reg(),
+ heap_number.reg());
+ scratch.Unuse();
+
+ // Store the result in the allocated heap number.
+ __ fstp_d(FieldOperand(heap_number.reg(), HeapNumber::kValueOffset));
+ // Replace the extra copy of the argument with the result.
+ frame_->SetElementAt(0, &heap_number);
+ done.Jump();
+
+ call_runtime.Bind();
+ // Free ST(0) which was not popped before calling into the runtime.
+ __ ffree(0);
Result answer;
switch (op) {
case SIN:
@@ -3360,6 +3803,7 @@ void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
break;
}
frame_->Push(&answer);
+ done.Bind();
}
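After fsin/fcos the code reads the FPU status word (fnstsw_ax) and tests bit 10, the C2 condition flag, which the FPU sets when the argument was outside the range these instructions accept (|x| must be below 2^63); in that case the stub falls back to the runtime. A rough equivalent of that guard written without x87 flags, purely illustrative:

#include <cmath>

// fsin/fcos leave the operand unchanged and set C2 when |x| >= 2^63.
bool FsinFcosArgumentInRange(double x) {
  return std::fabs(x) < 9223372036854775808.0;  // 2^63
}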
@@ -3379,7 +3823,7 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
// functions to make sure they have 'Function' as their class.
__ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
- null.Branch(less);
+ null.Branch(below);
// As long as JS_FUNCTION_TYPE is the last instance type and it is
// right after LAST_JS_OBJECT_TYPE, we can avoid checking for
@@ -3645,7 +4089,7 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
// Smi => false iff zero.
ASSERT(kSmiTag == 0);
- __ testq(value.reg(), value.reg());
+ __ testl(value.reg(), value.reg());
dest->false_target()->Branch(zero);
__ testl(value.reg(), Immediate(kSmiTagMask));
dest->true_target()->Branch(zero);
@@ -3728,7 +4172,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
} else {
// Anything else is a runtime error.
Load(e);
- // frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
+ frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
}
in_spilled_code_ = was_in_spilled_code;
@@ -3966,8 +4410,6 @@ void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- // TODO(X64): Enable more types of slot.
-
if (slot->type() == Slot::LOOKUP) {
ASSERT(slot->var()->is_dynamic());
@@ -4130,7 +4572,7 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
// A test rax instruction following the call signals that the inobject
// property case was inlined. Ensure that there is not a test eax
// instruction here.
- __ nop();
+ masm_->nop();
// Discard the global object. The result is in answer.
frame_->Drop();
return answer;
@@ -4420,108 +4862,6 @@ void CodeGenerator::Comparison(Condition cc,
}
-// Flag that indicates whether or not the code that handles smi arguments
-// should be placed in the stub, inlined, or omitted entirely.
-enum GenericBinaryFlags {
- SMI_CODE_IN_STUB,
- SMI_CODE_INLINED
-};
-
-
-class FloatingPointHelper : public AllStatic {
- public:
- // Code pattern for loading a floating point value. Input value must
- // be either a smi or a heap number object (fp value). Requirements:
- // operand in src register. Returns operand as floating point number
- // in XMM register
- static void LoadFloatOperand(MacroAssembler* masm,
- Register src,
- XMMRegister dst);
- // Code pattern for loading floating point values. Input values must
- // be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
- // floating point numbers in XMM registers.
- static void LoadFloatOperands(MacroAssembler* masm,
- XMMRegister dst1,
- XMMRegister dst2);
-
- // Code pattern for loading floating point values onto the fp stack.
- // Input values must be either smi or heap number objects (fp values).
- // Requirements:
- // Register version: operands in registers lhs and rhs.
- // Stack version: operands on TOS+1 and TOS+2.
- // Returns operands as floating point numbers on fp stack.
- static void LoadFloatOperands(MacroAssembler* masm);
- static void LoadFloatOperands(MacroAssembler* masm,
- Register lhs,
- Register rhs);
-
- // Code pattern for loading a floating point value and converting it
- // to a 32 bit integer. Input value must be either a smi or a heap number
- // object.
- // Returns operands as 32-bit sign extended integers in a general purpose
- // registers.
- static void LoadInt32Operand(MacroAssembler* masm,
- const Operand& src,
- Register dst);
-
- // Test if operands are smi or number objects (fp). Requirements:
- // operand_1 in rax, operand_2 in rdx; falls through on float
- // operands, jumps to the non_float label otherwise.
- static void CheckFloatOperands(MacroAssembler* masm,
- Label* non_float);
- // Allocate a heap number in new space with undefined value.
- // Returns tagged pointer in result, or jumps to need_gc if new space is full.
- static void AllocateHeapNumber(MacroAssembler* masm,
- Label* need_gc,
- Register scratch,
- Register result);
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags)
- : op_(op), mode_(mode), flags_(flags) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
- Token::String(op_),
- static_cast<int>(mode_),
- static_cast<int>(flags_));
- }
-#endif
-
- // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 13> {};
- class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_);
- }
- void Generate(MacroAssembler* masm);
-};
-
-
class DeferredInlineBinaryOperation: public DeferredCode {
public:
DeferredInlineBinaryOperation(Token::Value op,
@@ -4700,7 +5040,7 @@ void DeferredReferenceGetNamedValue::Generate() {
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
// Here we use masm_-> instead of the __ macro because this is the
// instruction that gets patched and coverage code gets in the way.
- masm_->testq(rax, Immediate(-delta_to_patch_site));
+ masm_->testl(rax, Immediate(-delta_to_patch_site));
__ IncrementCounter(&Counters::named_load_inline_miss, 1);
if (!dst_.is(rax)) __ movq(dst_, rax);
@@ -4708,29 +5048,6 @@ void DeferredReferenceGetNamedValue::Generate() {
}
-
-
-// The result of src + value is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative addition and call the appropriate
-// specialized stub for add. The result is left in dst.
-class DeferredInlineSmiAdd: public DeferredCode {
- public:
- DeferredInlineSmiAdd(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAdd");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
void DeferredInlineSmiAdd::Generate() {
__ push(dst_);
__ push(Immediate(value_));
@@ -4762,7 +5079,7 @@ class DeferredInlineSmiAddReversed: public DeferredCode {
void DeferredInlineSmiAddReversed::Generate() {
- __ push(Immediate(value_));
+ __ push(Immediate(value_)); // Note: sign extended.
__ push(dst_);
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
__ CallStub(&igostub);
@@ -4770,37 +5087,28 @@ void DeferredInlineSmiAddReversed::Generate() {
}
-// The result of src - value is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative subtraction and call the
-// appropriate specialized stub for subtract. The result is left in
-// dst.
-class DeferredInlineSmiSub: public DeferredCode {
- public:
- DeferredInlineSmiSub(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiSub");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
void DeferredInlineSmiSub::Generate() {
__ push(dst_);
- __ push(Immediate(value_));
+ __ push(Immediate(value_)); // Note: sign extended.
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
__ CallStub(&igostub);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
+void DeferredInlineSmiOperation::Generate() {
+ __ push(src_);
+ __ push(Immediate(value_)); // Note: sign extended.
+ // For mod we don't generate all the Smi code inline.
+ GenericBinaryOpStub stub(
+ op_,
+ overwrite_mode_,
+ (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
+ __ CallStub(&stub);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> value,
@@ -4829,6 +5137,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
// Get the literal value.
Smi* smi_value = Smi::cast(*value);
+ int int_value = smi_value->value();
switch (op) {
case Token::ADD: {
@@ -4851,15 +5160,43 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Label add_success;
__ j(no_overflow, &add_success);
__ subl(operand->reg(), Immediate(smi_value));
- __ movsxlq(operand->reg(), operand->reg());
deferred->Jump();
__ bind(&add_success);
- __ movsxlq(operand->reg(), operand->reg());
deferred->BindExit();
frame_->Push(operand);
break;
}
// TODO(X64): Move other implementations from ia32 to here.
+
+ // Generate inline code for mod of powers of 2 and negative powers of 2.
+ case Token::MOD:
+ if (!reversed &&
+ int_value != 0 &&
+ (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredCode* deferred = new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ // Check for negative or non-Smi left hand side.
+ __ testl(operand->reg(),
+ Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000)));
+ deferred->Branch(not_zero);
+ if (int_value < 0) int_value = -int_value;
+ if (int_value == 1) {
+ __ movl(operand->reg(), Immediate(Smi::FromInt(0)));
+ } else {
+ __ and_(operand->reg(), Immediate((int_value << kSmiTagSize) - 1));
+ }
+ deferred->BindExit();
+ frame_->Push(operand);
+ break; // This break only applies if we generated code for MOD.
+ }
+ // Fall through if we did not find a power of 2 on the right hand side!
+ // The next case must be the default.
+
default: {
Result constant_operand(value);
if (reversed) {
@@ -4965,35 +5302,36 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
}
deferred->Branch(not_zero);
- if (!left_is_in_rax) __ movq(rax, left->reg());
- // Sign extend rax into rdx:rax.
- __ cqo();
+ // All operations on the smi values are on 32-bit registers, which are
+ // zero-extended into 64-bits by all 32-bit operations.
+ if (!left_is_in_rax) __ movl(rax, left->reg());
+ // Sign extend eax into edx:eax.
+ __ cdq();
// Check for 0 divisor.
- __ testq(right->reg(), right->reg());
+ __ testl(right->reg(), right->reg());
deferred->Branch(zero);
// Divide rdx:rax by the right operand.
- __ idiv(right->reg());
+ __ idivl(right->reg());
// Complete the operation.
if (op == Token::DIV) {
- // Check for negative zero result. If result is zero, and divisor
- // is negative, return a floating point negative zero. The
- // virtual frame is unchanged in this block, so local control flow
- // can use a Label rather than a JumpTarget.
+ // Check for negative zero result. If the result is zero, and the
+ // divisor is negative, return a floating point negative zero.
Label non_zero_result;
- __ testq(left->reg(), left->reg());
+ __ testl(left->reg(), left->reg());
__ j(not_zero, &non_zero_result);
- __ testq(right->reg(), right->reg());
+ __ testl(right->reg(), right->reg());
deferred->Branch(negative);
+ // The frame is identical on all paths reaching this label.
__ bind(&non_zero_result);
// Check for the corner case of dividing the most negative smi by
// -1. We cannot use the overflow flag, since it is not set by
// idiv instruction.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmpq(rax, Immediate(0x40000000));
+ __ cmpl(rax, Immediate(0x40000000));
deferred->Branch(equal);
// Check that the remainder is zero.
- __ testq(rdx, rdx);
+ __ testl(rdx, rdx);
deferred->Branch(not_zero);
// Tag the result and store it in the quotient register.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
@@ -5004,15 +5342,14 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
frame_->Push(&quotient);
} else {
ASSERT(op == Token::MOD);
- // Check for a negative zero result. If the result is zero, and
- // the dividend is negative, return a floating point negative
- // zero. The frame is unchanged in this block, so local control
- // flow can use a Label rather than a JumpTarget.
+ // Check for a negative zero result. If the result is zero, and the
+ // dividend is negative, return a floating point negative zero.
Label non_zero_result;
- __ testq(rdx, rdx);
+ __ testl(rdx, rdx);
__ j(not_zero, &non_zero_result);
- __ testq(left->reg(), left->reg());
+ __ testl(left->reg(), left->reg());
deferred->Branch(negative);
+ // The frame is identical on all paths reaching this label.
__ bind(&non_zero_result);
deferred->BindExit();
left->Unuse();
@@ -5056,9 +5393,9 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->Branch(not_zero);
// Untag both operands.
- __ movq(answer.reg(), left->reg());
- __ sar(answer.reg(), Immediate(kSmiTagSize));
- __ sar(rcx, Immediate(kSmiTagSize));
+ __ movl(answer.reg(), left->reg());
+ __ sarl(answer.reg(), Immediate(kSmiTagSize));
+ __ sarl(rcx, Immediate(kSmiTagSize));
// Perform the operation.
switch (op) {
case Token::SAR:
@@ -5154,7 +5491,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
ASSERT(kSmiTag == 0); // Adjust code below if not the case.
// Remove smi tag from the left operand (but keep sign).
// Left-hand operand has been copied into answer.
- __ sar(answer.reg(), Immediate(kSmiTagSize));
+ __ sarl(answer.reg(), Immediate(kSmiTagSize));
// Do multiplication of smis, leaving result in answer.
__ imull(answer.reg(), right->reg());
// Go slow on overflows.
@@ -5164,7 +5501,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// in this block, so local control flow can use a Label rather
// than a JumpTarget.
Label non_zero_result;
- __ testq(answer.reg(), answer.reg());
+ __ testl(answer.reg(), answer.reg());
__ j(not_zero, &non_zero_result);
__ movq(answer.reg(), left->reg());
__ or_(answer.reg(), right->reg());
@@ -5183,6 +5520,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
break;
case Token::BIT_XOR:
+ ASSERT(kSmiTag == 0); // Adjust code below if not the case.
__ xor_(answer.reg(), right->reg());
break;
@@ -5267,9 +5605,20 @@ void Reference::GetValue(TypeofState typeof_state) {
Comment cmnt(masm, "[ Inlined named property load");
Result receiver = cgen_->frame()->Pop();
receiver.ToRegister();
-
Result value = cgen_->allocator()->Allocate();
ASSERT(value.is_valid());
+ // Cannot use r12 for receiver, because that changes
+ // the distance between a call and a fixup location,
+ // due to a special encoding of r12 as r/m in a ModR/M byte.
+ if (receiver.reg().is(r12)) {
+ // Swap receiver and value.
+ __ movq(value.reg(), receiver.reg());
+ Result temp = receiver;
+ receiver = value;
+ value = temp;
+ cgen_->frame()->Spill(value.reg()); // r12 may have been shared.
+ }
+
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(value.reg(),
receiver.reg(),
@@ -5288,7 +5637,8 @@ void Reference::GetValue(TypeofState typeof_state) {
kScratchRegister);
// This branch is always a forwards branch so it's always a fixed
// size which allows the assert below to succeed and patching to work.
- deferred->Branch(not_equal);
+ // Don't use deferred->Branch(...), since that might add coverage code.
+ masm->j(not_equal, deferred->entry_label());
// The delta from the patch label to the load offset must be
// statically known.
@@ -5315,26 +5665,118 @@ void Reference::GetValue(TypeofState typeof_state) {
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
+
// Inline array load code if inside of a loop. We do not know
// the receiver map yet, so we initially generate the code with
// a check against an invalid map. In the inline cache code, we
// patch the map check if appropriate.
+ if (cgen_->loop_nesting() > 0) {
+ Comment cmnt(masm, "[ Inlined load from keyed Property");
- // TODO(x64): Implement inlined loads for keyed properties.
- // Make sure to load length field as a 32-bit quantity.
- // Comment cmnt(masm, "[ Load from keyed Property");
-
- RelocInfo::Mode mode = is_global
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
- cgen_->frame()->Push(&answer);
+ Result key = cgen_->frame()->Pop();
+ Result receiver = cgen_->frame()->Pop();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ // Use a fresh temporary to load the elements without destroying
+ // the receiver which is needed for the deferred slow case.
+ Result elements = cgen_->allocator()->Allocate();
+ ASSERT(elements.is_valid());
+
+ // Use a fresh temporary for the index and later the loaded
+ // value.
+ Result index = cgen_->allocator()->Allocate();
+ ASSERT(index.is_valid());
+
+ DeferredReferenceGetKeyedValue* deferred =
+ new DeferredReferenceGetKeyedValue(index.reg(),
+ receiver.reg(),
+ key.reg(),
+ is_global);
+
+ // Check that the receiver is not a smi (only needed if this
+ // is not a load from the global context) and that it has the
+ // expected map.
+ if (!is_global) {
+ __ testl(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
+ }
+
+ // Initially, use an invalid map. The map is patched in the IC
+ // initialization code.
+ __ bind(deferred->patch_site());
+ // Use masm-> here instead of the double underscore macro since extra
+ // coverage code can interfere with the patching.
+ masm->movq(kScratchRegister, Factory::null_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ deferred->Branch(not_equal);
+
+ // Check that the key is a non-negative smi.
+ __ testl(key.reg(),
+ Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000u)));
+ deferred->Branch(not_zero);
+
+ // Get the elements array from the receiver and check that it
+ // is not a dictionary.
+ __ movq(elements.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+ Factory::fixed_array_map());
+ deferred->Branch(not_equal);
+
+ // Shift the key to get the actual index value and check that
+ // it is within bounds.
+ __ movl(index.reg(), key.reg());
+ __ shrl(index.reg(), Immediate(kSmiTagSize));
+ __ cmpl(index.reg(),
+ FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+ deferred->Branch(above_equal);
+
+ // The index register holds the un-smi-tagged key. It has been
+ // zero-extended to 64-bits, so it can be used directly as index in the
+ // operand below.
+ // Load and check that the result is not the hole. We could
+ // reuse the index or elements register for the value.
+ //
+ // TODO(206): Consider whether it makes sense to try some
+ // heuristic about which register to reuse. For example, if
+ // one is rax, then we can reuse that one because the value
+ // coming from the deferred code will be in rax.
+ Result value = index;
+ __ movq(value.reg(),
+ Operand(elements.reg(),
+ index.reg(),
+ times_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ elements.Unuse();
+ index.Unuse();
+ __ Cmp(value.reg(), Factory::the_hole_value());
+ deferred->Branch(equal);
+ __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+ deferred->BindExit();
+ // Restore the receiver and key to the frame and push the
+ // result on top of it.
+ cgen_->frame()->Push(&receiver);
+ cgen_->frame()->Push(&key);
+ cgen_->frame()->Push(&value);
+
+ } else {
+ Comment cmnt(masm, "[ Load from keyed Property");
+ RelocInfo::Mode mode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed load. The explicit nop instruction is here because
+ // the push that follows might be peep-hole optimized away.
+ __ nop();
+ cgen_->frame()->Push(&answer);
+ }
break;
}
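The inlined load above boils down to a handful of checks before indexing the elements array directly: the receiver's map matches the patched map, the key is a non-negative smi, the elements array has the fixed-array map, the untagged key is within bounds, and the loaded slot is not the hole. A conceptual C++ rendering of that fast path follows; types and helpers are hypothetical, not from this patch:

#include <cstdint>

struct ElementsRef {
  int length;           // FixedArray::kLengthOffset
  void* const* slots;   // FixedArray::kHeaderSize onwards
};

// Returns nullptr when the deferred (IC) path must be taken instead.
void* FastKeyedLoad(const ElementsRef& elements, int32_t key_smi, void* hole) {
  if (key_smi < 0 || (key_smi & 1) != 0) return nullptr;  // non-negative smi keys only
  int index = key_smi >> 1;                               // untag the key
  if (index >= elements.length) return nullptr;           // bounds check
  void* value = elements.slots[index];
  return value == hole ? nullptr : value;                 // hole -> slow case
}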
@@ -5361,13 +5803,16 @@ void Reference::TakeValue(TypeofState typeof_state) {
ASSERT(slot != NULL);
if (slot->type() == Slot::LOOKUP ||
slot->type() == Slot::CONTEXT ||
- slot->var()->mode() == Variable::CONST) {
+ slot->var()->mode() == Variable::CONST ||
+ slot->is_arguments()) {
GetValue(typeof_state);
return;
}
// Only non-constant, frame-allocated parameters and locals can reach
- // here.
+ // here. Be careful not to use the optimizations for arguments
+ // object access since it may not have been initialized yet.
+ ASSERT(!slot->is_arguments());
if (slot->type() == Slot::PARAMETER) {
cgen_->frame()->TakeParameterAt(slot->index());
} else {
@@ -5401,15 +5846,105 @@ void Reference::SetValue(InitState init_state) {
case KEYED: {
Comment cmnt(masm, "[ Store to keyed Property");
- // TODO(x64): Implement inlined version of keyed stores.
+ // Generate inlined version of the keyed store if the code is in
+ // a loop and the key is likely to be a smi.
+ Property* property = expression()->AsProperty();
+ ASSERT(property != NULL);
+ SmiAnalysis* key_smi_analysis = property->key()->type();
- Result answer = cgen_->frame()->CallKeyedStoreIC();
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed store.
- __ nop();
- cgen_->frame()->Push(&answer);
+ if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
+ Comment cmnt(masm, "[ Inlined store to keyed Property");
+
+ // Get the receiver, key and value into registers.
+ Result value = cgen_->frame()->Pop();
+ Result key = cgen_->frame()->Pop();
+ Result receiver = cgen_->frame()->Pop();
+
+ Result tmp = cgen_->allocator_->Allocate();
+ ASSERT(tmp.is_valid());
+
+ // Determine whether the value is a constant before putting it
+ // in a register.
+ bool value_is_constant = value.is_constant();
+
+ // Make sure that value, key and receiver are in registers.
+ value.ToRegister();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ DeferredReferenceSetKeyedValue* deferred =
+ new DeferredReferenceSetKeyedValue(value.reg(),
+ key.reg(),
+ receiver.reg());
+
+ // Check that the value is a smi if it is not a constant.
+ // We can skip the write barrier for smis and constants.
+ if (!value_is_constant) {
+ __ testl(value.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ }
+
+ // Check that the key is a non-negative smi.
+ __ testl(key.reg(),
+ Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
+ deferred->Branch(not_zero);
+
+ // Check that the receiver is not a smi.
+ __ testl(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
+
+ // Check that the receiver is a JSArray.
+ __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
+ deferred->Branch(not_equal);
+
+ // Check that the key is within bounds. Both the key and the
+ // length of the JSArray are smis, so compare only low 32 bits.
+ __ cmpl(key.reg(),
+ FieldOperand(receiver.reg(), JSArray::kLengthOffset));
+ deferred->Branch(greater_equal);
+
+ // Get the elements array from the receiver and check that it
+ // is a flat array (not a dictionary).
+ __ movq(tmp.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ // Bind the deferred code patch site to be able to locate the
+ // fixed array map comparison. When debugging, we patch this
+ // comparison to always fail so that we will hit the IC call
+ // in the deferred code which will allow the debugger to
+ // break for fast case stores.
+ __ bind(deferred->patch_site());
+ // Avoid using __ to ensure the distance from patch_site
+ // to the map address is always the same.
+ masm->movq(kScratchRegister, Factory::fixed_array_map(),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ deferred->Branch(not_equal);
+
+ // Store the value.
+ ASSERT_EQ(1, kSmiTagSize);
+ ASSERT_EQ(0, kSmiTag);
+ __ movq(Operand(tmp.reg(),
+ key.reg(),
+ times_half_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag),
+ value.reg());
+ __ IncrementCounter(&Counters::keyed_store_inline, 1);
+
+ deferred->BindExit();
+
+ cgen_->frame()->Push(&receiver);
+ cgen_->frame()->Push(&key);
+ cgen_->frame()->Push(&value);
+ } else {
+ Result answer = cgen_->frame()->CallKeyedStoreIC();
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed store.
+ masm->nop();
+ cgen_->frame()->Push(&answer);
+ }
break;
}
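The inlined keyed store above only proceeds when the write barrier can be skipped (the value is a smi or a constant), the key is a non-negative smi below the JSArray length, and the elements array has the fixed-array map; everything else branches to the deferred IC call. A hypothetical sketch of the same decision logic, for illustration only:

#include <cstdint>

struct ArrayRef {
  int32_t length_smi;   // JSArray::kLengthOffset, still smi-tagged
  void** slots;         // backing FixedArray elements
};

bool FastKeyedStore(ArrayRef& array, int32_t key_smi, void* value,
                    bool value_is_smi_or_constant) {
  if (!value_is_smi_or_constant) return false;          // would need the write barrier
  if (key_smi < 0 || (key_smi & 1) != 0) return false;  // non-negative smi keys only
  if (key_smi >= array.length_smi) return false;        // both tagged: compare directly
  array.slots[key_smi >> 1] = value;                    // untag the key to index
  return true;
}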
@@ -5447,7 +5982,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ and_(rcx, Immediate(kStringSizeMask));
__ cmpq(rcx, Immediate(kShortStringTag));
__ j(not_equal, &true_result); // Empty string is always short.
- __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
+ __ movl(rdx, FieldOperand(rax, String::kLengthOffset));
__ shr(rdx, Immediate(String::kShortLengthShift));
__ j(zero, &false_result);
__ jmp(&true_result);
@@ -6135,22 +6670,23 @@ void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
// Fetch top stack handler.
ExternalReference handler_address(Top::k_handler_address);
__ movq(kScratchRegister, handler_address);
- __ movq(rdx, Operand(kScratchRegister, 0));
+ __ movq(rsp, Operand(kScratchRegister, 0));
// Unwind the handlers until the ENTRY handler is found.
Label loop, done;
__ bind(&loop);
// Load the type of the current stack handler.
- __ cmpq(Operand(rdx, StackHandlerConstants::kStateOffset),
+ __ cmpq(Operand(rsp, StackHandlerConstants::kStateOffset),
Immediate(StackHandler::ENTRY));
__ j(equal, &done);
// Fetch the next handler in the list.
- __ movq(rdx, Operand(rdx, StackHandlerConstants::kNextOffset));
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(rsp);
__ jmp(&loop);
__ bind(&done);
// Set the top handler address to next handler past the current ENTRY handler.
- __ movq(rax, Operand(rdx, StackHandlerConstants::kNextOffset));
+ __ pop(rax);
__ store_rax(handler_address);
// Set external caught exception to false.
@@ -6163,14 +6699,12 @@ void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
ExternalReference pending_exception(Top::k_pending_exception_address);
__ store_rax(pending_exception);
- // Restore the stack to the address of the ENTRY handler
- __ movq(rsp, rdx);
-
// Clear the context pointer;
__ xor_(rsi, rsi);
// Restore registers from handler.
-
+ ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
+ StackHandlerConstants::kFPOffset);
__ pop(rbp); // FP
ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
StackHandlerConstants::kStateOffset);
@@ -6286,6 +6820,9 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, exit;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Label not_outermost_js, not_outermost_js_2;
+#endif
// Setup frame.
__ push(rbp);
@@ -6311,6 +6848,17 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ load_rax(c_entry_fp);
__ push(rax);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ __ load_rax(js_entry_sp);
+ __ testq(rax, rax);
+ __ j(not_zero, &not_outermost_js);
+ __ movq(rax, rbp);
+ __ store_rax(js_entry_sp);
+ __ bind(&not_outermost_js);
+#endif
+
// Call a faked try-block that does the invoke.
__ call(&invoke);
@@ -6353,6 +6901,16 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Pop next_sp.
__ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If the current RBP value is the same as the js_entry_sp value, it means
+ // that the current function is the outermost.
+ __ movq(kScratchRegister, js_entry_sp);
+ __ cmpq(rbp, Operand(kScratchRegister, 0));
+ __ j(not_equal, &not_outermost_js_2);
+ __ movq(Operand(kScratchRegister, 0), Immediate(0));
+ __ bind(&not_outermost_js_2);
+#endif
+
// Restore the top frame descriptor from the stack.
__ bind(&exit);
__ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
@@ -6418,6 +6976,24 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
}
+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+ Register number) {
+ Label load_smi, done;
+
+ __ testl(number, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi);
+ __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi);
+ __ sarl(number, Immediate(kSmiTagSize));
+ __ push(number);
+ __ fild_s(Operand(rsp, 0));
+ __ pop(number);
+
+ __ bind(&done);
+}
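The new FPU-stack variant of LoadFloatOperand follows the usual pattern: a smi is untagged and converted with fild_s, while a heap number has its double payload loaded with fld_d. A behavioral sketch in plain C++ (illustration only, not the stub):

#include <cstdint>

// 'tagged' is either a smi (low bit 0) or refers to a heap number payload.
double LoadFloatOperandEquivalent(int64_t tagged, const double* heap_number_value) {
  if ((tagged & 1) == 0) {
    return static_cast<double>(static_cast<int32_t>(tagged) >> 1);  // untag the smi
  }
  return *heap_number_value;  // HeapNumber::kValueOffset payload
}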
+
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register src,
@@ -6430,7 +7006,7 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
__ jmp(&done);
__ bind(&load_smi);
- __ sar(src, Immediate(kSmiTagSize));
+ __ sarl(src, Immediate(kSmiTagSize));
__ cvtlsi2sd(dst, src);
__ bind(&done);
@@ -6486,6 +7062,7 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
__ bind(&done);
}
+
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
Register lhs,
Register rhs) {
@@ -6520,6 +7097,7 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
__ bind(&done);
}
+
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float) {
Label test_other, done;
@@ -6557,13 +7135,14 @@ const char* GenericBinaryOpStub::GetName() {
}
}
+
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform fast-case smi code for the operation (rax <op> rbx) and
// leave result in register rax.
// Smi check both operands.
__ movq(rcx, rbx);
- __ or_(rcx, rax);
+ __ or_(rcx, rax); // The value in ecx is used for negative zero test later.
__ testl(rcx, Immediate(kSmiTagMask));
__ j(not_zero, slow);
@@ -6571,14 +7150,12 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
case Token::ADD: {
__ addl(rax, rbx);
__ j(overflow, slow); // The slow case rereads operands from the stack.
- __ movsxlq(rax, rax); // Sign extend eax into rax.
break;
}
case Token::SUB: {
__ subl(rax, rbx);
__ j(overflow, slow); // The slow case rereads operands from the stack.
- __ movsxlq(rax, rax); // Sign extend eax into rax.
break;
}
@@ -6586,27 +7163,25 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// If the smi tag is 0 we can just leave the tag on one operand.
ASSERT(kSmiTag == 0); // adjust code below if not the case
// Remove tag from one of the operands (but keep sign).
- __ sar(rax, Immediate(kSmiTagSize));
+ __ sarl(rax, Immediate(kSmiTagSize));
// Do multiplication.
__ imull(rax, rbx); // multiplication of smis; result in eax
// Go slow on overflows.
__ j(overflow, slow);
// Check for negative zero result.
- __ movsxlq(rax, rax); // Sign extend eax into rax.
- __ NegativeZeroTest(rax, rcx, slow); // use rcx = x | y
+ __ NegativeZeroTest(rax, rcx, slow); // ecx (not rcx) holds x | y.
break;
case Token::DIV:
- // Sign extend rax into rdx:rax
- // (also sign extends eax into edx if eax is Smi).
- __ cqo();
+ // Sign extend eax into edx:eax.
+ __ cdq();
// Check for 0 divisor.
- __ testq(rbx, rbx);
+ __ testl(rbx, rbx);
__ j(zero, slow);
- // Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax).
- __ idiv(rbx);
+ // Divide edx:eax by ebx (where edx:eax is equivalent to the smi in eax).
+ __ idivl(rbx);
// Check that the remainder is zero.
- __ testq(rdx, rdx);
+ __ testl(rdx, rdx);
__ j(not_zero, slow);
// Check for the corner case of dividing the most negative smi
// by -1. We cannot use the overflow flag, since it is not set
@@ -6614,28 +7189,27 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
// TODO(X64): TODO(Smi): Smi implementation dependent constant.
// Value is Smi::fromInt(-(1<<31)) / Smi::fromInt(-1)
- __ cmpq(rax, Immediate(0x40000000));
+ __ cmpl(rax, Immediate(0x40000000));
__ j(equal, slow);
// Check for negative zero result.
- __ NegativeZeroTest(rax, rcx, slow); // use ecx = x | y
+ __ NegativeZeroTest(rax, rcx, slow); // ecx (not rcx) holds x | y.
// Tag the result and store it in register rax.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ lea(rax, Operand(rax, rax, times_1, kSmiTag));
break;
case Token::MOD:
- // Sign extend rax into rdx:rax
- // (also sign extends eax into edx if eax is Smi).
- __ cqo();
+ // Sign extend eax into edx:eax
+ __ cdq();
// Check for 0 divisor.
- __ testq(rbx, rbx);
+ __ testl(rbx, rbx);
__ j(zero, slow);
- // Divide rdx:rax by rbx.
- __ idiv(rbx);
+ // Divide edx:eax by ebx.
+ __ idivl(rbx);
// Check for negative zero result.
- __ NegativeZeroTest(rdx, rcx, slow); // use ecx = x | y
+ __ NegativeZeroTest(rdx, rcx, slow); // ecx (not rcx) holds x | y.
// Move remainder to register rax.
- __ movq(rax, rdx);
+ __ movl(rax, rdx);
break;
case Token::BIT_OR:
@@ -6655,7 +7229,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
case Token::SHR:
case Token::SAR:
// Move the second operand into register ecx.
- __ movq(rcx, rbx);
+ __ movl(rcx, rbx);
// Remove tags from operands (but keep sign).
__ sarl(rax, Immediate(kSmiTagSize));
__ sarl(rcx, Immediate(kSmiTagSize));
@@ -6702,7 +7276,6 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label call_runtime;
-
if (flags_ == SMI_CODE_IN_STUB) {
// The fast case smi code wasn't inlined in the stub caller
// code. Generate it here to speed up common operations.
diff --git a/V8Binding/v8/src/x64/codegen-x64.h b/V8Binding/v8/src/x64/codegen-x64.h
index bb4b538..b1c61d8 100644
--- a/V8Binding/v8/src/x64/codegen-x64.h
+++ b/V8Binding/v8/src/x64/codegen-x64.h
@@ -361,7 +361,7 @@ class CodeGenerator: public AstVisitor {
#define DEF_VISIT(type) \
void Visit##type(type* node);
- NODE_LIST(DEF_VISIT)
+ AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Visit a statement and then spill the virtual frame if control flow can
@@ -534,6 +534,8 @@ class CodeGenerator: public AstVisitor {
void GenerateLog(ZoneList<Expression*>* args);
+ void GenerateGetFramePointer(ZoneList<Expression*>* args);
+
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
@@ -548,7 +550,7 @@ class CodeGenerator: public AstVisitor {
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Node* node);
+ void CodeForStatementPosition(AstNode* node);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
@@ -593,10 +595,72 @@ class CodeGenerator: public AstVisitor {
friend class Reference;
friend class Result;
+ friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
+
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
+// -------------------------------------------------------------------------
+// Code stubs
+//
+// These independent code objects are created once, and used multiple
+// times by generated code to perform common tasks, often the slow
+// case of a JavaScript operation. They are all subclasses of CodeStub,
+// which is declared in code-stubs.h.
+
+
+// Flag that indicates whether or not the code that handles smi arguments
+// should be placed in the stub, inlined, or omitted entirely.
+enum GenericBinaryFlags {
+ SMI_CODE_IN_STUB,
+ SMI_CODE_INLINED
+};
+
+
+class GenericBinaryOpStub: public CodeStub {
+ public:
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ GenericBinaryFlags flags)
+ : op_(op), mode_(mode), flags_(flags) {
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ GenericBinaryFlags flags_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+ Token::String(op_),
+ static_cast<int>(mode_),
+ static_cast<int>(flags_));
+ }
+#endif
+
+ // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 13> {};
+ class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | FlagBits::encode(flags_);
+ }
+ void Generate(MacroAssembler* masm);
+};
+
+
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
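The minor key packs the stub parameters into the 16-bit layout named in the comment above (FOOOOOOOOOOOOOMM): two mode bits, thirteen op bits, one flag bit. A small sketch of the same packing, illustrative only:

#include <cstdint>

uint16_t EncodeGenericBinaryMinorKey(int op, int mode, int flags) {
  return static_cast<uint16_t>((mode & 0x3)               // bits 0..1:  OverwriteMode
                               | ((op & 0x1FFF) << 2)     // bits 2..14: Token::Value
                               | ((flags & 0x1) << 15));  // bit 15:     GenericBinaryFlags
}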
diff --git a/V8Binding/v8/src/x64/disasm-x64.cc b/V8Binding/v8/src/x64/disasm-x64.cc
index 83fa9cd..cc8365c 100644
--- a/V8Binding/v8/src/x64/disasm-x64.cc
+++ b/V8Binding/v8/src/x64/disasm-x64.cc
@@ -88,7 +88,7 @@ static ByteMnemonic two_operands_instr[] = {
{ 0x39, OPER_REG_OP_ORDER, "cmp" },
{ 0x3A, BYTE_REG_OPER_OP_ORDER, "cmp" },
{ 0x3B, REG_OPER_OP_ORDER, "cmp" },
- { 0x8D, REG_OPER_OP_ORDER, "lea" },
+ { 0x63, REG_OPER_OP_ORDER, "movsxlq" },
{ 0x84, BYTE_REG_OPER_OP_ORDER, "test" },
{ 0x85, REG_OPER_OP_ORDER, "test" },
{ 0x86, BYTE_REG_OPER_OP_ORDER, "xchg" },
@@ -97,6 +97,7 @@ static ByteMnemonic two_operands_instr[] = {
{ 0x89, OPER_REG_OP_ORDER, "mov" },
{ 0x8A, BYTE_REG_OPER_OP_ORDER, "mov" },
{ 0x8B, REG_OPER_OP_ORDER, "mov" },
+ { 0x8D, REG_OPER_OP_ORDER, "lea" },
{ -1, UNSET_OP_ORDER, "" }
};
@@ -139,7 +140,7 @@ static ByteMnemonic short_immediate_instr[] = {
static const char* conditional_code_suffix[] = {
- "o", "no", "c", "nc", "z", "nz", "a", "na",
+ "o", "no", "c", "nc", "z", "nz", "na", "a",
"s", "ns", "pe", "po", "l", "ge", "le", "g"
};
@@ -252,6 +253,24 @@ void InstructionTable::AddJumpConditionalShort() {
static InstructionTable instruction_table;
+static InstructionDesc cmov_instructions[16] = {
+ {"cmovo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovno", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovnc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovnz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovna", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmova", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovs", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovns", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovpe", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovpo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovl", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovge", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovle", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovg", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false}
+};
//------------------------------------------------------------------------------
// DisassemblerX64 implementation.
@@ -533,7 +552,7 @@ int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
value = 0; // Initialize variables on all paths to satisfy the compiler.
count = 0;
}
- AppendToBuffer(V8_PTR_PREFIX"x", value);
+ AppendToBuffer("%" V8_PTR_PREFIX "x", value);
return count;
}
@@ -687,7 +706,7 @@ int DisassemblerX64::ShiftInstruction(byte* data) {
byte modrm = *(data + 1);
int mod, regop, rm;
get_modrm(modrm, &mod, &regop, &rm);
- ASSERT(regop < 8);
+ regop &= 0x7; // The REX.R bit does not affect the operation.
int imm8 = -1;
int num_bytes = 2;
if (mod != 3) {
@@ -966,6 +985,13 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
// RDTSC or CPUID
AppendToBuffer("%s", mnemonic);
+ } else if ((opcode & 0xF0) == 0x40) {
+ // CMOVcc: conditional move.
+ int condition = opcode & 0x0F;
+ const InstructionDesc& idesc = cmov_instructions[condition];
+ byte_size_operand_ = idesc.byte_size_operation;
+ current += PrintOperands(idesc.mnem, idesc.op_order_, current);
+
} else if ((opcode & 0xF0) == 0x80) {
// Jcc: Conditional jump (branch).
current = data + JumpConditional(data);
@@ -1343,6 +1369,39 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 2;
break;
+ case 0xA1: // Fall through.
+ case 0xA3:
+ switch (operand_size()) {
+ case DOUBLEWORD_SIZE: {
+ const char* memory_location = NameOfAddress(
+ reinterpret_cast<byte*>(
+ *reinterpret_cast<int32_t*>(data + 1)));
+ if (*data == 0xA1) { // Opcode 0xA1
+ AppendToBuffer("movzxlq rax,(%s)", memory_location);
+ } else { // Opcode 0xA3
+ AppendToBuffer("movzxlq (%s),rax", memory_location);
+ }
+ data += 5;
+ break;
+ }
+ case QUADWORD_SIZE: {
+ // New x64 instruction mov rax,(imm_64).
+ const char* memory_location = NameOfAddress(
+ *reinterpret_cast<byte**>(data + 1));
+ if (*data == 0xA1) { // Opcode 0xA1
+ AppendToBuffer("movq rax,(%s)", memory_location);
+ } else { // Opcode 0xA3
+ AppendToBuffer("movq (%s),rax", memory_location);
+ }
+ data += 9;
+ break;
+ }
+ default:
+ UnimplementedInstruction();
+ data += 2;
+ }
+ break;
+
case 0xA8:
AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data + 1));
data += 2;
diff --git a/V8Binding/v8/src/x64/ic-x64.cc b/V8Binding/v8/src/x64/ic-x64.cc
index db74baf..86008eb 100644
--- a/V8Binding/v8/src/x64/ic-x64.cc
+++ b/V8Binding/v8/src/x64/ic-x64.cc
@@ -42,16 +42,181 @@ namespace internal {
#define __ ACCESS_MASM(masm)
+// Helper function used to load a property from a dictionary backing storage.
+// This function may return false negatives, so the code at miss_label
+// must always fall back to a complete property load.
+// This function is safe to call if the receiver has fast properties,
+// or if name is not a symbol, and will jump to the miss_label in that case.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+ Register r0, Register r1, Register r2,
+ Register name) {
+ // Register use:
+ //
+ // r0 - used to hold the property dictionary.
+ //
+ // r1 - initially the receiver
+ // - used for the index into the property dictionary
+ // - holds the result on exit.
+ //
+ // r2 - used to hold the capacity of the property dictionary.
+ //
+ // name - holds the name of the property and is unchanged.
+
+ Label done;
+
+ // Check for the absence of an interceptor.
+ // Load the map into r0.
+ __ movq(r0, FieldOperand(r1, JSObject::kMapOffset));
+ // Test the has_named_interceptor bit in the map.
+ __ testl(FieldOperand(r0, Map::kInstanceAttributesOffset),
+ Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
+
+ // Jump to miss if the interceptor bit is set.
+ __ j(not_zero, miss_label);
+
+ // Bail out if we have a JS global proxy object.
+ __ movzxbq(r0, FieldOperand(r0, Map::kInstanceTypeOffset));
+ __ cmpb(r0, Immediate(JS_GLOBAL_PROXY_TYPE));
+ __ j(equal, miss_label);
+
+ // Possible work-around for http://crbug.com/16276.
+ __ cmpb(r0, Immediate(JS_GLOBAL_OBJECT_TYPE));
+ __ j(equal, miss_label);
+ __ cmpb(r0, Immediate(JS_BUILTINS_OBJECT_TYPE));
+ __ j(equal, miss_label);
+
+ // Check that the properties array is a dictionary.
+ __ movq(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
+ __ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
+ __ j(not_equal, miss_label);
+
+ // Compute the capacity mask.
+ const int kCapacityOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kCapacityIndex * kPointerSize;
+ __ movq(r2, FieldOperand(r0, kCapacityOffset));
+ __ shrl(r2, Immediate(kSmiTagSize)); // convert smi to int
+ __ decl(r2);
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up. Measurements done on Gmail indicate that 2 probes
+ // cover ~93% of loads from dictionaries.
+ static const int kProbes = 4;
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ for (int i = 0; i < kProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ movl(r1, FieldOperand(name, String::kLengthOffset));
+ __ shrl(r1, Immediate(String::kHashShift));
+ if (i > 0) {
+ __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
+ }
+ __ and_(r1, r2);
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
+
+ // Check if the key is identical to the name.
+ __ cmpq(name, Operand(r0, r1, times_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ if (i != kProbes - 1) {
+ __ j(equal, &done);
+ } else {
+ __ j(not_equal, miss_label);
+ }
+ }
+
+ // Check that the value is a normal property.
+ __ bind(&done);
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ __ testl(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
+ Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
+ __ j(not_zero, miss_label);
+
+ // Get the value at the masked, scaled index.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ movq(r1,
+ Operand(r0, r1, times_pointer_size, kValueOffset - kHeapObjectTag));
+}
+
+
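GenerateDictionaryLoad above emits an unrolled version of the string dictionary's open-addressing probe: probe i inspects the entry at (hash + i + i*i) & capacity_mask (the real probe offsets come from StringDictionary::GetProbeOffset), and each entry spans kEntrySize pointer slots holding key, value and details. A small C++ sketch of that index arithmetic, with illustrative names:

// Sketch of the probe-index arithmetic generated above; illustrative only.
#include <cstdint>
#include <cstdio>

static const uint32_t kEntrySize = 3;  // key, value, details

// Element index of probe number 'i' for a given hash; capacity_mask is
// capacity - 1 where capacity is a power of two.
static uint32_t ProbeIndex(uint32_t hash, uint32_t i, uint32_t capacity_mask) {
  uint32_t masked = (hash + i + i * i) & capacity_mask;  // masked probe index
  return masked * kEntrySize;                            // lea(r1, [r1 + r1*2])
}

int main() {
  // Four probes, matching the unrolled loop above.
  for (uint32_t i = 0; i < 4; i++) {
    printf("probe %u -> element index %u\n", i, ProbeIndex(0x1234, i, 127));
  }
  return 0;
}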
+// Helper function used to check that a value is either not an object
+// or is loaded if it is an object.
+static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
+ Register value) {
+ Label done;
+ // Check if the value is a Smi.
+ __ testl(value, Immediate(kSmiTagMask));
+ __ j(zero, &done);
+ // Check if the object has been loaded.
+ __ movq(kScratchRegister, FieldOperand(value, JSFunction::kMapOffset));
+ __ testb(FieldOperand(kScratchRegister, Map::kBitField2Offset),
+ Immediate(1 << Map::kNeedsLoading));
+ __ j(not_zero, miss);
+ __ bind(&done);
+}
+
+
+// One byte opcode for test eax,0xXXXXXXXX.
+static const byte kTestEaxByte = 0xA9;
+
+
+static bool PatchInlinedMapCheck(Address address, Object* map) {
+  // The 'address' argument is the address of the start of the call
+  // sequence that called the IC.
+ Address test_instruction_address =
+ address + Assembler::kTargetAddrToReturnAddrDist;
+ // The keyed load has a fast inlined case if the IC call instruction
+ // is immediately followed by a test instruction.
+ if (*test_instruction_address != kTestEaxByte) return false;
+
+ // Fetch the offset from the test instruction to the map compare
+ // instructions (starting with the 64-bit immediate mov of the map
+ // address). This offset is stored in the last 4 bytes of the 5
+ // byte test instruction.
+ Address delta_address = test_instruction_address + 1;
+ int delta = *reinterpret_cast<int*>(delta_address);
+ // Compute the map address. The map address is in the last 8 bytes
+ // of the 10-byte immediate mov instruction (incl. REX prefix), so we add 2
+ // to the offset to get the map address.
+ Address map_address = test_instruction_address + delta + 2;
+ // Patch the map check.
+ *(reinterpret_cast<Object**>(map_address)) = map;
+ return true;
+}
+
+
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+ return PatchInlinedMapCheck(address, map);
+}
+
+
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+ return PatchInlinedMapCheck(address, map);
+}
+
+
void KeyedLoadIC::ClearInlinedVersion(Address address) {
- // TODO(X64): Implement this when LoadIC is enabled.
+ // Insert null as the map to check for to make sure the map check fails
+ // sending control flow to the IC instead of the inlined version.
+ PatchInlinedLoad(address, Heap::null_value());
}
+
void KeyedStoreIC::ClearInlinedVersion(Address address) {
- // TODO(X64): Implement this when LoadIC is enabled.
+ // Insert null as the elements map to check for. This will make
+ // sure that the elements fast-case map check fails so that control
+ // flows to the IC instead of the inlined version.
+ PatchInlinedStore(address, Heap::null_value());
}
+
void KeyedStoreIC::RestoreInlinedVersion(Address address) {
- UNIMPLEMENTED();
+ // Restore the fast-case elements map check so that the inlined
+ // version can be used again.
+ PatchInlinedStore(address, Heap::fixed_array_map());
}
@@ -65,127 +230,288 @@ void KeyedLoadIC::Generate(MacroAssembler* masm,
__ movq(rax, Operand(rsp, kPointerSize));
__ movq(rcx, Operand(rsp, 2 * kPointerSize));
-
- // Move the return address below the arguments.
__ pop(rbx);
- __ push(rcx);
- __ push(rax);
- __ push(rbx);
+ __ push(rcx); // receiver
+ __ push(rax); // name
+ __ push(rbx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(f, 2);
}
+#ifdef DEBUG
+// For use in assert below.
+static int TenToThe(int exponent) {
+ ASSERT(exponent <= 9);
+ ASSERT(exponent >= 1);
+ int answer = 10;
+ for (int i = 1; i < exponent; i++) answer *= 10;
+ return answer;
+}
+#endif
+
+
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
+ Label slow, fast, check_string, index_int, index_string;
- Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+ // Load name and receiver.
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+
+ // Check that the object isn't a smi.
+ __ testl(rcx, Immediate(kSmiTagMask));
+ __ j(zero, &slow);
+
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object,
+ // we enter the runtime system to make sure that indexing
+ // into string objects work as intended.
+ ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
+ __ j(below, &slow);
+ // Check that the receiver does not require access checks. We need
+ // to check this explicitly since this generic stub does not perform
+ // map checks. The map is already in rdx.
+ __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &slow);
+
+ // Check that the key is a smi.
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(not_zero, &check_string);
+ __ sarl(rax, Immediate(kSmiTagSize));
+ // Get the elements array of the object.
+ __ bind(&index_int);
+ __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
+ // Check that the object is in fast mode (not dictionary).
+ __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
+ __ j(not_equal, &slow);
+ // Check that the key (index) is within bounds.
+ __ cmpl(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ j(below, &fast); // Unsigned comparison rejects negative indices.
+ // Slow case: Load name and receiver from stack and jump to runtime.
+ __ bind(&slow);
+ __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
+ KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+ __ bind(&check_string);
+ // The key is not a smi.
+ // Is it a string?
+ __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
+ __ j(above_equal, &slow);
+ // Is the string an array index, with cached numeric value?
+ __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
+ __ testl(rbx, Immediate(String::kIsArrayIndexMask));
+
+ // If the string is a symbol, do a quick inline probe of the receiver's
+ // dictionary, if it exists.
+ __ j(not_zero, &index_string); // The value in rbx is used at jump target.
+ __ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
+ Immediate(kIsSymbolMask));
+ __ j(zero, &slow);
+  // Probe the dictionary leaving result in rcx.
+ GenerateDictionaryLoad(masm, &slow, rbx, rcx, rdx, rax);
+ GenerateCheckNonObjectOrLoaded(masm, &slow, rcx);
+ __ movq(rax, rcx);
+ __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
+ __ ret(0);
+  // Array index string: If short enough use cache in length/hash field (rbx).
+ // We assert that there are enough bits in an int32_t after the hash shift
+ // bits have been subtracted to allow space for the length and the cached
+ // array index.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << (String::kShortLengthShift - String::kHashShift)));
+ __ bind(&index_string);
+ const int kLengthFieldLimit =
+ (String::kMaxCachedArrayIndexLength + 1) << String::kShortLengthShift;
+ __ cmpl(rbx, Immediate(kLengthFieldLimit));
+ __ j(above_equal, &slow);
+ __ movl(rax, rbx);
+ __ and_(rax, Immediate((1 << String::kShortLengthShift) - 1));
+ __ shrl(rax, Immediate(String::kLongLengthShift));
+ __ jmp(&index_int);
+ // Fast case: Do the load.
+ __ bind(&fast);
+ __ movq(rax, Operand(rcx, rax, times_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Cmp(rax, Factory::the_hole_value());
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ j(equal, &slow);
+ __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+ __ ret(0);
}
+
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
-
- Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+ Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
}
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
- // Never patch the map in the map check, so the check always fails.
- return false;
-}
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
- // Never patch the map in the map check, so the check always fails.
- return false;
-}
+void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rsp[0] : return address
+ // -- rsp[8] : key
+ // -- rsp[16] : receiver
+ // -----------------------------------
-Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
- UNIMPLEMENTED();
- return NULL;
-}
+ __ pop(rcx);
+ __ push(Operand(rsp, 1 * kPointerSize)); // receiver
+ __ push(Operand(rsp, 1 * kPointerSize)); // key
+ __ push(rax); // value
+ __ push(rcx); // return address
-Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
- UNIMPLEMENTED();
- return NULL;
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(f, 3);
}
-Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
- JSObject* object,
- JSObject* holder,
- Object* callback) {
- UNIMPLEMENTED();
- return NULL;
-}
-Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
- JSObject* object,
- JSObject* holder,
- int index) {
- UNIMPLEMENTED();
- return NULL;
-}
+void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : transition map
+ // -- rsp[0] : return address
+ // -- rsp[8] : key
+ // -- rsp[16] : receiver
+ // -----------------------------------
-Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
- UNIMPLEMENTED();
- return NULL;
-}
+ __ pop(rbx);
+ __ push(Operand(rsp, 1 * kPointerSize)); // receiver
+ __ push(rcx); // transition map
+ __ push(rax); // value
+ __ push(rbx); // return address
-Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
- UNIMPLEMENTED();
- return NULL;
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(
+ ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);
}
-Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
- UNIMPLEMENTED();
- return NULL;
-}
-void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rsp[0] : return address
- // -- rsp[8] : key
+ // -- rax : value
+ // -- rsp[0] : return address
+ // -- rsp[8] : key
// -- rsp[16] : receiver
// -----------------------------------
+ Label slow, fast, array, extra;
- // Move the return address below the arguments.
+ // Get the receiver from the stack.
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // 2 ~ return address, key
+ // Check that the object isn't a smi.
+ __ testl(rdx, Immediate(kSmiTagMask));
+ __ j(zero, &slow);
+ // Get the map from the receiver.
+ __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to do this because this generic stub does not perform map checks.
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &slow);
+ // Get the key from the stack.
+ __ movq(rbx, Operand(rsp, 1 * kPointerSize)); // 1 ~ return address
+ // Check that the key is a smi.
+ __ testl(rbx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow);
+ // If it is a smi, make sure it is zero-extended, so it can be
+ // used as an index in a memory operand.
+ __ movl(rbx, rbx); // Clear the high bits of rbx.
+
+ __ CmpInstanceType(rcx, JS_ARRAY_TYPE);
+ __ j(equal, &array);
+ // Check that the object is some kind of JS object.
+ __ CmpInstanceType(rcx, FIRST_JS_OBJECT_TYPE);
+ __ j(below, &slow);
+
+ // Object case: Check key against length in the elements array.
+ // rax: value
+ // rdx: JSObject
+ // rbx: index (as a smi), zero-extended.
+ __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ // Check that the object is in fast mode (not dictionary).
+ __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
+ __ j(not_equal, &slow);
+ // Untag the key (for checking against untagged length in the fixed array).
+ __ movl(rdx, rbx);
+ __ sarl(rdx, Immediate(kSmiTagSize));
+ __ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset));
+ // rax: value
+ // rcx: FixedArray
+ // rbx: index (as a smi)
+ __ j(below, &fast);
+
+
+ // Slow case: Push extra copies of the arguments (3).
+ __ bind(&slow);
__ pop(rcx);
__ push(Operand(rsp, 1 * kPointerSize));
__ push(Operand(rsp, 1 * kPointerSize));
__ push(rax);
__ push(rcx);
-
// Do tail-call to runtime routine.
- __ TailCallRuntime(f, 3);
-}
-
-void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kKeyedStoreIC_Miss)));
-}
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kKeyedStoreIC_Miss)));
-}
-
-Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name) {
- UNIMPLEMENTED();
- return NULL;
+ __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3);
+
+
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+ __ bind(&extra);
+ // rax: value
+ // rdx: JSArray
+ // rcx: FixedArray
+ // rbx: index (as a smi)
+ // flags: compare (rbx, rdx.length())
+ __ j(not_equal, &slow); // do not leave holes in the array
+ __ sarl(rbx, Immediate(kSmiTagSize)); // untag
+ __ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ j(above_equal, &slow);
+ // Restore tag and increment.
+ __ lea(rbx, Operand(rbx, rbx, times_1, 1 << kSmiTagSize));
+ __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
+ __ subl(rbx, Immediate(1 << kSmiTagSize)); // decrement rbx again
+ __ jmp(&fast);
+
+
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode; if it is the
+ // length is always a smi.
+ __ bind(&array);
+ // rax: value
+ // rdx: JSArray
+ // rbx: index (as a smi)
+ __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
+ __ j(not_equal, &slow);
+
+ // Check the key against the length in the array, compute the
+ // address to store into and fall through to fast case.
+ __ cmpl(rbx, FieldOperand(rdx, JSArray::kLengthOffset));
+ __ j(above_equal, &extra);
+
+
+ // Fast case: Do the store.
+ __ bind(&fast);
+ // rax: value
+ // rcx: FixedArray
+ // rbx: index (as a smi)
+ __ movq(Operand(rcx, rbx, times_half_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag),
+ rax);
+ // Update write barrier for the elements array address.
+ __ movq(rdx, rax);
+ __ RecordWrite(rcx, 0, rdx, rbx);
+ __ ret(0);
}
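The extra-capacity path above leans on smi tagging arithmetic: with a zero tag and a one-bit tag size, a smi is just the value shifted left by one, so lea rbx, [rbx + rbx + (1 << kSmiTagSize)] applied to the untagged index both re-tags it and adds one. A minimal sketch of that arithmetic; the tag constants here are assumptions of the sketch rather than quotes from the V8 headers.

// Sketch of the smi arithmetic used in the extra-capacity path above,
// assuming kSmiTag == 0 and kSmiTagSize == 1.
#include <cstdint>
#include <cassert>

static const int kSmiTagSize = 1;

static inline int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
static inline int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }

int main() {
  int32_t index = 7;  // untagged index, as in rbx after the sarl
  // lea rbx, [rbx + rbx*1 + (1 << kSmiTagSize)]: re-tag and add one.
  int32_t new_length_smi = index + index + (1 << kSmiTagSize);
  assert(new_length_smi == SmiTag(index + 1));
  // subl(rbx, Immediate(1 << kSmiTagSize)): back to the smi of the old index.
  assert(new_length_smi - (1 << kSmiTagSize) == SmiTag(index));
  assert(SmiUntag(new_length_smi) == index + 1);
  return 0;
}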
@@ -236,13 +562,175 @@ void CallIC::Generate(MacroAssembler* masm,
__ InvokeFunction(rdi, actual, JUMP_FUNCTION);
}
+
+// Defined in ic.cc.
+Object* CallIC_Miss(Arguments args);
+
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // rsp[0] return address
+ // rsp[8] argument argc
+ // rsp[16] argument argc - 1
+ // ...
+ // rsp[argc * 8] argument 1
+  // rsp[(argc + 1) * 8] argument 0 = receiver
+ // rsp[(argc + 2) * 8] function name
+ // -----------------------------------
+ Label number, non_number, non_string, boolean, probe, miss;
+
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ // Get the name of the function from the stack; 2 ~ return address, receiver
+ __ movq(rcx, Operand(rsp, (argc + 2) * kPointerSize));
+
+ // Probe the stub cache.
+ Code::Flags flags =
+ Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
+ StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, rax);
+
+  // If the stub cache probing failed, the receiver might be a value.
+  // For value objects, the cache is keyed on the map of the prototype
+  // object of the corresponding JSValue (e.g. Number.prototype for a
+  // number), so we load that prototype and probe with it instead.
+ //
+ // Check for number.
+ __ testl(rdx, Immediate(kSmiTagMask));
+ __ j(zero, &number);
+ __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rbx);
+ __ j(not_equal, &non_number);
+ __ bind(&number);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::NUMBER_FUNCTION_INDEX, rdx);
+ __ jmp(&probe);
+
+ // Check for string.
+ __ bind(&non_number);
+ __ CmpInstanceType(rbx, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &non_string);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::STRING_FUNCTION_INDEX, rdx);
+ __ jmp(&probe);
+
+ // Check for boolean.
+ __ bind(&non_string);
+ __ Cmp(rdx, Factory::true_value());
+ __ j(equal, &boolean);
+ __ Cmp(rdx, Factory::false_value());
+ __ j(not_equal, &miss);
+ __ bind(&boolean);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::BOOLEAN_FUNCTION_INDEX, rdx);
+
+ // Probe the stub cache for the value object.
+ __ bind(&probe);
+ StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
+
// Cache miss: Jump to runtime.
+ __ bind(&miss);
Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
+
+static void GenerateNormalHelper(MacroAssembler* masm,
+ int argc,
+ bool is_global_object,
+ Label* miss) {
+  // Search dictionary - put result in register rdx.
+ GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx);
+
+ // Move the result to register rdi and check that it isn't a smi.
+ __ movq(rdi, rdx);
+ __ testl(rdx, Immediate(kSmiTagMask));
+ __ j(zero, miss);
+
+ // Check that the value is a JavaScript function.
+ __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rdx);
+ __ j(not_equal, miss);
+ // Check that the function has been loaded.
+ __ testb(FieldOperand(rdx, Map::kBitField2Offset),
+ Immediate(1 << Map::kNeedsLoading));
+ __ j(not_zero, miss);
+
+ // Patch the receiver with the global proxy if necessary.
+ if (is_global_object) {
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+ __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ }
+
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+}
+
+
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // rsp[0] return address
+ // rsp[8] argument argc
+ // rsp[16] argument argc - 1
+ // ...
+ // rsp[argc * 8] argument 1
+  // rsp[(argc + 1) * 8] argument 0 = receiver
+ // rsp[(argc + 2) * 8] function name
+ // -----------------------------------
+
+ Label miss, global_object, non_global_object;
+
+ // Get the receiver of the function from the stack.
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ // Get the name of the function from the stack.
+ __ movq(rcx, Operand(rsp, (argc + 2) * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ testl(rdx, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ // Check that the receiver is a valid JS object.
+ // Because there are so many map checks and type checks, do not
+ // use CmpObjectType, but load map and type into registers.
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movb(rax, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ cmpb(rax, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ j(below, &miss);
+
+ // If this assert fails, we have to check upper bound too.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+ // Check for access to global object.
+ __ cmpb(rax, Immediate(JS_GLOBAL_OBJECT_TYPE));
+ __ j(equal, &global_object);
+ __ cmpb(rax, Immediate(JS_BUILTINS_OBJECT_TYPE));
+ __ j(not_equal, &non_global_object);
+
+ // Accessing global object: Load and invoke.
+ __ bind(&global_object);
+ // Check that the global object does not require access checks.
+ __ movb(rbx, FieldOperand(rbx, Map::kBitFieldOffset));
+ __ testb(rbx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_equal, &miss);
+ GenerateNormalHelper(masm, argc, true, &miss);
+
+ // Accessing non-global object: Check for access to global proxy.
+ Label global_proxy, invoke;
+ __ bind(&non_global_object);
+ __ cmpb(rax, Immediate(JS_GLOBAL_PROXY_TYPE));
+ __ j(equal, &global_proxy);
+ // Check that the non-global, non-global-proxy object does not
+ // require access checks.
+ __ movb(rbx, FieldOperand(rbx, Map::kBitFieldOffset));
+ __ testb(rbx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_equal, &miss);
+ __ bind(&invoke);
+ GenerateNormalHelper(masm, argc, false, &miss);
+
+ // Global object proxy access: Check access rights.
+ __ bind(&global_proxy);
+ __ CheckAccessGlobalProxy(rdx, rax, &miss);
+ __ jmp(&invoke);
+
// Cache miss: Jump to runtime.
+ __ bind(&miss);
Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
@@ -253,7 +741,10 @@ const int LoadIC::kOffsetToLoadInstruction = 20;
void LoadIC::ClearInlinedVersion(Address address) {
- // TODO(X64): Implement this when LoadIC is enabled.
+ // Reset the map check of the inlined inobject property load (if
+ // present) to guarantee failure by holding an invalid map (the null
+ // value). The offset can be patched to anything.
+ PatchInlinedLoad(address, Heap::null_value(), kMaxInt);
}
@@ -266,11 +757,10 @@ void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
__ movq(rax, Operand(rsp, kPointerSize));
- // Move the return address below the arguments.
__ pop(rbx);
- __ push(rax);
- __ push(rcx);
- __ push(rbx);
+ __ push(rax); // receiver
+ __ push(rcx); // name
+ __ push(rbx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(f, 2);
@@ -278,9 +768,22 @@ void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+
+ StubCompiler::GenerateLoadArrayLength(masm, rax, rdx, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
+
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}
@@ -320,13 +823,50 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}
+
void LoadIC::GenerateStringLength(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+
+ StubCompiler::GenerateLoadStringLength(masm, rax, rdx, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int index) {
- // TODO(X64): Implement this function. Until then, the code is not patched.
- return false;
+
+
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kTargetAddrToReturnAddrDist;
+ // If the instruction following the call is not a test eax, nothing
+ // was inlined.
+ if (*test_instruction_address != kTestEaxByte) return false;
+
+ Address delta_address = test_instruction_address + 1;
+ // The delta to the start of the map check instruction.
+ int delta = *reinterpret_cast<int*>(delta_address);
+
+ // The map address is the last 8 bytes of the 10-byte
+ // immediate move instruction, so we add 2 to get the
+ // offset to the last 8 bytes.
+ Address map_address = test_instruction_address + delta + 2;
+ *(reinterpret_cast<Object**>(map_address)) = map;
+
+ // The offset is in the 32-bit displacement of a seven byte
+ // memory-to-register move instruction (REX.W 0x88 ModR/M disp32),
+ // so we add 3 to get the offset of the displacement.
+ Address offset_address =
+ test_instruction_address + delta + kOffsetToLoadInstruction + 3;
+ *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
+ return true;
}
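PatchInlinedLoad relies on a fixed code layout at the call site: a five-byte test eax, imm32 directly after the IC call, whose immediate is the distance from the test instruction to the inlined map check; a ten-byte movq carrying the map as a 64-bit immediate (the immediate starts two bytes in); and, kOffsetToLoadInstruction bytes further on, a seven-byte load whose 32-bit displacement (three bytes in) holds the property offset. The toy program below exercises only that patching arithmetic on a plain byte buffer; apart from kOffsetToLoadInstruction, the layout constants are invented for the sketch.

// Toy demonstration of the offset arithmetic used by PatchInlinedLoad above,
// applied to a plain byte buffer instead of generated code.
#include <cstdint>
#include <cstring>
#include <cassert>

int main() {
  unsigned char code[64] = {0};
  const unsigned char kTestEaxByte = 0xA9;  // one-byte opcode of test eax, imm32
  const int kOffsetToLoadInstruction = 20;  // same constant as LoadIC uses

  // Pretend the test instruction sits at offset 0 and the map-check movq
  // starts 16 bytes after it.
  int delta = 16;
  code[0] = kTestEaxByte;
  std::memcpy(code + 1, &delta, sizeof(delta));  // the 4-byte "immediate"

  // Patch the 8-byte map immediate; two opcode/REX bytes precede it.
  uint64_t fake_map = 0x1234;
  std::memcpy(code + delta + 2, &fake_map, sizeof(fake_map));

  // Patch the 32-bit displacement of the inlined load; three instruction
  // bytes precede it.
  int32_t offset = 24;
  std::memcpy(code + delta + kOffsetToLoadInstruction + 3, &offset, sizeof(offset));

  uint64_t read_back = 0;
  std::memcpy(&read_back, code + delta + 2, sizeof(read_back));
  assert(read_back == fake_map);
  return 0;
}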
void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
@@ -336,19 +876,33 @@ void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
- // Move the return address below the arguments.
__ pop(rbx);
- __ push(Operand(rsp, 0));
- __ push(rcx);
- __ push(rax);
- __ push(rbx);
+ __ push(Operand(rsp, 0)); // receiver
+ __ push(rcx); // name
+ __ push(rax); // value
+ __ push(rbx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(f, 3);
}
void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : Map (target of map transition)
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ __ pop(rbx);
+ __ push(Operand(rsp, 0)); // receiver
+ __ push(rcx); // transition map
+ __ push(rax); // value
+ __ push(rbx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(
+ ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
diff --git a/V8Binding/v8/src/x64/macro-assembler-x64.cc b/V8Binding/v8/src/x64/macro-assembler-x64.cc
index 457011b..2219a5a 100644
--- a/V8Binding/v8/src/x64/macro-assembler-x64.cc
+++ b/V8Binding/v8/src/x64/macro-assembler-x64.cc
@@ -31,6 +31,7 @@
#include "codegen-inl.h"
#include "assembler-x64.h"
#include "macro-assembler-x64.h"
+#include "serialize.h"
#include "debug.h"
namespace v8 {
@@ -45,11 +46,163 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
}
-// TODO(x64): For now, the write barrier is disabled on x64 and we
-// therefore generate no code. This should be fixed when the write
-// barrier is enabled.
-void MacroAssembler::RecordWrite(Register object, int offset,
- Register value, Register scratch) {
+
+static void RecordWriteHelper(MacroAssembler* masm,
+ Register object,
+ Register addr,
+ Register scratch) {
+ Label fast;
+
+ // Compute the page start address from the heap object pointer, and reuse
+ // the 'object' register for it.
+ ASSERT(is_int32(~Page::kPageAlignmentMask));
+ masm->and_(object,
+ Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
+ Register page_start = object;
+
+  // Compute the bit address in the remembered set, i.e. the index of the
+  // pointer within the page. Reuse 'addr' as pointer_offset.
+ masm->subq(addr, page_start);
+ masm->shr(addr, Immediate(kPointerSizeLog2));
+ Register pointer_offset = addr;
+
+ // If the bit offset lies beyond the normal remembered set range, it is in
+ // the extra remembered set area of a large object.
+ masm->cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
+ masm->j(less, &fast);
+
+ // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
+ // extra remembered set after the large object.
+
+ // Load the array length into 'scratch'.
+ masm->movl(scratch,
+ Operand(page_start,
+ Page::kObjectStartOffset + FixedArray::kLengthOffset));
+ Register array_length = scratch;
+
+ // Extra remembered set starts right after the large object (a FixedArray), at
+ // page_start + kObjectStartOffset + objectSize
+ // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
+ // Add the delta between the end of the normal RSet and the start of the
+ // extra RSet to 'page_start', so that addressing the bit using
+ // 'pointer_offset' hits the extra RSet words.
+ masm->lea(page_start,
+ Operand(page_start, array_length, times_pointer_size,
+ Page::kObjectStartOffset + FixedArray::kHeaderSize
+ - Page::kRSetEndOffset));
+
+ // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+ // to limit code size. We should probably evaluate this decision by
+ // measuring the performance of an equivalent implementation using
+ // "simpler" instructions
+ masm->bind(&fast);
+ masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
+}
+
+
+class RecordWriteStub : public CodeStub {
+ public:
+ RecordWriteStub(Register object, Register addr, Register scratch)
+ : object_(object), addr_(addr), scratch_(scratch) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register object_;
+ Register addr_;
+ Register scratch_;
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
+ object_.code(), addr_.code(), scratch_.code());
+ }
+#endif
+
+ // Minor key encoding in 12 bits of three registers (object, address and
+ // scratch) OOOOAAAASSSS.
+ class ScratchBits: public BitField<uint32_t, 0, 4> {};
+ class AddressBits: public BitField<uint32_t, 4, 4> {};
+ class ObjectBits: public BitField<uint32_t, 8, 4> {};
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ // Encode the registers.
+ return ObjectBits::encode(object_.code()) |
+ AddressBits::encode(addr_.code()) |
+ ScratchBits::encode(scratch_.code());
+ }
+};
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ RecordWriteHelper(masm, object_, addr_, scratch_);
+ masm->ret(0);
+}
+
+
+// Set the remembered set bit for [object+offset].
+// object is the object being stored into, value is the object being stored.
+// If offset is zero, then the scratch register contains the array index into
+// the elements array represented as a Smi.
+// All registers are clobbered by the operation.
+void MacroAssembler::RecordWrite(Register object,
+ int offset,
+ Register value,
+ Register scratch) {
+ // First, check if a remembered set write is even needed. The tests below
+ // catch stores of Smis and stores into young gen (which does not have space
+  // for the remembered set bits).
+ Label done;
+
+ // Test that the object address is not in the new space. We cannot
+ // set remembered set bits in the new space.
+ movq(value, object);
+ ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
+ and_(value, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+ movq(kScratchRegister, ExternalReference::new_space_start());
+ cmpq(value, kScratchRegister);
+ j(equal, &done);
+
+ if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
+ // Compute the bit offset in the remembered set, leave it in 'value'.
+ lea(value, Operand(object, offset));
+ ASSERT(is_int32(Page::kPageAlignmentMask));
+ and_(value, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
+ shr(value, Immediate(kObjectAlignmentBits));
+
+ // Compute the page address from the heap object pointer, leave it in
+ // 'object' (immediate value is sign extended).
+ and_(object, Immediate(~Page::kPageAlignmentMask));
+
+ // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+ // to limit code size. We should probably evaluate this decision by
+ // measuring the performance of an equivalent implementation using
+ // "simpler" instructions
+ bts(Operand(object, Page::kRSetOffset), value);
+ } else {
+ Register dst = scratch;
+ if (offset != 0) {
+ lea(dst, Operand(object, offset));
+ } else {
+      // Array access: calculate the destination address in the same manner
+      // as KeyedStoreIC::GenerateGeneric. Multiplying the smi index (the
+      // value shifted left by one) by 4 gives the byte offset into an array
+      // of 8-byte pointers.
+ lea(dst, Operand(object, dst, times_half_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ }
+ // If we are already generating a shared stub, not inlining the
+ // record write code isn't going to save us any memory.
+ if (generating_stub()) {
+ RecordWriteHelper(this, object, dst, value);
+ } else {
+ RecordWriteStub stub(object, dst, value);
+ CallStub(&stub);
+ }
+ }
+
+ bind(&done);
}
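Both RecordWriteHelper and the inlined fast path above compute the same quantity: the remembered set keeps one bit per pointer-aligned slot in a page, so the bit index is (slot_address - page_start) >> kPointerSizeLog2, with page_start obtained by masking off the page-alignment bits. A plain C++ sketch of that arithmetic; the page size is an assumption of the sketch.

// Sketch of the remembered-set bit index computation emitted above.
#include <cstdint>
#include <cstdio>

static const uintptr_t kPageSize = 8 * 1024;          // assumed page size
static const uintptr_t kPageAlignmentMask = kPageSize - 1;
static const int kPointerSizeLog2 = 3;                // 8-byte pointers

// Index of the remembered-set bit covering the pointer slot at slot_address:
// one bit per pointer-aligned slot within the page.
static uintptr_t RSetBitIndex(uintptr_t slot_address) {
  uintptr_t page_start = slot_address & ~kPageAlignmentMask;   // and_(object, ~mask)
  return (slot_address - page_start) >> kPointerSizeLog2;      // shr(addr, log2)
}

int main() {
  uintptr_t addr = 0x10000 + 5 * 8;  // the sixth pointer slot in a page
  printf("bit %lu\n", static_cast<unsigned long>(RSetBitIndex(addr)));  // bit 5
  return 0;
}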
@@ -71,9 +224,9 @@ void MacroAssembler::NegativeZeroTest(Register result,
Register op,
Label* then_label) {
Label ok;
- testq(result, result);
+ testl(result, result);
j(not_zero, &ok);
- testq(op, op);
+ testl(op, op);
j(sign, then_label);
bind(&ok);
}
@@ -151,6 +304,13 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
int num_arguments) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : argument num_arguments - 1
+ // ...
+ // -- rsp[8 * num_arguments] : argument 0 (receiver)
+ // -----------------------------------
+
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
@@ -311,6 +471,17 @@ void MacroAssembler::Push(Handle<Object> source) {
}
+void MacroAssembler::Push(Smi* source) {
+ if (IsUnsafeSmi(source)) {
+ LoadUnsafeSmi(kScratchRegister, source);
+ push(kScratchRegister);
+ } else {
+ int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(source));
+ push(Immediate(smi));
+ }
+}
+
+
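The non-unsafe branch of Push(Smi*) above works because a smi is encoded directly in the pointer bits, so truncating the pointer to 32 bits preserves the encoded value whenever it fits in an int32_t. A minimal sketch of that encoding assumption; kSmiTagSize is assumed here, and the safe/unsafe distinction in the real code also involves serialization concerns that the sketch does not model.

// Sketch of why most smis can be pushed as a 32-bit immediate above.
#include <cstdint>
#include <cassert>

static const int kSmiTagSize = 1;  // assumed tag width

static inline intptr_t EncodeSmi(int32_t value) {
  return static_cast<intptr_t>(value) << kSmiTagSize;  // tag lives in the low bits
}

static inline bool FitsInImmediate(intptr_t encoded) {
  return encoded >= INT32_MIN && encoded <= INT32_MAX;  // pushable as imm32
}

int main() {
  assert(FitsInImmediate(EncodeSmi(42)));
  assert(!FitsInImmediate(EncodeSmi(0x40000000)));  // would need kScratchRegister
  return 0;
}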
void MacroAssembler::Jump(ExternalReference ext) {
movq(kScratchRegister, ext);
jmp(kScratchRegister);
@@ -356,6 +527,7 @@ void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
movq(kScratchRegister, code_object, rmode);
#ifdef DEBUG
+  // Patch target is kPointerSize bytes *before* target label.
Label target;
bind(&target);
#endif
diff --git a/V8Binding/v8/src/x64/macro-assembler-x64.h b/V8Binding/v8/src/x64/macro-assembler-x64.h
index 2ee6eea..cba55eb 100644
--- a/V8Binding/v8/src/x64/macro-assembler-x64.h
+++ b/V8Binding/v8/src/x64/macro-assembler-x64.h
@@ -164,6 +164,7 @@ class MacroAssembler: public Assembler {
void Cmp(Register dst, Handle<Object> source);
void Cmp(const Operand& dst, Handle<Object> source);
void Push(Handle<Object> source);
+ void Push(Smi* smi);
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
@@ -175,11 +176,13 @@ class MacroAssembler: public Assembler {
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
// Compare object type for heap object.
+ // Always use unsigned comparisons: above and below, not less and greater.
// Incoming register is heap_object and outgoing register is map.
// They may be the same register, and may be kScratchRegister.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
// Compare instance type for map.
+ // Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
// FCmp is similar to integer cmp, but requires unsigned
diff --git a/V8Binding/v8/src/x64/stub-cache-x64.cc b/V8Binding/v8/src/x64/stub-cache-x64.cc
index ce7886b..091c826 100644
--- a/V8Binding/v8/src/x64/stub-cache-x64.cc
+++ b/V8Binding/v8/src/x64/stub-cache-x64.cc
@@ -36,6 +36,645 @@
namespace v8 {
namespace internal {
+//-----------------------------------------------------------------------------
+// StubCompiler static helper functions
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(MacroAssembler* masm,
+ Code::Flags flags,
+ StubCache::Table table,
+ Register name,
+ Register offset) {
+ ExternalReference key_offset(SCTableReference::keyReference(table));
+ Label miss;
+
+ __ movq(kScratchRegister, key_offset);
+ // Check that the key in the entry matches the name.
+ __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
+ __ j(not_equal, &miss);
+ // Get the code entry from the cache.
+ // Use key_offset + kPointerSize, rather than loading value_offset.
+ __ movq(kScratchRegister,
+ Operand(kScratchRegister, offset, times_4, kPointerSize));
+ // Check that the flags match what we're looking for.
+ __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
+ __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+ __ cmpl(offset, Immediate(flags));
+ __ j(not_equal, &miss);
+
+ // Jump to the first instruction in the code stub.
+ __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(kScratchRegister);
+
+ __ bind(&miss);
+}
+
+
+void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
+ ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
+ Code* code = NULL;
+ if (kind == Code::LOAD_IC) {
+ code = Builtins::builtin(Builtins::LoadIC_Miss);
+ } else {
+ code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+ }
+
+ Handle<Code> ic(code);
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype) {
+ // Load the global or builtins object from the current context.
+ __ movq(prototype,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Load the global context from the global or builtins object.
+ __ movq(prototype,
+ FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
+ // Load the function from the global context.
+ __ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
+ // Load the initial map. The global functions all have initial maps.
+ __ movq(prototype,
+ FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the prototype from the initial map.
+ __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+// Load a fast property out of a holder object (src). In-object properties
+// are loaded directly otherwise the property is loaded from the properties
+// fixed array.
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst, Register src,
+ JSObject* holder, int index) {
+ // Adjust for the number of properties stored in the holder.
+ index -= holder->map()->inobject_properties();
+ if (index < 0) {
+ // Get the property straight out of the holder.
+ int offset = holder->map()->instance_size() + (index * kPointerSize);
+ __ movq(dst, FieldOperand(src, offset));
+ } else {
+ // Calculate the offset into the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+ __ movq(dst, FieldOperand(dst, offset));
+ }
+}
+
+
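GenerateFastPropertyLoad above encodes the split between in-object properties and the out-of-object properties array: after subtracting the number of in-object properties, a negative index addresses a slot at the end of the object itself, while a non-negative index addresses the properties FixedArray. A small sketch of the offset calculation, with made-up sizes:

// Sketch of the offset calculation in GenerateFastPropertyLoad above.
#include <cstdio>

int main() {
  const int kPointerSize = 8;
  const int kFixedArrayHeaderSize = 16;  // stand-in for FixedArray::kHeaderSize
  const int inobject_properties = 4;     // from the holder's map (made up)
  const int instance_size = 48;          // from the holder's map (made up)

  for (int index = 0; index < 6; index++) {
    int adjusted = index - inobject_properties;
    if (adjusted < 0) {
      // In-object: negative index counts back from the end of the instance.
      printf("index %d: in-object offset %d\n",
             index, instance_size + adjusted * kPointerSize);
    } else {
      // Out-of-object: load the properties array, then index into it.
      printf("index %d: properties array offset %d\n",
             index, adjusted * kPointerSize + kFixedArrayHeaderSize);
    }
  }
  return 0;
}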
+template <typename Pushable>
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Pushable name,
+ JSObject* holder_obj) {
+ __ push(receiver);
+ __ push(holder);
+ __ push(name);
+ InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+ __ movq(kScratchRegister, Handle<Object>(interceptor),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ push(kScratchRegister);
+ __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset));
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+ Code::Flags flags,
+ Register receiver,
+ Register name,
+ Register scratch,
+ Register extra) {
+ Label miss;
+ USE(extra); // The register extra is not used on the X64 platform.
+ // Make sure that code is valid. The shifting code relies on the
+ // entry size being 16.
+ ASSERT(sizeof(Entry) == 16);
+
+ // Make sure the flags do not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ ASSERT(!scratch.is(receiver));
+ ASSERT(!scratch.is(name));
+
+ // Check that the receiver isn't a smi.
+ __ testl(receiver, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+ // Use only the low 32 bits of the map pointer.
+ __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(scratch, Immediate(flags));
+ __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+
+ // Probe the primary table.
+ ProbeTable(masm, flags, kPrimary, name, scratch);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+ __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(scratch, Immediate(flags));
+ __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ __ subl(scratch, name);
+ __ addl(scratch, Immediate(flags));
+ __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+
+ // Probe the secondary table.
+ ProbeTable(masm, flags, kSecondary, name, scratch);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+}
+
+
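The probe sequence above hashes the name's length/hash field together with the low 32 bits of the receiver's map pointer and the code flags to get a primary table offset, then derives a secondary offset from the primary one. A plain-arithmetic sketch of the two computations; the table sizes and tag size are assumptions of the sketch, and only the mixing steps mirror the generated code.

// Sketch of the primary/secondary stub cache hashes computed above.
#include <cstdint>
#include <cstdio>

static const uint32_t kPrimaryTableSize = 2048;   // assumed
static const uint32_t kSecondaryTableSize = 512;  // assumed
static const int kHeapObjectTagSize = 2;          // assumed tag width

static uint32_t PrimaryOffset(uint32_t name_hash_field,
                              uint32_t map_low32,
                              uint32_t flags) {
  uint32_t h = name_hash_field + map_low32;                     // addl(scratch, map)
  h ^= flags;                                                   // xor_(scratch, flags)
  return h & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);   // mask to the table
}

static uint32_t SecondaryOffset(uint32_t primary,
                                uint32_t name_low32,
                                uint32_t flags) {
  uint32_t h = primary - name_low32;                            // subl(scratch, name)
  h += flags;                                                   // addl(scratch, flags)
  return h & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
}

int main() {
  uint32_t primary = PrimaryOffset(0xBEEF, 0x1000, 0x42);
  printf("primary 0x%x, secondary 0x%x\n",
         primary, SecondaryOffset(primary, 0x2000, 0x42));
  return 0;
}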
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Builtins::Name storage_extend,
+ JSObject* object,
+ int index,
+ Map* transition,
+ Register receiver_reg,
+ Register name_reg,
+ Register scratch,
+ Label* miss_label) {
+ // Check that the object isn't a smi.
+ __ testl(receiver_reg, Immediate(kSmiTagMask));
+ __ j(zero, miss_label);
+
+ // Check that the map of the object hasn't changed.
+ __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+ Handle<Map>(object->map()));
+ __ j(not_equal, miss_label);
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ // Perform map transition for the receiver if necessary.
+ if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+ // The properties must be extended before we can store the value.
+ // We jump to a runtime call that extends the properties array.
+ __ Move(rcx, Handle<Map>(transition));
+ Handle<Code> ic(Builtins::builtin(storage_extend));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+ return;
+ }
+
+ if (transition != NULL) {
+ // Update the map of the object; no write barrier updating is
+ // needed because the map is never in new space.
+ __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+ Handle<Map>(transition));
+ }
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ movq(FieldOperand(receiver_reg, offset), rax);
+
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, rax);
+ __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array (optimistically).
+ __ movq(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ movq(FieldOperand(scratch, offset), rax);
+
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, rax);
+ __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+ }
+
+ // Return the value (register rax).
+ __ ret(0);
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ testl(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss_label);
+
+ // Check that the object is a JS array.
+ __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+ __ j(not_equal, miss_label);
+
+ // Load length directly from the JS array.
+ __ movq(rax, FieldOperand(receiver, JSArray::kLengthOffset));
+ __ ret(0);
+}
+
+
+// Generate code to check if an object is a string. If the object is
+// a string, the map's instance type is left in the scratch register.
+static void GenerateStringCheck(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* smi,
+ Label* non_string_object) {
+ // Check that the object isn't a smi.
+ __ testl(receiver, Immediate(kSmiTagMask));
+ __ j(zero, smi);
+
+ // Check that the object is a string.
+ __ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movzxbq(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ ASSERT(kNotStringTag != 0);
+ __ testl(scratch, Immediate(kNotStringTag));
+ __ j(not_zero, non_string_object);
+}
+
+
+void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss) {
+ Label load_length, check_wrapper;
+
+ // Check if the object is a string leaving the instance type in the
+ // scratch register.
+ GenerateStringCheck(masm, receiver, scratch, miss, &check_wrapper);
+
+ // Load length directly from the string.
+ __ bind(&load_length);
+ __ and_(scratch, Immediate(kStringSizeMask));
+ __ movl(rax, FieldOperand(receiver, String::kLengthOffset));
+ // rcx is also the receiver.
+ __ lea(rcx, Operand(scratch, String::kLongLengthShift));
+ __ shr(rax); // rcx is implicit shift register.
+ __ shl(rax, Immediate(kSmiTagSize));
+ __ ret(0);
+
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmpl(scratch, Immediate(JS_VALUE_TYPE));
+ __ j(not_equal, miss);
+
+ // Check if the wrapped value is a string and load the length
+ // directly if it is.
+ __ movq(receiver, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, receiver, scratch, miss, miss);
+ __ jmp(&load_length);
+}
+
+
+template <class Pushable>
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Pushable name,
+ JSObject* holder_obj) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
+ __ movq(rax, Immediate(5));
+ __ movq(rbx, ref);
+
+ CEntryStub stub;
+ __ CallStub(&stub);
+}
+
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+ Register receiver,
+ Register result,
+ Register scratch,
+ Label* miss_label) {
+ __ TryGetFunctionPrototype(receiver, result, miss_label);
+ if (!result.is(rax)) __ movq(rax, result);
+ __ ret(0);
+}
+
+
+static void LookupPostInterceptor(JSObject* holder,
+ String* name,
+ LookupResult* lookup) {
+ holder->LocalLookupRealNamedProperty(name, lookup);
+ if (lookup->IsNotFound()) {
+ Object* proto = holder->GetPrototype();
+ if (proto != Heap::null_value()) {
+ proto->Lookup(name, lookup);
+ }
+ }
+}
+
+
+class LoadInterceptorCompiler BASE_EMBEDDED {
+ public:
+ explicit LoadInterceptorCompiler(Register name) : name_(name) {}
+
+ void CompileCacheable(MacroAssembler* masm,
+ StubCompiler* stub_compiler,
+ Register receiver,
+ Register holder,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ Label* miss_label) {
+ AccessorInfo* callback = 0;
+ bool optimize = false;
+    // So far the most popular follow-ups for interceptor loads are FIELD
+    // and CALLBACKS, so only those are inlined here; other cases may be
+    // added later.
+ if (lookup->type() == FIELD) {
+ optimize = true;
+ } else if (lookup->type() == CALLBACKS) {
+ Object* callback_object = lookup->GetCallbackObject();
+ if (callback_object->IsAccessorInfo()) {
+ callback = AccessorInfo::cast(callback_object);
+ optimize = callback->getter() != NULL;
+ }
+ }
+
+ if (!optimize) {
+ CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+ return;
+ }
+
+ // Note: starting a frame here makes GC aware of pointers pushed below.
+ __ EnterInternalFrame();
+
+ if (lookup->type() == CALLBACKS) {
+ __ push(receiver);
+ }
+ __ push(holder);
+ __ push(name_);
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ Label interceptor_failed;
+ __ Cmp(rax, Factory::no_interceptor_result_sentinel());
+ __ j(equal, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ ret(0);
+
+ __ bind(&interceptor_failed);
+ __ pop(name_);
+ __ pop(holder);
+ if (lookup->type() == CALLBACKS) {
+ __ pop(receiver);
+ }
+
+ __ LeaveInternalFrame();
+
+ if (lookup->type() == FIELD) {
+ holder = stub_compiler->CheckPrototypes(holder_obj,
+ holder,
+ lookup->holder(),
+ scratch1,
+ scratch2,
+ name,
+ miss_label);
+ stub_compiler->GenerateFastPropertyLoad(masm,
+ rax,
+ holder,
+ lookup->holder(),
+ lookup->GetFieldIndex());
+ __ ret(0);
+ } else {
+ ASSERT(lookup->type() == CALLBACKS);
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+ ASSERT(callback != NULL);
+ ASSERT(callback->getter() != NULL);
+
+ Label cleanup;
+ __ pop(scratch2);
+ __ push(receiver);
+ __ push(scratch2);
+
+ holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ &cleanup);
+
+ __ pop(scratch2); // save old return address
+ __ push(holder);
+ __ Move(holder, Handle<AccessorInfo>(callback));
+ __ push(holder);
+ __ push(FieldOperand(holder, AccessorInfo::kDataOffset));
+ __ push(name_);
+ __ push(scratch2); // restore old return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallRuntime(ref, 5);
+
+ __ bind(&cleanup);
+ __ pop(scratch1);
+ __ pop(scratch2);
+ __ push(scratch1);
+ }
+ }
+
+
+ void CompileRegular(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register scratch,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ __ pop(scratch); // save old return address
+ PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+ __ push(scratch); // restore old return address
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ __ TailCallRuntime(ref, 5);
+ }
+
+ private:
+ Register name_;
+};
+
+
+template <class Compiler>
+static void CompileLoadInterceptor(Compiler* compiler,
+ StubCompiler* stub_compiler,
+ MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ testl(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ stub_compiler->CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, name, miss);
+
+ if (lookup->IsValid() && lookup->IsCacheable()) {
+ compiler->CompileCacheable(masm,
+ stub_compiler,
+ receiver,
+ reg,
+ scratch1,
+ scratch2,
+ holder,
+ lookup,
+ name,
+ miss);
+ } else {
+ compiler->CompileRegular(masm,
+ receiver,
+ reg,
+ scratch2,
+ holder,
+ miss);
+ }
+}
+
+
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+ explicit CallInterceptorCompiler(const ParameterCount& arguments)
+ : arguments_(arguments), argc_(arguments.immediate()) {}
+
+ void CompileCacheable(MacroAssembler* masm,
+ StubCompiler* stub_compiler,
+ Register receiver,
+ Register holder,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ Label* miss_label) {
+ JSFunction* function = 0;
+ bool optimize = false;
+    // So far the most popular case when the interceptor fails is a
+    // CONSTANT_FUNCTION sitting below it in the lookup chain.
+ if (lookup->type() == CONSTANT_FUNCTION) {
+ function = lookup->GetConstantFunction();
+ // JSArray holder is a special case for call constant function
+ // (see the corresponding code).
+ if (function->is_compiled() && !holder_obj->IsJSArray()) {
+ optimize = true;
+ }
+ }
+
+ if (!optimize) {
+ CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+ return;
+ }
+
+ __ EnterInternalFrame();
+ __ push(holder); // save the holder
+
+ CompileCallLoadPropertyWithInterceptor(
+ masm,
+ receiver,
+ holder,
+ // Under EnterInternalFrame this refers to name.
+ Operand(rbp, (argc_ + 3) * kPointerSize),
+ holder_obj);
+
+    __ pop(receiver);  // Restore the holder (into the receiver register).
+ __ LeaveInternalFrame();
+
+ __ Cmp(rax, Factory::no_interceptor_result_sentinel());
+ Label invoke;
+ __ j(not_equal, &invoke);
+
+ stub_compiler->CheckPrototypes(holder_obj, receiver,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ miss_label);
+ if (lookup->holder()->IsGlobalObject()) {
+ __ movq(rdx, Operand(rsp, (argc_ + 1) * kPointerSize));
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdx);
+ }
+
+ ASSERT(function->is_compiled());
+ // Get the function and setup the context.
+ __ Move(rdi, Handle<JSFunction>(function));
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ // Jump to the cached code (tail call).
+ ASSERT(function->is_compiled());
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ __ InvokeCode(code, expected, arguments_,
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+ __ bind(&invoke);
+ }
+
+ void CompileRegular(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register scratch,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ __ EnterInternalFrame();
+
+ PushInterceptorArguments(masm,
+ receiver,
+ holder,
+ Operand(rbp, (argc_ + 3) * kPointerSize),
+ holder_obj);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
+ __ movq(rax, Immediate(5));
+ __ movq(rbx, ref);
+
+ CEntryStub stub;
+ __ CallStub(&stub);
+
+ __ LeaveInternalFrame();
+ }
+
+ private:
+ const ParameterCount& arguments_;
+ int argc_;
+};
+
+
+#undef __
+
#define __ ACCESS_MASM((masm()))
@@ -133,13 +772,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
CheckPrototypes(JSObject::cast(object), rdx, holder,
rbx, rcx, name, &miss);
- // Make sure object->elements()->map() != Heap::dictionary_array_map()
+ // Make sure object->HasFastElements().
// Get the elements array of the object.
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- Factory::hash_table_map());
- __ j(equal, &miss);
+ Factory::fixed_array_map());
+ __ j(not_equal, &miss);
break;
default:
@@ -227,11 +866,62 @@ Object* CallStubCompiler::CompileCallField(Object* object,
}
-Object* CallStubCompiler::CompileCallInterceptor(Object* a,
- JSObject* b,
- String* c) {
- // TODO(X64): Implement a real stub.
- return Failure::InternalError();
+Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -----------------------------------
+ Label miss;
+
+ // Get the number of arguments.
+ const int argc = arguments().immediate();
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+
+ // Get the receiver from the stack.
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+ CallInterceptorCompiler compiler(arguments());
+ CompileLoadInterceptor(&compiler,
+ this,
+ masm(),
+ JSObject::cast(object),
+ holder,
+ name,
+ &lookup,
+ rdx,
+ rbx,
+ rcx,
+ &miss);
+
+ // Restore receiver.
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+ // Check that the function really is a function.
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+ __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
+ __ j(not_equal, &miss);
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ if (object->IsGlobalObject()) {
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+ __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ }
+
+ // Invoke the function.
+ __ movq(rdi, rax);
+ __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
+
+ // Handle load cache miss.
+ __ bind(&miss);
+ Handle<Code> ic = ComputeCallMiss(argc);
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
}
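For orientation, and not part of the patch itself: the interceptor stubs above are the x64 fast paths taken for objects that have a named property interceptor installed through the public API. A minimal embedder-side sketch, assuming the v8.h interface of this V8 version:

    #include <v8.h>

    // Hypothetical example: returning an empty handle makes the lookup fall
    // through to regular property resolution, exercising stubs like the
    // ones compiled above.
    static v8::Handle<v8::Value> NamedGetter(v8::Local<v8::String> name,
                                             const v8::AccessorInfo& info) {
      return v8::Handle<v8::Value>();
    }

    static void InstallInterceptor(v8::Handle<v8::ObjectTemplate> templ) {
      templ->SetNamedPropertyHandler(NamedGetter);
      // Named loads and calls on instances of 'templ' now go through the
      // interceptor machinery that CompileLoadInterceptor and
      // CompileCallInterceptor optimize.
    }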
@@ -252,8 +942,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// rsp[(argc + 2) * 8] function name
Label miss;
- __ IncrementCounter(&Counters::call_global_inline, 1);
-
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -289,6 +977,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
+ __ IncrementCounter(&Counters::call_global_inline, 1);
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -297,7 +986,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
- __ DecrementCounter(&Counters::call_global_inline, 1);
__ IncrementCounter(&Counters::call_global_inline_miss, 1);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -307,12 +995,25 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
}
-Object* LoadStubCompiler::CompileLoadCallback(JSObject* a,
- JSObject* b,
- AccessorInfo* c,
- String* d) {
- // TODO(X64): Implement a real stub.
- return Failure::InternalError();
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+ JSObject* holder,
+ AccessorInfo* callback,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
+ callback, name, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
}
@@ -327,7 +1028,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
// -----------------------------------
Label miss;
- __ movq(rax, (Operand(rsp, kPointerSize)));
+ __ movq(rax, Operand(rsp, kPointerSize));
GenerateLoadConstant(object, holder, rax, rbx, rdx, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -348,7 +1049,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
// -----------------------------------
Label miss;
- __ movq(rax, (Operand(rsp, kPointerSize)));
+ __ movq(rax, Operand(rsp, kPointerSize));
GenerateLoadField(object, holder, rax, rbx, rdx, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -358,11 +1059,37 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
}
-Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a,
- JSObject* b,
- String* c) {
- // TODO(X64): Implement a real stub.
- return Failure::InternalError();
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+ Label miss;
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ // TODO(368): Compile in the whole chain: all the interceptors in the
+ // prototype chain and the ultimate answer.
+ GenerateLoadInterceptor(receiver,
+ holder,
+ &lookup,
+ rax,
+ rcx,
+ rdx,
+ rbx,
+ name,
+ &miss);
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
}
@@ -378,10 +1105,8 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::named_load_global_inline, 1);
-
// Get the receiver from the stack.
- __ movq(rax, (Operand(rsp, kPointerSize)));
+ __ movq(rax, Operand(rsp, kPointerSize));
// If the object is the holder then we know that it's a global
// object which can only happen for contextual loads. In this case,
@@ -407,10 +1132,10 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
+ __ IncrementCounter(&Counters::named_load_global_inline, 1);
__ ret(0);
__ bind(&miss);
- __ DecrementCounter(&Counters::named_load_global_inline, 1);
__ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -419,11 +1144,234 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
}
-Object* StoreStubCompiler::CompileStoreCallback(JSObject* a,
- AccessorInfo* b,
- String* c) {
- UNIMPLEMENTED();
- return NULL;
+Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ AccessorInfo* callback) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+ __ IncrementCounter(&Counters::keyed_load_callback, 1);
+
+ // Check that the name has not changed.
+ __ Cmp(rax, Handle<String>(name));
+ __ j(not_equal, &miss);
+
+ GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx,
+ callback, name, &miss);
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_load_callback, 1);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+ __ IncrementCounter(&Counters::keyed_load_array_length, 1);
+
+ // Check that the name has not changed.
+ __ Cmp(rax, Handle<String>(name));
+ __ j(not_equal, &miss);
+
+ GenerateLoadArrayLength(masm(), rcx, rdx, &miss);
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_load_array_length, 1);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ Object* value) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+ __ IncrementCounter(&Counters::keyed_load_constant_function, 1);
+
+ // Check that the name has not changed.
+ __ Cmp(rax, Handle<String>(name));
+ __ j(not_equal, &miss);
+
+ GenerateLoadConstant(receiver, holder, rcx, rbx, rdx,
+ value, name, &miss);
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_load_constant_function, 1);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CONSTANT_FUNCTION, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+ __ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
+
+ // Check that the name has not changed.
+ __ Cmp(rax, Handle<String>(name));
+ __ j(not_equal, &miss);
+
+ GenerateLoadFunctionPrototype(masm(), rcx, rdx, rbx, &miss);
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+ __ IncrementCounter(&Counters::keyed_load_interceptor, 1);
+
+ // Check that the name has not changed.
+ __ Cmp(rax, Handle<String>(name));
+ __ j(not_equal, &miss);
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+ GenerateLoadInterceptor(receiver,
+ holder,
+ &lookup,
+ rcx,
+ rax,
+ rdx,
+ rbx,
+ name,
+ &miss);
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_load_interceptor, 1);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+ __ IncrementCounter(&Counters::keyed_load_string_length, 1);
+
+ // Check that the name has not changed.
+ __ Cmp(rax, Handle<String>(name));
+ __ j(not_equal, &miss);
+
+ GenerateLoadStringLength(masm(), rcx, rdx, &miss);
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_load_string_length, 1);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+ AccessorInfo* callback,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Get the object from the stack.
+ __ movq(rbx, Operand(rsp, 1 * kPointerSize));
+
+ // Check that the object isn't a smi.
+ __ testl(rbx, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ // Check that the map of the object hasn't changed.
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ Handle<Map>(object->map()));
+ __ j(not_equal, &miss);
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(rbx, rdx, &miss);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ __ pop(rbx); // remove the return address
+ __ push(Operand(rsp, 0)); // receiver
+ __ Push(Handle<AccessorInfo>(callback)); // callback info
+ __ push(rcx); // name
+ __ push(rax); // value
+ __ push(rbx); // restore return address
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
+ __ TailCallRuntime(store_callback_property, 4);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ Move(rcx, Handle<String>(name)); // restore name
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
}
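A brief aside, not part of the patch: CompileStoreCallback is the fast path for properties registered with an AccessorInfo callback through the public API; the stub tail-calls IC::kStoreCallbackProperty, which in turn invokes the embedder's setter. A hedged sketch, assuming the v8.h interface of this era:

    static v8::Handle<v8::Value> XGetter(v8::Local<v8::String> name,
                                         const v8::AccessorInfo& info) {
      return v8::Integer::New(42);
    }

    static void XSetter(v8::Local<v8::String> name,
                        v8::Local<v8::Value> value,
                        const v8::AccessorInfo& info) {
      // A store to 'x' on a matching object eventually reaches this setter
      // via the stub above and IC::kStoreCallbackProperty.
    }

    static void InstallAccessor(v8::Handle<v8::ObjectTemplate> templ) {
      templ->SetAccessor(v8::String::New("x"), XGetter, XSetter);
    }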
@@ -462,17 +1410,165 @@ Object* StoreStubCompiler::CompileStoreField(JSObject* object,
}
-Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* a, String* b) {
- UNIMPLEMENTED();
- return NULL;
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Get the object from the stack.
+ __ movq(rbx, Operand(rsp, 1 * kPointerSize));
+
+ // Check that the object isn't a smi.
+ __ testl(rbx, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ // Check that the map of the object hasn't changed.
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ Handle<Map>(receiver->map()));
+ __ j(not_equal, &miss);
+
+ // Perform global security token check if needed.
+ if (receiver->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(rbx, rdx, &miss);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+
+ __ pop(rbx); // remove the return address
+ __ push(Operand(rsp, 0)); // receiver
+ __ push(rcx); // name
+ __ push(rax); // value
+ __ push(rbx); // restore return address
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property =
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
+ __ TailCallRuntime(store_ic_property, 3);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ Move(rcx, Handle<String>(name)); // restore name
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
}
Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
JSGlobalPropertyCell* cell,
String* name) {
- UNIMPLEMENTED();
- return NULL;
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map of the global has not changed.
+ __ movq(rbx, Operand(rsp, kPointerSize));
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ Handle<Map>(object->map()));
+ __ j(not_equal, &miss);
+
+ // Store the value in the cell.
+ __ Move(rcx, Handle<JSGlobalPropertyCell>(cell));
+ __ movq(FieldOperand(rcx, JSGlobalPropertyCell::kValueOffset), rax);
+
+ // Return the value (register rax).
+ __ IncrementCounter(&Counters::named_store_global_inline, 1);
+ __ ret(0);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ IncrementCounter(&Counters::named_store_global_inline_miss, 1);
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int index) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+ __ IncrementCounter(&Counters::keyed_load_field, 1);
+
+ // Check that the name has not changed.
+ __ Cmp(rax, Handle<String>(name));
+ __ j(not_equal, &miss);
+
+ GenerateLoadField(receiver, holder, rcx, rbx, rdx, index, name, &miss);
+
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_load_field, 1);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(FIELD, name);
+}
+
+
+Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rsp[0] : return address
+ // -- rsp[8] : key
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ IncrementCounter(&Counters::keyed_store_field, 1);
+
+ // Get the name from the stack.
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ // Check that the name has not changed.
+ __ Cmp(rcx, Handle<String>(name));
+ __ j(not_equal, &miss);
+
+ // Get the object from the stack.
+ __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+
+ // Generate store field code. Trashes the name register.
+ GenerateStoreField(masm(),
+ Builtins::KeyedStoreIC_ExtendStorage,
+ object,
+ index,
+ transition,
+ rbx, rcx, rdx,
+ &miss);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_store_field, 1);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
}
@@ -500,6 +1596,66 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
+
+void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+ JSObject* holder,
+ LookupResult* lookup,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ String* name,
+ Label* miss) {
+ LoadInterceptorCompiler compiler(name_reg);
+ CompileLoadInterceptor(&compiler,
+ this,
+ masm(),
+ object,
+ holder,
+ name,
+ lookup,
+ receiver,
+ scratch1,
+ scratch2,
+ miss);
+}
+
+
+void StubCompiler::GenerateLoadCallback(JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ AccessorInfo* callback,
+ String* name,
+ Label* miss) {
+ // Check that the receiver isn't a smi.
+ __ testl(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, name, miss);
+
+ // Push the arguments on the JS stack of the caller.
+ __ pop(scratch2); // remove return address
+ __ push(receiver); // receiver
+ __ push(reg); // holder
+ __ Move(reg, Handle<AccessorInfo>(callback)); // callback data
+ __ push(reg);
+ __ push(FieldOperand(reg, AccessorInfo::kDataOffset));
+ __ push(name_reg); // name
+ __ push(scratch2); // restore return address
+
+ // Do tail-call to the runtime system.
+ ExternalReference load_callback_property =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallRuntime(load_callback_property, 5);
+}
+
+
Register StubCompiler::CheckPrototypes(JSObject* object,
Register object_reg,
JSObject* holder,
@@ -584,224 +1740,4 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
#undef __
-//-----------------------------------------------------------------------------
-// StubCompiler static helper functions
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register name,
- Register offset) {
- ExternalReference key_offset(SCTableReference::keyReference(table));
- Label miss;
-
- __ movq(kScratchRegister, key_offset);
- // Check that the key in the entry matches the name.
- __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
- __ j(not_equal, &miss);
- // Get the code entry from the cache.
- // Use key_offset + kPointerSize, rather than loading value_offset.
- __ movq(kScratchRegister,
- Operand(kScratchRegister, offset, times_4, kPointerSize));
- // Check that the flags match what we're looking for.
- __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
- __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
- __ cmpl(offset, Immediate(flags));
- __ j(not_equal, &miss);
-
- // Jump to the first instruction in the code stub.
- __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(kScratchRegister);
-
- __ bind(&miss);
-}
-
-
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Code* code = NULL;
- if (kind == Code::LOAD_IC) {
- code = Builtins::builtin(Builtins::LoadIC_Miss);
- } else {
- code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
- }
-
- Handle<Code> ic(code);
- __ Jump(ic, RelocInfo::CODE_TARGET);
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype) {
- // Load the global or builtins object from the current context.
- __ movq(prototype,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- __ movq(prototype,
- FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
- __ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
- // Load the initial map. The global functions all have initial maps.
- __ movq(prototype,
- FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst, Register src,
- JSObject* holder, int index) {
- // Adjust for the number of properties stored in the holder.
- index -= holder->map()->inobject_properties();
- if (index < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (index * kPointerSize);
- __ movq(dst, FieldOperand(src, offset));
- } else {
- // Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
- __ movq(dst, FieldOperand(dst, offset));
- }
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra) {
- Label miss;
- USE(extra); // The register extra is not used on the X64 platform.
- // Make sure that code is valid. The shifting code relies on the
- // entry size being 16.
- ASSERT(sizeof(Entry) == 16);
-
- // Make sure the flags do not name a specific type.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- ASSERT(!scratch.is(receiver));
- ASSERT(!scratch.is(name));
-
- // Check that the receiver isn't a smi.
- __ testl(receiver, Immediate(kSmiTagMask));
- __ j(zero, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ movl(scratch, FieldOperand(name, String::kLengthOffset));
- // Use only the low 32 bits of the map pointer.
- __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
-
- // Probe the primary table.
- ProbeTable(masm, flags, kPrimary, name, scratch);
-
- // Primary miss: Compute hash for secondary probe.
- __ movl(scratch, FieldOperand(name, String::kLengthOffset));
- __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
- __ subl(scratch, name);
- __ addl(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
-
- // Probe the secondary table.
- ProbeTable(masm, flags, kSecondary, name, scratch);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
-}
-
-
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Builtins::Name storage_extend,
- JSObject* object,
- int index,
- Map* transition,
- Register receiver_reg,
- Register name_reg,
- Register scratch,
- Label* miss_label) {
- // Check that the object isn't a smi.
- __ testl(receiver_reg, Immediate(kSmiTagMask));
- __ j(zero, miss_label);
-
- // Check that the map of the object hasn't changed.
- __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- __ j(not_equal, miss_label);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- // Perform map transition for the receiver if necessary.
- if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ Move(rcx, Handle<Map>(transition));
- Handle<Code> ic(Builtins::builtin(storage_extend));
- __ Jump(ic, RelocInfo::CODE_TARGET);
- return;
- }
-
- if (transition != NULL) {
- // Update the map of the object; no write barrier updating is
- // needed because the map is never in new space.
- __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Handle<Map>(transition));
- }
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
-
- if (index < 0) {
- // Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ movq(FieldOperand(receiver_reg, offset), rax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, rax);
- __ RecordWrite(receiver_reg, offset, name_reg, scratch);
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array (optimistically).
- __ movq(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(scratch, offset), rax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, rax);
- __ RecordWrite(scratch, offset, name_reg, receiver_reg);
- }
-
- // Return the value (register rax).
- __ ret(0);
-}
-
-
-#undef __
-
-
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/zone-inl.h b/V8Binding/v8/src/zone-inl.h
index 9af6251..b3141a4 100644
--- a/V8Binding/v8/src/zone-inl.h
+++ b/V8Binding/v8/src/zone-inl.h
@@ -68,6 +68,223 @@ void Zone::adjust_segment_bytes_allocated(int delta) {
}
+template <typename C>
+bool ZoneSplayTree<C>::Insert(const Key& key, Locator* locator) {
+ if (is_empty()) {
+ // If the tree is empty, insert the new node.
+ root_ = new Node(key, C::kNoValue);
+ } else {
+ // Splay on the key to move the last node on the search path
+ // for the key to the root of the tree.
+ Splay(key);
+ // Ignore repeated insertions with the same key.
+ int cmp = C::Compare(key, root_->key_);
+ if (cmp == 0) {
+ locator->bind(root_);
+ return false;
+ }
+ // Insert the new node.
+ Node* node = new Node(key, C::kNoValue);
+ if (cmp > 0) {
+ node->left_ = root_;
+ node->right_ = root_->right_;
+ root_->right_ = NULL;
+ } else {
+ node->right_ = root_;
+ node->left_ = root_->left_;
+ root_->left_ = NULL;
+ }
+ root_ = node;
+ }
+ locator->bind(root_);
+ return true;
+}
+
+
+template <typename C>
+bool ZoneSplayTree<C>::Find(const Key& key, Locator* locator) {
+ if (is_empty())
+ return false;
+ Splay(key);
+ if (C::Compare(key, root_->key_) == 0) {
+ locator->bind(root_);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+
+template <typename C>
+bool ZoneSplayTree<C>::FindGreatestLessThan(const Key& key,
+ Locator* locator) {
+ if (is_empty())
+ return false;
+ // Splay on the key to move the node with the given key or the last
+ // node on the search path to the top of the tree.
+ Splay(key);
+ // Now the result is either the root node or the greatest node in
+ // the left subtree.
+ int cmp = C::Compare(root_->key_, key);
+ if (cmp <= 0) {
+ locator->bind(root_);
+ return true;
+ } else {
+ Node* temp = root_;
+ root_ = root_->left_;
+ bool result = FindGreatest(locator);
+ root_ = temp;
+ return result;
+ }
+}
+
+
+template <typename C>
+bool ZoneSplayTree<C>::FindLeastGreaterThan(const Key& key,
+ Locator* locator) {
+ if (is_empty())
+ return false;
+ // Splay on the key to move the node with the given key or the last
+ // node on the search path to the top of the tree.
+ Splay(key);
+ // Now the result is either the root node or the least node in
+ // the right subtree.
+ int cmp = C::Compare(root_->key_, key);
+ if (cmp >= 0) {
+ locator->bind(root_);
+ return true;
+ } else {
+ Node* temp = root_;
+ root_ = root_->right_;
+ bool result = FindLeast(locator);
+ root_ = temp;
+ return result;
+ }
+}
+
+
+template <typename C>
+bool ZoneSplayTree<C>::FindGreatest(Locator* locator) {
+ if (is_empty())
+ return false;
+ Node* current = root_;
+ while (current->right_ != NULL)
+ current = current->right_;
+ locator->bind(current);
+ return true;
+}
+
+
+template <typename C>
+bool ZoneSplayTree<C>::FindLeast(Locator* locator) {
+ if (is_empty())
+ return false;
+ Node* current = root_;
+ while (current->left_ != NULL)
+ current = current->left_;
+ locator->bind(current);
+ return true;
+}
+
+
+template <typename C>
+bool ZoneSplayTree<C>::Remove(const Key& key) {
+ // Bail if the tree is empty
+ if (is_empty())
+ return false;
+ // Splay on the key to move the node with the given key to the top.
+ Splay(key);
+ // Bail if the key is not in the tree
+ if (C::Compare(key, root_->key_) != 0)
+ return false;
+ if (root_->left_ == NULL) {
+ // No left child, so the new tree is just the right child.
+ root_ = root_->right_;
+ } else {
+ // Left child exists.
+ Node* right = root_->right_;
+ // Make the original left child the new root.
+ root_ = root_->left_;
+ // Splay to make sure that the new root has an empty right child.
+ Splay(key);
+ // Insert the original right child as the right child of the new
+ // root.
+ root_->right_ = right;
+ }
+ return true;
+}
+
+
+template <typename C>
+void ZoneSplayTree<C>::Splay(const Key& key) {
+ if (is_empty())
+ return;
+ Node dummy_node(C::kNoKey, C::kNoValue);
+ // Create a dummy node. The use of the dummy node is a bit
+ // counter-intuitive: The right child of the dummy node will hold
+ // the L tree of the algorithm. The left child of the dummy node
+ // will hold the R tree of the algorithm. Using a dummy node, left
+ // and right will always be nodes and we avoid special cases.
+ Node* dummy = &dummy_node;
+ Node* left = dummy;
+ Node* right = dummy;
+ Node* current = root_;
+ while (true) {
+ int cmp = C::Compare(key, current->key_);
+ if (cmp < 0) {
+ if (current->left_ == NULL)
+ break;
+ if (C::Compare(key, current->left_->key_) < 0) {
+ // Rotate right.
+ Node* temp = current->left_;
+ current->left_ = temp->right_;
+ temp->right_ = current;
+ current = temp;
+ if (current->left_ == NULL)
+ break;
+ }
+ // Link right.
+ right->left_ = current;
+ right = current;
+ current = current->left_;
+ } else if (cmp > 0) {
+ if (current->right_ == NULL)
+ break;
+ if (C::Compare(key, current->right_->key_) > 0) {
+ // Rotate left.
+ Node* temp = current->right_;
+ current->right_ = temp->left_;
+ temp->left_ = current;
+ current = temp;
+ if (current->right_ == NULL)
+ break;
+ }
+ // Link left.
+ left->right_ = current;
+ left = current;
+ current = current->right_;
+ } else {
+ break;
+ }
+ }
+ // Assemble.
+ left->right_ = current->left_;
+ right->left_ = current->right_;
+ current->left_ = dummy->right_;
+ current->right_ = dummy->left_;
+ root_ = current;
+}
+
+
+template <typename Node, class Callback>
+static void DoForEach(Node* node, Callback* callback) {
+ if (node == NULL) return;
+ DoForEach<Node, Callback>(node->left(), callback);
+ callback->Call(node->key(), node->value());
+ DoForEach<Node, Callback>(node->right(), callback);
+}
+
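Illustrative only (not part of the patch): DoForEach expects a callback object exposing Call(key, value), matching the ForEach member declared in zone.h. A hypothetical callback that counts the nodes of an int-to-int tree could look like this:

    struct CountingCallback {
      int count;
      CountingCallback() : count(0) { }
      void Call(int key, int value) { count++; }
    };

    // Usage sketch: tree->ForEach(&callback); after the traversal,
    // callback.count holds the number of nodes visited in key order.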
+
} } // namespace v8::internal
#endif // V8_ZONE_INL_H_
diff --git a/V8Binding/v8/src/zone.h b/V8Binding/v8/src/zone.h
index a8b26e9..cdbab32 100644
--- a/V8Binding/v8/src/zone.h
+++ b/V8Binding/v8/src/zone.h
@@ -204,6 +204,108 @@ class ZoneScope BASE_EMBEDDED {
};
+template <typename Node, class Callback>
+static void DoForEach(Node* node, Callback* callback);
+
+
+// A zone splay tree. The config type parameter encapsulates the
+// different configurations of a concrete splay tree:
+//
+// typedef Key: the key type
+// typedef Value: the value type
+// static const Key kNoKey: the dummy key used when no key is set
+// static const Value kNoValue: the dummy value used to initialize nodes
+// static int Compare(Key& a, Key& b) -> {-1, 0, 1}: comparison function
+//
+template <typename Config>
+class ZoneSplayTree : public ZoneObject {
+ public:
+ typedef typename Config::Key Key;
+ typedef typename Config::Value Value;
+
+ class Locator;
+
+ ZoneSplayTree() : root_(NULL) { }
+
+ // Inserts the given key in this tree with the given value. Returns
+ // true if a node was inserted, otherwise false (the key was already
+ // present). In both cases the locator is bound to the mapping for the key.
+ bool Insert(const Key& key, Locator* locator);
+
+ // Looks up the key in this tree and returns true if it was found,
+ // otherwise false. If the node is found the locator is enabled and
+ // provides access to the mapping for the key.
+ bool Find(const Key& key, Locator* locator);
+
+ // Finds the mapping with the greatest key less than or equal to the
+ // given key.
+ bool FindGreatestLessThan(const Key& key, Locator* locator);
+
+ // Find the mapping with the greatest key in this tree.
+ bool FindGreatest(Locator* locator);
+
+ // Finds the mapping with the least key greater than or equal to the
+ // given key.
+ bool FindLeastGreaterThan(const Key& key, Locator* locator);
+
+ // Find the mapping with the least key in this tree.
+ bool FindLeast(Locator* locator);
+
+ // Remove the node with the given key from the tree.
+ bool Remove(const Key& key);
+
+ bool is_empty() { return root_ == NULL; }
+
+ // Perform the splay operation for the given key. Moves the node with
+ // the given key to the top of the tree. If no node has the given
+ // key, the last node on the search path is moved to the top of the
+ // tree.
+ void Splay(const Key& key);
+
+ class Node : public ZoneObject {
+ public:
+ Node(const Key& key, const Value& value)
+ : key_(key),
+ value_(value),
+ left_(NULL),
+ right_(NULL) { }
+ Key key() { return key_; }
+ Value value() { return value_; }
+ Node* left() { return left_; }
+ Node* right() { return right_; }
+ private:
+ friend class ZoneSplayTree;
+ friend class Locator;
+ Key key_;
+ Value value_;
+ Node* left_;
+ Node* right_;
+ };
+
+ // A locator provides access to a node in the tree without actually
+ // exposing the node.
+ class Locator {
+ public:
+ explicit Locator(Node* node) : node_(node) { }
+ Locator() : node_(NULL) { }
+ const Key& key() { return node_->key_; }
+ Value& value() { return node_->value_; }
+ void set_value(const Value& value) { node_->value_ = value; }
+ inline void bind(Node* node) { node_ = node; }
+ private:
+ Node* node_;
+ };
+
+ template <class Callback>
+ void ForEach(Callback* c) {
+ DoForEach<typename ZoneSplayTree<Config>::Node, Callback>(root_, c);
+ }
+
+ private:
+ Node* root_;
+};
+
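A hedged usage sketch (not part of the patch): a hypothetical Config for an int-to-int mapping and a few operations, assuming the interface documented above. ZoneSplayTree and its nodes are zone-allocated, so an active Zone (e.g. via ZoneScope) is required:

    struct IntIntConfig {
      typedef int Key;
      typedef int Value;
      static const int kNoKey = -1;
      static const int kNoValue = 0;
      static int Compare(int a, int b) {
        return a < b ? -1 : (a > b ? 1 : 0);
      }
    };

    static void SplayTreeExample() {
      ZoneSplayTree<IntIntConfig>* tree = new ZoneSplayTree<IntIntConfig>();
      ZoneSplayTree<IntIntConfig>::Locator loc;
      if (tree->Insert(3, &loc)) loc.set_value(30);  // new mapping 3 -> 30
      tree->Insert(3, &loc);   // returns false; loc is bound to key 3
      if (tree->FindGreatestLessThan(5, &loc)) {
        // loc.key() == 3, loc.value() == 30
      }
      tree->Remove(3);
    }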
+
} } // namespace v8::internal
#endif // V8_ZONE_H_
diff --git a/V8Binding/v8/test/cctest/SConscript b/V8Binding/v8/test/cctest/SConscript
index 7506d29..112ecd6 100644
--- a/V8Binding/v8/test/cctest/SConscript
+++ b/V8Binding/v8/test/cctest/SConscript
@@ -63,9 +63,9 @@ SOURCES = {
'arch:ia32': [
'test-assembler-ia32.cc',
'test-disasm-ia32.cc',
- 'test-log-ia32.cc'
+ 'test-log-stack-tracer.cc'
],
- 'arch:x64': ['test-assembler-x64.cc'],
+ 'arch:x64': ['test-assembler-x64.cc', 'test-log-stack-tracer.cc'],
'os:linux': ['test-platform-linux.cc'],
'os:macos': ['test-platform-macos.cc'],
'os:nullos': ['test-platform-nullos.cc'],
diff --git a/V8Binding/v8/test/cctest/cctest.status b/V8Binding/v8/test/cctest/cctest.status
index b234ca3..9abe408 100644
--- a/V8Binding/v8/test/cctest/cctest.status
+++ b/V8Binding/v8/test/cctest/cctest.status
@@ -63,7 +63,6 @@ test-api/TryCatchInTryFinally: FAIL
[ $arch == x64 ]
-test-regexp/Graph: PASS || CRASH || FAIL
test-decls/Present: CRASH || FAIL
test-decls/Unknown: CRASH || FAIL
test-decls/Appearing: CRASH || FAIL
@@ -113,10 +112,9 @@ test-debug/RecursiveBreakpoints: CRASH || FAIL
test-debug/DebuggerUnload: CRASH || FAIL
test-debug/DebuggerHostDispatch: CRASH || FAIL
test-debug/DebugBreakInMessageHandler: CRASH || FAIL
-test-api/HugeConsStringOutOfMemory: CRASH || FAIL
-test-api/OutOfMemory: CRASH || FAIL
-test-api/OutOfMemoryNested: CRASH || FAIL
+test-debug/NoDebugBreakInAfterCompileMessageHandler: CRASH || FAIL
test-api/Threading: CRASH || FAIL
+test-api/Threading2: PASS || TIMEOUT
test-api/TryCatchSourceInfo: CRASH || FAIL
test-api/RegExpInterruption: PASS || TIMEOUT
test-api/RegExpStringModification: PASS || TIMEOUT
diff --git a/V8Binding/v8/test/cctest/test-api.cc b/V8Binding/v8/test/cctest/test-api.cc
index 806e711..35ac031 100644
--- a/V8Binding/v8/test/cctest/test-api.cc
+++ b/V8Binding/v8/test/cctest/test-api.cc
@@ -633,6 +633,53 @@ THREADED_TEST(FunctionTemplate) {
}
+THREADED_TEST(FindInstanceInPrototypeChain) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ Local<v8::FunctionTemplate> base = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> derived = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> other = v8::FunctionTemplate::New();
+ derived->Inherit(base);
+
+ Local<v8::Function> base_function = base->GetFunction();
+ Local<v8::Function> derived_function = derived->GetFunction();
+ Local<v8::Function> other_function = other->GetFunction();
+
+ Local<v8::Object> base_instance = base_function->NewInstance();
+ Local<v8::Object> derived_instance = derived_function->NewInstance();
+ Local<v8::Object> derived_instance2 = derived_function->NewInstance();
+ Local<v8::Object> other_instance = other_function->NewInstance();
+ derived_instance2->Set(v8_str("__proto__"), derived_instance);
+ other_instance->Set(v8_str("__proto__"), derived_instance2);
+
+ // base_instance is only an instance of base.
+ CHECK_EQ(base_instance,
+ base_instance->FindInstanceInPrototypeChain(base));
+ CHECK(base_instance->FindInstanceInPrototypeChain(derived).IsEmpty());
+ CHECK(base_instance->FindInstanceInPrototypeChain(other).IsEmpty());
+
+ // derived_instance is an instance of base and derived.
+ CHECK_EQ(derived_instance,
+ derived_instance->FindInstanceInPrototypeChain(base));
+ CHECK_EQ(derived_instance,
+ derived_instance->FindInstanceInPrototypeChain(derived));
+ CHECK(derived_instance->FindInstanceInPrototypeChain(other).IsEmpty());
+
+ // other_instance is an instance of other and its immediate
+ // prototype derived_instance2 is an instance of base and derived.
+ // Note, derived_instance is an instance of base and derived too,
+ // but it comes after derived_instance2 in the prototype chain of
+ // other_instance.
+ CHECK_EQ(derived_instance2,
+ other_instance->FindInstanceInPrototypeChain(base));
+ CHECK_EQ(derived_instance2,
+ other_instance->FindInstanceInPrototypeChain(derived));
+ CHECK_EQ(other_instance,
+ other_instance->FindInstanceInPrototypeChain(other));
+}
+
+
static v8::Handle<Value> handle_property(Local<String> name,
const AccessorInfo&) {
ApiTestFuzzer::Fuzz();
@@ -7548,3 +7595,146 @@ THREADED_TEST(Regress16276) {
context->DetachGlobal();
CHECK_EQ(42, CompileRun("f(this).foo")->Int32Value());
}
+
+
+THREADED_TEST(PixelArray) {
+ v8::HandleScope scope;
+ LocalContext context;
+ const int kElementCount = 40;
+ uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount));
+ i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(kElementCount,
+ pixel_data);
+ i::Heap::CollectAllGarbage(); // Force GC to trigger verification.
+ for (int i = 0; i < kElementCount; i++) {
+ pixels->set(i, i);
+ }
+ i::Heap::CollectAllGarbage(); // Force GC to trigger verification.
+ for (int i = 0; i < kElementCount; i++) {
+ CHECK_EQ(i, pixels->get(i));
+ CHECK_EQ(i, pixel_data[i]);
+ }
+
+ v8::Handle<v8::Object> obj = v8::Object::New();
+ i::Handle<i::JSObject> jsobj = v8::Utils::OpenHandle(*obj);
+ // Set the elements to be the pixels.
+ // jsobj->set_elements(*pixels);
+ obj->SetIndexedPropertiesToPixelData(pixel_data, kElementCount);
+ CHECK_EQ(1, i::Smi::cast(jsobj->GetElement(1))->value());
+ obj->Set(v8_str("field"), v8::Int32::New(1503));
+ context->Global()->Set(v8_str("pixels"), obj);
+ v8::Handle<v8::Value> result = CompileRun("pixels.field");
+ CHECK_EQ(1503, result->Int32Value());
+ result = CompileRun("pixels[1]");
+ CHECK_EQ(1, result->Int32Value());
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i];"
+ "}"
+ "sum;");
+ CHECK_EQ(28, result->Int32Value());
+
+ i::Handle<i::Smi> value(i::Smi::FromInt(2));
+ i::SetElement(jsobj, 1, value);
+ CHECK_EQ(2, i::Smi::cast(jsobj->GetElement(1))->value());
+ *value.location() = i::Smi::FromInt(256);
+ i::SetElement(jsobj, 1, value);
+ CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(1))->value());
+ *value.location() = i::Smi::FromInt(-1);
+ i::SetElement(jsobj, 1, value);
+ CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(1))->value());
+
+ result = CompileRun("for (var i = 0; i < 8; i++) {"
+ " pixels[i] = (i * 65) - 109;"
+ "}"
+ "pixels[1] + pixels[6];");
+ CHECK_EQ(255, result->Int32Value());
+ CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(0))->value());
+ CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(1))->value());
+ CHECK_EQ(21, i::Smi::cast(jsobj->GetElement(2))->value());
+ CHECK_EQ(86, i::Smi::cast(jsobj->GetElement(3))->value());
+ CHECK_EQ(151, i::Smi::cast(jsobj->GetElement(4))->value());
+ CHECK_EQ(216, i::Smi::cast(jsobj->GetElement(5))->value());
+ CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(6))->value());
+ CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(7))->value());
+ result = CompileRun("var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += pixels[i];"
+ "}"
+ "sum;");
+ CHECK_EQ(984, result->Int32Value());
+
+ result = CompileRun("for (var i = 0; i < 8; i++) {"
+ " pixels[i] = (i * 1.1);"
+ "}"
+ "pixels[1] + pixels[6];");
+ CHECK_EQ(8, result->Int32Value());
+ CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(0))->value());
+ CHECK_EQ(1, i::Smi::cast(jsobj->GetElement(1))->value());
+ CHECK_EQ(2, i::Smi::cast(jsobj->GetElement(2))->value());
+ CHECK_EQ(3, i::Smi::cast(jsobj->GetElement(3))->value());
+ CHECK_EQ(4, i::Smi::cast(jsobj->GetElement(4))->value());
+ CHECK_EQ(6, i::Smi::cast(jsobj->GetElement(5))->value());
+ CHECK_EQ(7, i::Smi::cast(jsobj->GetElement(6))->value());
+ CHECK_EQ(8, i::Smi::cast(jsobj->GetElement(7))->value());
+
+ result = CompileRun("for (var i = 0; i < 8; i++) {"
+ " pixels[7] = undefined;"
+ "}"
+ "pixels[7];");
+ CHECK_EQ(0, result->Int32Value());
+ CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(7))->value());
+
+ result = CompileRun("for (var i = 0; i < 8; i++) {"
+ " pixels[6] = '2.3';"
+ "}"
+ "pixels[6];");
+ CHECK_EQ(2, result->Int32Value());
+ CHECK_EQ(2, i::Smi::cast(jsobj->GetElement(6))->value());
+
+ result = CompileRun("for (var i = 0; i < 8; i++) {"
+ " pixels[5] = NaN;"
+ "}"
+ "pixels[5];");
+ CHECK_EQ(0, result->Int32Value());
+ CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(5))->value());
+
+ result = CompileRun("for (var i = 0; i < 8; i++) {"
+ " pixels[8] = Infinity;"
+ "}"
+ "pixels[8];");
+ CHECK_EQ(255, result->Int32Value());
+ CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(8))->value());
+
+ result = CompileRun("for (var i = 0; i < 8; i++) {"
+ " pixels[9] = -Infinity;"
+ "}"
+ "pixels[9];");
+ CHECK_EQ(0, result->Int32Value());
+ CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(9))->value());
+
+ result = CompileRun("pixels[3] = 33;"
+ "delete pixels[3];"
+ "pixels[3];");
+ CHECK_EQ(33, result->Int32Value());
+
+ result = CompileRun("pixels[0] = 10; pixels[1] = 11;"
+ "pixels[2] = 12; pixels[3] = 13;"
+ "pixels.__defineGetter__('2',"
+ "function() { return 120; });"
+ "pixels[2];");
+ CHECK_EQ(12, result->Int32Value());
+
+ result = CompileRun("var js_array = new Array(40);"
+ "js_array[0] = 77;"
+ "js_array;");
+ CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());
+
+ result = CompileRun("pixels[1] = 23;"
+ "pixels.__proto__ = [];"
+ "js_array.__proto__ = pixels;"
+ "js_array.concat(pixels);");
+ CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());
+ CHECK_EQ(23, v8::Object::Cast(*result)->Get(v8_str("1"))->Int32Value());
+
+ free(pixel_data);
+}
diff --git a/V8Binding/v8/test/cctest/test-ast.cc b/V8Binding/v8/test/cctest/test-ast.cc
index 2054348..9931f56 100644
--- a/V8Binding/v8/test/cctest/test-ast.cc
+++ b/V8Binding/v8/test/cctest/test-ast.cc
@@ -35,11 +35,11 @@
using namespace v8::internal;
TEST(List) {
- List<Node*>* list = new List<Node*>(0);
+ List<AstNode*>* list = new List<AstNode*>(0);
CHECK_EQ(0, list->length());
ZoneScope zone_scope(DELETE_ON_EXIT);
- Node* node = new EmptyStatement();
+ AstNode* node = new EmptyStatement();
list->Add(node);
CHECK_EQ(1, list->length());
CHECK_EQ(node, list->at(0));
diff --git a/V8Binding/v8/test/cctest/test-debug.cc b/V8Binding/v8/test/cctest/test-debug.cc
index fddd000..9e2c38d 100644
--- a/V8Binding/v8/test/cctest/test-debug.cc
+++ b/V8Binding/v8/test/cctest/test-debug.cc
@@ -4875,7 +4875,7 @@ TEST(DebugBreakInMessageHandler) {
v8::Debug::SetMessageHandler2(DebugBreakMessageHandler);
// Test functions.
- const char* script = "function f() { debugger; } function g() { }";
+ const char* script = "function f() { debugger; g(); } function g() { }";
CompileRun(script);
v8::Local<v8::Function> f =
v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
@@ -4954,8 +4954,10 @@ TEST(RegExpDebugBreak) {
v8::Debug::DebugBreak();
result = f->Call(env->Global(), argc, argv);
- CHECK_EQ(20, break_point_hit_count);
- CHECK_EQ("exec", last_function_hit);
+ // Check that there was only one break event. Matching RegExp should not
+ // cause Break events.
+ CHECK_EQ(1, break_point_hit_count);
+ CHECK_EQ("f", last_function_hit);
}
#endif // V8_NATIVE_REGEXP
@@ -5295,3 +5297,63 @@ TEST(ProvisionalBreakpointOnLineOutOfRange) {
ClearBreakPointFromJS(sbp2);
v8::Debug::SetMessageHandler2(NULL);
}
+
+
+static void BreakMessageHandler(const v8::Debug::Message& message) {
+ if (message.IsEvent() && message.GetEvent() == v8::Break) {
+ // Count the number of breaks.
+ break_point_hit_count++;
+
+ v8::HandleScope scope;
+ v8::Handle<v8::String> json = message.GetJSON();
+
+ SendContinueCommand();
+ } else if (message.IsEvent() && message.GetEvent() == v8::AfterCompile) {
+ v8::HandleScope scope;
+
+ bool is_debug_break = i::StackGuard::IsDebugBreak();
+ // Force DebugBreak flag while serializer is working.
+ i::StackGuard::DebugBreak();
+
+ // Force serialization to trigger some internal JS execution.
+ v8::Handle<v8::String> json = message.GetJSON();
+
+ // Restore previous state.
+ if (is_debug_break) {
+ i::StackGuard::DebugBreak();
+ } else {
+ i::StackGuard::Continue(i::DEBUGBREAK);
+ }
+ }
+}
+
+
+// Test that if DebugBreak is forced it is ignored when code from
+// debug-delay.js is executed.
+TEST(NoDebugBreakInAfterCompileMessageHandler) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ // Register a debug message handler which sets the break flag and counts.
+ v8::Debug::SetMessageHandler2(BreakMessageHandler);
+
+ // Set the debug break flag.
+ v8::Debug::DebugBreak();
+
+ // Create a function for testing stepping.
+ const char* src = "function f() { eval('var x = 10;'); } ";
+ v8::Local<v8::Function> f = CompileFunction(&env, src, "f");
+
+ // There should be only one break event.
+ CHECK_EQ(1, break_point_hit_count);
+
+ // Set the debug break flag again.
+ v8::Debug::DebugBreak();
+ f->Call(env->Global(), 0, NULL);
+ // There should be one more break event when the script is evaluated in 'f'.
+ CHECK_EQ(2, break_point_hit_count);
+
+ // Get rid of the debug message handler.
+ v8::Debug::SetMessageHandler2(NULL);
+ CheckDebuggerUnloaded();
+}
diff --git a/V8Binding/v8/test/cctest/test-heap.cc b/V8Binding/v8/test/cctest/test-heap.cc
index 5163ff9..6b5907c 100644
--- a/V8Binding/v8/test/cctest/test-heap.cc
+++ b/V8Binding/v8/test/cctest/test-heap.cc
@@ -653,7 +653,7 @@ TEST(JSArray) {
uint32_t int_length = 0;
CHECK(Array::IndexFromObject(length, &int_length));
CHECK_EQ(length, array->length());
- CHECK(!array->HasFastElements()); // Must be in slow mode.
+ CHECK(array->HasDictionaryElements()); // Must be in slow mode.
// array[length] = name.
array->SetElement(int_length, name);
diff --git a/V8Binding/v8/test/cctest/test-log-ia32.cc b/V8Binding/v8/test/cctest/test-log-stack-tracer.cc
index a40a800..1ef0a93 100644
--- a/V8Binding/v8/test/cctest/test-log-ia32.cc
+++ b/V8Binding/v8/test/cctest/test-log-stack-tracer.cc
@@ -50,7 +50,7 @@ static void DoTrace(Address fp) {
trace_env.sample->fp = reinterpret_cast<uintptr_t>(fp);
// sp is only used to define stack high bound
trace_env.sample->sp =
- reinterpret_cast<unsigned int>(trace_env.sample) - 10240;
+ reinterpret_cast<uintptr_t>(trace_env.sample) - 10240;
StackTracer::Trace(trace_env.sample);
}
@@ -130,7 +130,10 @@ v8::Handle<v8::FunctionTemplate> TraceExtension::GetNativeFunction(
Address TraceExtension::GetFP(const v8::Arguments& args) {
CHECK_EQ(1, args.Length());
- Address fp = reinterpret_cast<Address>(args[0]->Int32Value() << 2);
+ // CodeGenerator::GenerateGetFramePointer pushes the EBP / RBP value
+ // on the stack. In 64-bit mode we can't use the Smi operations code
+ // because it checks that the value is within Smi bounds.
+ Address fp = *reinterpret_cast<Address*>(*args[0]);
printf("Trace: %p\n", fp);
return fp;
}
@@ -330,8 +333,11 @@ static void CFuncDoTrace() {
Address fp;
#ifdef __GNUC__
fp = reinterpret_cast<Address>(__builtin_frame_address(0));
-#elif defined _MSC_VER
+#elif defined _MSC_VER && defined V8_TARGET_ARCH_IA32
__asm mov [fp], ebp // NOLINT
+#elif defined _MSC_VER && defined V8_TARGET_ARCH_X64
+ // FIXME: I haven't really tried to compile it.
+ __asm movq [fp], rbp // NOLINT
#endif
DoTrace(fp);
}
diff --git a/V8Binding/v8/test/cctest/test-log.cc b/V8Binding/v8/test/cctest/test-log.cc
index f3f7efc..df58234 100644
--- a/V8Binding/v8/test/cctest/test-log.cc
+++ b/V8Binding/v8/test/cctest/test-log.cc
@@ -4,8 +4,12 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
-#include "v8.h"
+#ifdef __linux__
+#include <signal.h>
+#include <unistd.h>
+#endif
+#include "v8.h"
#include "log.h"
#include "cctest.h"
@@ -144,8 +148,25 @@ class LoggerTestHelper : public AllStatic {
using v8::internal::LoggerTestHelper;
+// Under Linux, we need to check whether SIGPROF signals were actually
+// delivered to avoid false positives. On other platforms profiling is done
+// via a high-priority thread, so this case never happens.
+static bool was_sigprof_received = true;
+#ifdef __linux__
+
+struct sigaction old_sigprof_handler;
+
+static void SigProfSignalHandler(int signal, siginfo_t* info, void* context) {
+ if (signal != SIGPROF) return;
+ was_sigprof_received = true;
+ old_sigprof_handler.sa_sigaction(signal, info, context);
+}
+
+#endif // __linux__
+
+
static int CheckThatProfilerWorks(int log_pos) {
- Logger::ResumeProfiler();
+ Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU);
CHECK(LoggerTestHelper::IsSamplerActive());
// Verify that the current map of compiled functions has been logged.
@@ -160,6 +181,18 @@ static int CheckThatProfilerWorks(int log_pos) {
const char* code_creation = "\ncode-creation,"; // eq. to /^code-creation,/
CHECK_NE(NULL, strstr(buffer.start(), code_creation));
+#ifdef __linux__
+ // Intercept the SIGPROF handler to make sure that the test process
+ // has actually received the signal. Under load, the system can defer it,
+ // causing a test failure. It is important to execute this after
+ // 'ResumeProfiler'.
+ was_sigprof_received = false;
+ struct sigaction sa;
+ sa.sa_sigaction = SigProfSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_SIGINFO;
+ CHECK_EQ(0, sigaction(SIGPROF, &sa, &old_sigprof_handler));
+#endif // __linux__
+
// Force compiler to generate new code by parametrizing source.
EmbeddedVector<char, 100> script_src;
i::OS::SNPrintF(script_src,
@@ -170,9 +203,11 @@ static int CheckThatProfilerWorks(int log_pos) {
const double end_time = i::OS::TimeCurrentMillis() + 200;
while (i::OS::TimeCurrentMillis() < end_time) {
CompileAndRunScript(script_src.start());
+ // Yield CPU to give Profiler thread a chance to process ticks.
+ i::OS::Sleep(1);
}
- Logger::PauseProfiler();
+ Logger::PauseProfiler(v8::PROFILER_MODULE_CPU);
CHECK(!LoggerTestHelper::IsSamplerActive());
// Wait 50 msecs to allow Profiler thread to process the last
@@ -189,7 +224,8 @@ static int CheckThatProfilerWorks(int log_pos) {
buffer[log_size] = '\0';
const char* tick = "\ntick,";
CHECK_NE(NULL, strstr(buffer.start(), code_creation));
- CHECK_NE(NULL, strstr(buffer.start(), tick));
+ const bool ticks_found = strstr(buffer.start(), tick) != NULL;
+ CHECK_EQ(was_sigprof_received, ticks_found);
return log_pos;
}
diff --git a/V8Binding/v8/test/cctest/test-regexp.cc b/V8Binding/v8/test/cctest/test-regexp.cc
index 33a83c7..8d8326c 100644
--- a/V8Binding/v8/test/cctest/test-regexp.cc
+++ b/V8Binding/v8/test/cctest/test-regexp.cc
@@ -35,7 +35,7 @@
#include "zone-inl.h"
#include "parser.h"
#include "ast.h"
-#include "jsregexp-inl.h"
+#include "jsregexp.h"
#include "regexp-macro-assembler.h"
#include "regexp-macro-assembler-irregexp.h"
#ifdef V8_TARGET_ARCH_ARM
diff --git a/V8Binding/v8/test/mjsunit/debug-stepin-builtin.js b/V8Binding/v8/test/mjsunit/debug-stepin-builtin.js
new file mode 100644
index 0000000..c6a97ea
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/debug-stepin-builtin.js
@@ -0,0 +1,78 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null;
+var state = 1;
+var expected_source_line_text = null;
+var expected_function_name = null;
+
+// Simple debug event handler which on the first break performs a 'step in'
+// action and then checks that execution is paused inside the function
+// named by expected_function_name.
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ if (state == 1) {
+ exec_state.prepareStep(Debug.StepAction.StepIn, 2);
+ state = 2;
+ } else if (state == 2) {
+ assertEquals(expected_function_name, event_data.func().name());
+ assertEquals(expected_source_line_text,
+ event_data.sourceLineText());
+ state = 3;
+ }
+ }
+ } catch(e) {
+ exception = e;
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+var a = [1,2,3,4,5];
+
+// Test step into function call from a function without local variables.
+function testStepInArraySlice() {
+ expected_function_name = 'testStepInArraySlice';
+ expected_source_line_text = '} // expected line';
+ debugger;
+ var s = Array.prototype.slice.call(a, 2,3);
+} // expected line
+
+state = 1;
+testStepInArraySlice();
+assertNull(exception);
+assertEquals(3, state);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/V8Binding/v8/test/mjsunit/mjsunit.status b/V8Binding/v8/test/mjsunit/mjsunit.status
index d30e78c..6853cdc 100644
--- a/V8Binding/v8/test/mjsunit/mjsunit.status
+++ b/V8Binding/v8/test/mjsunit/mjsunit.status
@@ -59,6 +59,7 @@ debug-multiple-breakpoints: CRASH || FAIL
debug-setbreakpoint: CRASH || FAIL || PASS
debug-step-stub-callfunction: SKIP
debug-stepin-accessor: CRASH || FAIL
+debug-stepin-builtin: CRASH || FAIL
debug-stepin-constructor: CRASH, FAIL
debug-stepin-function-call: CRASH || FAIL
debug-step: SKIP
@@ -76,7 +77,6 @@ array-splice: PASS || TIMEOUT
[ $arch == x64 ]
debug-backtrace: CRASH || FAIL
-date-parse: CRASH || FAIL
debug-backtrace-text: CRASH || FAIL
debug-multiple-breakpoints: CRASH || FAIL
debug-breakpoints: CRASH || FAIL
@@ -93,21 +93,13 @@ debug-ignore-breakpoints: CRASH || FAIL
debug-setbreakpoint: CRASH || FAIL
debug-step-stub-callfunction: CRASH || FAIL
debug-step: CRASH || FAIL
-mirror-date: CRASH || FAIL
-invalid-lhs: PASS || CRASH || FAIL
+debug-stepin-builtin: CRASH || FAIL
debug-stepin-constructor: CRASH || FAIL
debug-stepin-function-call: CRASH || FAIL
debug-stepin-accessor: CRASH || FAIL
-new: CRASH || FAIL
fuzz-natives: PASS || TIMEOUT
-greedy: PASS || TIMEOUT
debug-handle: CRASH || FAIL
-string-indexof: PASS || TIMEOUT
debug-clearbreakpointgroup: CRASH || FAIL
regress/regress-269: CRASH || FAIL
-div-mod: CRASH || FAIL
-unicode-test: PASS || TIMEOUT
-regress/regress-392: CRASH || FAIL
-regress/regress-1200351: CRASH || FAIL
regress/regress-998565: CRASH || FAIL
tools/tickprocessor: PASS || CRASH || FAIL
diff --git a/V8Binding/v8/test/mjsunit/regress/regress-416.js b/V8Binding/v8/test/mjsunit/regress/regress-416.js
new file mode 100644
index 0000000..d204bd3
--- /dev/null
+++ b/V8Binding/v8/test/mjsunit/regress/regress-416.js
@@ -0,0 +1,38 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test of invalid Date construction, and TimeClip function.
+
+// See http://code.google.com/p/v8/issues/detail?id=416
+
+assertTrue(isNaN(new Date(1e81).getTime()), "new Date(1e81)");
+assertTrue(isNaN(new Date(-1e81).getTime()), "new Date(-1e81)");
+assertTrue(isNaN(new Date(1e81, "").getTime()), "new Date(1e81, \"\")");
+assertTrue(isNaN(new Date(-1e81, "").getTime()), "new Date(-1e81, \"\")");
+assertTrue(isNaN(new Date(Number.NaN).getTime()), "new Date(Number.NaN)");
+assertTrue(isNaN(new Date(Number.NaN, "").getTime()),
+ "new Date(Number.NaN, \"\")");
diff --git a/V8Binding/v8/test/mjsunit/stack-traces.js b/V8Binding/v8/test/mjsunit/stack-traces.js
index e457ece..3bb5755 100644
--- a/V8Binding/v8/test/mjsunit/stack-traces.js
+++ b/V8Binding/v8/test/mjsunit/stack-traces.js
@@ -84,9 +84,26 @@ function testAnonymousMethod() {
(function () { FAIL }).call([1, 2, 3]);
}
+function CustomError(message, stripPoint) {
+ this.message = message;
+ Error.captureStackTrace(this, stripPoint);
+}
+
+CustomError.prototype.toString = function () {
+ return "CustomError: " + this.message;
+};
+
+function testDefaultCustomError() {
+ throw new CustomError("hep-hey", undefined);
+}
+
+function testStrippedCustomError() {
+ throw new CustomError("hep-hey", CustomError);
+}
+
// Utility function for testing that the expected strings occur
// in the stack trace produced when running the given function.
-function testTrace(fun, expected) {
+function testTrace(fun, expected, unexpected) {
var threw = false;
try {
fun();
@@ -94,6 +111,11 @@ function testTrace(fun, expected) {
for (var i = 0; i < expected.length; i++) {
assertTrue(e.stack.indexOf(expected[i]) != -1);
}
+ if (unexpected) {
+ for (var i = 0; i < unexpected.length; i++) {
+ assertEquals(e.stack.indexOf(unexpected[i]), -1);
+ }
+ }
threw = true;
}
assertTrue(threw);
@@ -165,6 +187,10 @@ testTrace(testValue, ["at Number.causeError"]);
testTrace(testConstructor, ["new Plonk"]);
testTrace(testRenamedMethod, ["Wookie.a$b$c$d [as d]"]);
testTrace(testAnonymousMethod, ["Array.<anonymous>"]);
+testTrace(testDefaultCustomError, ["hep-hey", "new CustomError"],
+ ["collectStackTrace"]);
+testTrace(testStrippedCustomError, ["hep-hey"], ["new CustomError",
+ "collectStackTrace"]);
testCallerCensorship();
testUnintendedCallerCensorship();
diff --git a/V8Binding/v8/test/mozilla/mozilla.status b/V8Binding/v8/test/mozilla/mozilla.status
index 538b0a8..74ba8f3 100644
--- a/V8Binding/v8/test/mozilla/mozilla.status
+++ b/V8Binding/v8/test/mozilla/mozilla.status
@@ -1,4 +1,4 @@
-# Copyright 2008 the V8 project authors. All rights reserved.
+# Copyright 2009 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -803,3 +803,16 @@ ecma/Expressions/11.7.3: SKIP
ecma/Expressions/11.10-3: SKIP
ecma/Expressions/11.7.1: SKIP
ecma_3/RegExp/regress-209067: SKIP
+
+[ $ARCH == x64 ]
+
+# Tests that fail on the 64-bit port. This section should be empty
+# when the 64-bit port is fully debugged.
+
+js1_2/regexp/regress-9141: FAIL
+js1_5/Regress/regress-211590: CRASH
+js1_5/Regress/regress-303213: PASS || CRASH
+js1_5/extensions/regress-336410-2: CRASH
+js1_5/extensions/regress-336410-1: CRASH
+js1_5/Function/regress-338001: FAIL || CRASH
+js1_5/extensions/regress-371636: CRASH
diff --git a/V8Binding/v8/tools/gyp/v8.gyp b/V8Binding/v8/tools/gyp/v8.gyp
index fc49620..365d87c 100644
--- a/V8Binding/v8/tools/gyp/v8.gyp
+++ b/V8Binding/v8/tools/gyp/v8.gyp
@@ -40,7 +40,7 @@
'defines': [
'ENABLE_LOGGING_AND_PROFILING',
],
- 'conditions': [
+ 'conditions': [
['target_arch=="arm"', {
'defines': [
'V8_TARGET_ARCH_ARM',
@@ -52,6 +52,11 @@
'V8_NATIVE_REGEXP',
],
}],
+ ['target_arch=="x64"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_X64',
+ ],
+ }],
],
'configurations': {
'Debug': {
@@ -211,6 +216,8 @@
'../../src/builtins.cc',
'../../src/builtins.h',
'../../src/bytecodes-irregexp.h',
+ '../../src/cfg.cc',
+ '../../src/cfg.h',
'../../src/char-predicates-inl.h',
'../../src/char-predicates.h',
'../../src/checks.cc',
@@ -277,7 +284,6 @@
'../../src/jump-target.cc',
'../../src/jump-target.h',
'../../src/jump-target-inl.h',
- '../../src/jsregexp-inl.h',
'../../src/jsregexp.cc',
'../../src/jsregexp.h',
'../../src/list-inl.h',
@@ -379,6 +385,7 @@
'../../src/arm/assembler-arm.cc',
'../../src/arm/assembler-arm.h',
'../../src/arm/builtins-arm.cc',
+ '../../src/arm/cfg-arm.cc',
'../../src/arm/codegen-arm.cc',
'../../src/arm/codegen-arm.h',
'../../src/arm/constants-arm.h',
@@ -409,6 +416,7 @@
'../../src/ia32/assembler-ia32.cc',
'../../src/ia32/assembler-ia32.h',
'../../src/ia32/builtins-ia32.cc',
+ '../../src/ia32/cfg-ia32.cc',
'../../src/ia32/codegen-ia32.cc',
'../../src/ia32/codegen-ia32.h',
'../../src/ia32/cpu-ia32.cc',
@@ -428,6 +436,35 @@
'../../src/ia32/virtual-frame-ia32.h',
],
}],
+ ['target_arch=="x64"', {
+ 'include_dirs+': [
+ '../../src/x64',
+ ],
+ 'sources': [
+ '../../src/x64/assembler-x64-inl.h',
+ '../../src/x64/assembler-x64.cc',
+ '../../src/x64/assembler-x64.h',
+ '../../src/x64/builtins-x64.cc',
+ '../../src/x64/cfg-x64.cc',
+ '../../src/x64/codegen-x64.cc',
+ '../../src/x64/codegen-x64.h',
+ '../../src/x64/cpu-x64.cc',
+ '../../src/x64/debug-x64.cc',
+ '../../src/x64/disasm-x64.cc',
+ '../../src/x64/frames-x64.cc',
+ '../../src/x64/frames-x64.h',
+ '../../src/x64/ic-x64.cc',
+ '../../src/x64/jump-target-x64.cc',
+ '../../src/x64/macro-assembler-x64.cc',
+ '../../src/x64/macro-assembler-x64.h',
+ #'../../src/x64/regexp-macro-assembler-x64.cc',
+ #'../../src/x64/regexp-macro-assembler-x64.h',
+ '../../src/x64/register-allocator-x64.cc',
+ '../../src/x64/stub-cache-x64.cc',
+ '../../src/x64/virtual-frame-x64.cc',
+ '../../src/x64/virtual-frame-x64.h',
+ ],
+ }],
['OS=="linux"', {
'link_settings': {
'libraries': [
diff --git a/V8Binding/v8/tools/process-heap-prof.py b/V8Binding/v8/tools/process-heap-prof.py
index b8ab2d3..ff83952 100755
--- a/V8Binding/v8/tools/process-heap-prof.py
+++ b/V8Binding/v8/tools/process-heap-prof.py
@@ -35,10 +35,14 @@
# $ ./shell --log-gc script.js
# $ tools/process-heap-prof.py v8.log | hp2ps -c > script-heap-graph.ps
# ('-c' enables color, see hp2ps manual page for more options)
+# or
+# $ tools/process-heap-prof.py --js-cons-profile v8.log | hp2ps -c > script-heap-graph.ps
+# to get a JS constructor profile
+
import csv, sys, time
-def process_logfile(filename):
+def process_logfile(filename, itemname):
first_call_time = None
sample_time = 0.0
sampling = False
@@ -63,11 +67,14 @@ def process_logfile(filename):
elif row[0] == 'heap-sample-end' and row[1] == 'Heap':
print('END_SAMPLE %.2f' % sample_time)
sampling = False
- elif row[0] == 'heap-sample-item' and sampling:
+ elif row[0] == itemname and sampling:
print('%s %d' % (row[1], int(row[3])))
finally:
logfile.close()
except:
sys.exit('can\'t open %s' % filename)
-process_logfile(sys.argv[1])
+if sys.argv[1] == '--js-cons-profile':
+ process_logfile(sys.argv[2], 'heap-js-cons-item')
+else:
+ process_logfile(sys.argv[1], 'heap-sample-item')
diff --git a/V8Binding/v8/tools/tickprocessor.js b/V8Binding/v8/tools/tickprocessor.js
index efd9750..34c6195 100644
--- a/V8Binding/v8/tools/tickprocessor.js
+++ b/V8Binding/v8/tools/tickprocessor.js
@@ -429,7 +429,7 @@ function UnixCppEntriesProvider(nmExec) {
this.symbols = [];
this.parsePos = 0;
this.nmExec = nmExec;
- this.FUNC_RE = /^([0-9a-fA-F]{8}) ([0-9a-fA-F]{8} )?[tTwW] (.*)$/;
+ this.FUNC_RE = /^([0-9a-fA-F]{8,16}) ([0-9a-fA-F]{8,16} )?[tTwW] (.*)$/;
};
inherits(UnixCppEntriesProvider, CppEntriesProvider);
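
The FUNC_RE change widens the address fields from exactly 8 hex digits to 8-16 so that nm output from 64-bit binaries (16-digit addresses) is parsed as well as 32-bit output. A small standalone check of that pattern, written here in C++ purely for illustration (the real code is JavaScript in tickprocessor.js):

    #include <cassert>
    #include <regex>
    #include <string>

    int main() {
      // Same pattern as the updated FUNC_RE (std::regex defaults to ECMAScript syntax).
      const std::regex func_re("^([0-9a-fA-F]{8,16}) ([0-9a-fA-F]{8,16} )?[tTwW] (.*)$");
      // 32-bit nm line: 8-digit address.
      assert(std::regex_match(std::string("080484d0 T v8::internal::Top::Iterate"),
                              func_re));
      // 64-bit nm line: 16-digit address plus an optional 16-digit size field.
      assert(std::regex_match(
          std::string("0000000000401230 0000000000000080 T v8::internal::Top::Iterate"),
          func_re));
      return 0;
    }
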
diff --git a/V8Binding/v8/tools/v8.xcodeproj/project.pbxproj b/V8Binding/v8/tools/v8.xcodeproj/project.pbxproj
index da155b8..45e6361 100644
--- a/V8Binding/v8/tools/v8.xcodeproj/project.pbxproj
+++ b/V8Binding/v8/tools/v8.xcodeproj/project.pbxproj
@@ -512,7 +512,6 @@
89A15C630EE4661A00B48DEB /* bytecodes-irregexp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "bytecodes-irregexp.h"; sourceTree = "<group>"; };
89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "interpreter-irregexp.cc"; sourceTree = "<group>"; };
89A15C670EE4665300B48DEB /* interpreter-irregexp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "interpreter-irregexp.h"; sourceTree = "<group>"; };
- 89A15C680EE4665300B48DEB /* jsregexp-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jsregexp-inl.h"; sourceTree = "<group>"; };
89A15C6D0EE466A900B48DEB /* platform-freebsd.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "platform-freebsd.cc"; sourceTree = "<group>"; };
89A15C700EE466D000B48DEB /* regexp-macro-assembler-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "regexp-macro-assembler-arm.cc"; path = "arm/regexp-macro-assembler-arm.cc"; sourceTree = "<group>"; };
89A15C710EE466D000B48DEB /* regexp-macro-assembler-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "regexp-macro-assembler-arm.h"; path = "arm/regexp-macro-assembler-arm.h"; sourceTree = "<group>"; };
@@ -733,7 +732,6 @@
897FF14D0E719B8F00D62E90 /* ic.h */,
89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */,
89A15C670EE4665300B48DEB /* interpreter-irregexp.h */,
- 89A15C680EE4665300B48DEB /* jsregexp-inl.h */,
897FF14E0E719B8F00D62E90 /* jsregexp.cc */,
897FF14F0E719B8F00D62E90 /* jsregexp.h */,
58950D4E0F55514900F3E8BA /* jump-target-arm.cc */,
diff --git a/V8Binding/v8/tools/visual_studio/v8_base.vcproj b/V8Binding/v8/tools/visual_studio/v8_base.vcproj
index ece631a..421cc7c 100644
--- a/V8Binding/v8/tools/visual_studio/v8_base.vcproj
+++ b/V8Binding/v8/tools/visual_studio/v8_base.vcproj
@@ -237,6 +237,18 @@
>
</File>
<File
+ RelativePath="..\..\src\ia32\cfg-ia32.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\cfg.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\cfg.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\char-predicates-inl.h"
>
</File>
@@ -521,10 +533,6 @@
>
</File>
<File
- RelativePath="..\..\src\jsregexp-inl.h"
- >
- </File>
- <File
RelativePath="..\..\src\jsregexp.cc"
>
</File>
diff --git a/V8Binding/v8/tools/visual_studio/v8_base_arm.vcproj b/V8Binding/v8/tools/visual_studio/v8_base_arm.vcproj
index d73747e..8fe54af 100644
--- a/V8Binding/v8/tools/visual_studio/v8_base_arm.vcproj
+++ b/V8Binding/v8/tools/visual_studio/v8_base_arm.vcproj
@@ -237,6 +237,18 @@
>
</File>
<File
+ RelativePath="..\..\src\arm\cfg-arm.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\cfg.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\cfg.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\char-predicates-inl.h"
>
</File>
@@ -521,10 +533,6 @@
>
</File>
<File
- RelativePath="..\..\src\jsregexp-inl.h"
- >
- </File>
- <File
RelativePath="..\..\src\jsregexp.cc"
>
</File>
diff --git a/V8Binding/v8/tools/visual_studio/v8_cctest.vcproj b/V8Binding/v8/tools/visual_studio/v8_cctest.vcproj
index 97de446..ec07889 100644
--- a/V8Binding/v8/tools/visual_studio/v8_cctest.vcproj
+++ b/V8Binding/v8/tools/visual_studio/v8_cctest.vcproj
@@ -210,7 +210,7 @@
>
</File>
<File
- RelativePath="..\..\test\cctest\test-log-ia32.cc"
+ RelativePath="..\..\test\cctest\test-log-stack-tracer.cc"
>
</File>
<File
diff --git a/WEBKIT_MERGE_REVISION b/WEBKIT_MERGE_REVISION
index 4706c2d..c1ba036 100644
--- a/WEBKIT_MERGE_REVISION
+++ b/WEBKIT_MERGE_REVISION
@@ -2,4 +2,4 @@ We sync with Chromium release revision, which has both webkit revision and V8 re
http://src.chromium.org/svn/branches/187/src@18043
http://svn.webkit.org/repository/webkit/trunk@44544
- http://v8.googlecode.com/svn/branches/bleeding_edge@2530
+ http://v8.googlecode.com/svn/branches/bleeding_edge@2654