author    Andrei Popescu <andreip@google.com>  2009-08-28 12:56:47 +0100
committer Andrei Popescu <andreip@google.com>  2009-08-28 12:56:47 +0100
commit    3c8aa761f02fba434a444f6ed068375f6e4fbc0e (patch)
tree      cda851baed3a24c515a73525c802689788916f5f
parent    8e008b4b8cd6aff82cb9a9705e13a637285c705d (diff)
Update V8 to 2780 to fix a crash and pick up some ARM optimizations
-rw-r--r--  V8Binding/v8/ChangeLog | 35
-rw-r--r--  V8Binding/v8/SConstruct | 71
-rw-r--r--  V8Binding/v8/include/v8.h | 358
-rw-r--r--  V8Binding/v8/src/api.cc | 189
-rw-r--r--  V8Binding/v8/src/api.h | 24
-rw-r--r--  V8Binding/v8/src/apiutils.h | 2
-rw-r--r--  V8Binding/v8/src/arm/builtins-arm.cc | 22
-rw-r--r--  V8Binding/v8/src/arm/cfg-arm.cc | 2
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm.cc | 127
-rw-r--r--  V8Binding/v8/src/arm/disasm-arm.cc | 2
-rw-r--r--  V8Binding/v8/src/arm/ic-arm.cc | 21
-rw-r--r--  V8Binding/v8/src/arm/macro-assembler-arm.cc | 25
-rw-r--r--  V8Binding/v8/src/arm/macro-assembler-arm.h | 6
-rw-r--r--  V8Binding/v8/src/arm/simulator-arm.cc | 1
-rw-r--r--  V8Binding/v8/src/arm/stub-cache-arm.cc | 27
-rw-r--r--  V8Binding/v8/src/arm/virtual-frame-arm.cc | 5
-rw-r--r--  V8Binding/v8/src/assembler.cc | 5
-rw-r--r--  V8Binding/v8/src/assembler.h | 3
-rw-r--r--  V8Binding/v8/src/builtins.cc | 81
-rw-r--r--  V8Binding/v8/src/compiler.cc | 7
-rw-r--r--  V8Binding/v8/src/debug-delay.js | 9
-rw-r--r--  V8Binding/v8/src/debug.cc | 6
-rw-r--r--  V8Binding/v8/src/execution.cc | 9
-rw-r--r--  V8Binding/v8/src/execution.h | 22
-rw-r--r--  V8Binding/v8/src/frames-inl.h | 5
-rw-r--r--  V8Binding/v8/src/frames.h | 9
-rw-r--r--  V8Binding/v8/src/global-handles.cc | 40
-rw-r--r--  V8Binding/v8/src/globals.h | 14
-rw-r--r--  V8Binding/v8/src/handles.cc | 12
-rw-r--r--  V8Binding/v8/src/handles.h | 6
-rw-r--r--  V8Binding/v8/src/heap-inl.h | 4
-rw-r--r--  V8Binding/v8/src/heap.cc | 65
-rw-r--r--  V8Binding/v8/src/heap.h | 76
-rw-r--r--  V8Binding/v8/src/ia32/builtins-ia32.cc | 37
-rw-r--r--  V8Binding/v8/src/ia32/codegen-ia32.cc | 42
-rw-r--r--  V8Binding/v8/src/ia32/macro-assembler-ia32.cc | 140
-rw-r--r--  V8Binding/v8/src/ia32/macro-assembler-ia32.h | 49
-rw-r--r--  V8Binding/v8/src/ia32/stub-cache-ia32.cc | 130
-rw-r--r--  V8Binding/v8/src/mark-compact.cc | 2
-rw-r--r--  V8Binding/v8/src/mksnapshot.cc | 2
-rw-r--r--  V8Binding/v8/src/objects-debug.cc | 2
-rw-r--r--  V8Binding/v8/src/objects-inl.h | 25
-rw-r--r--  V8Binding/v8/src/objects.cc | 53
-rw-r--r--  V8Binding/v8/src/objects.h | 18
-rw-r--r--  V8Binding/v8/src/platform-win32.cc | 25
-rw-r--r--  V8Binding/v8/src/runtime.cc | 53
-rw-r--r--  V8Binding/v8/src/runtime.h | 3
-rw-r--r--  V8Binding/v8/src/scanner.cc | 3
-rw-r--r--  V8Binding/v8/src/scanner.h | 4
-rw-r--r--  V8Binding/v8/src/serialize.cc | 24
-rw-r--r--  V8Binding/v8/src/spaces.cc | 71
-rw-r--r--  V8Binding/v8/src/spaces.h | 25
-rw-r--r--  V8Binding/v8/src/stub-cache.cc | 7
-rw-r--r--  V8Binding/v8/src/stub-cache.h | 11
-rw-r--r--  V8Binding/v8/src/utils.h | 12
-rw-r--r--  V8Binding/v8/src/v8-counters.h | 1
-rw-r--r--  V8Binding/v8/src/v8.cc | 15
-rw-r--r--  V8Binding/v8/src/v8.h | 2
-rw-r--r--  V8Binding/v8/src/version.cc | 2
-rw-r--r--  V8Binding/v8/src/x64/builtins-x64.cc | 47
-rw-r--r--  V8Binding/v8/src/x64/codegen-x64.cc | 333
-rw-r--r--  V8Binding/v8/src/x64/codegen-x64.h | 8
-rw-r--r--  V8Binding/v8/src/x64/frames-x64.h | 3
-rw-r--r--  V8Binding/v8/src/x64/ic-x64.cc | 76
-rw-r--r--  V8Binding/v8/src/x64/macro-assembler-x64.cc | 159
-rw-r--r--  V8Binding/v8/src/x64/macro-assembler-x64.h | 49
-rw-r--r--  V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc | 24
-rw-r--r--  V8Binding/v8/src/x64/stub-cache-x64.cc | 12
-rw-r--r--  V8Binding/v8/test/cctest/test-api.cc | 119
-rw-r--r--  V8Binding/v8/test/cctest/test-debug.cc | 16
-rw-r--r--  V8Binding/v8/test/cctest/test-disasm-arm.cc | 12
-rw-r--r--  V8Binding/v8/test/cctest/test-log-stack-tracer.cc | 6
-rw-r--r--  V8Binding/v8/test/cctest/test-log.cc | 2
-rw-r--r--  V8Binding/v8/test/cctest/test-serialize.cc | 10
-rw-r--r--  V8Binding/v8/test/cctest/test-strings.cc | 6
-rw-r--r--  V8Binding/v8/test/mjsunit/date-parse.js | 4
-rw-r--r--  V8Binding/v8/test/mjsunit/debug-stepin-constructor.js | 4
-rw-r--r--  V8Binding/v8/test/mjsunit/mjsunit.status | 2
-rwxr-xr-x  V8Binding/v8/test/mjsunit/simple-constructor.js | 66
-rw-r--r--  V8Binding/v8/test/mozilla/mozilla.status | 8
-rw-r--r--  V8Binding/v8/tools/gyp/v8.gyp | 23
-rw-r--r--  V8Binding/v8/tools/v8.xcodeproj/project.pbxproj | 2
-rw-r--r--  V8Binding/v8/tools/visual_studio/d8_x64.vcproj | 2
-rw-r--r--  V8Binding/v8/tools/visual_studio/debug.vsprops | 2
-rw-r--r--  V8Binding/v8/tools/visual_studio/v8_cctest_x64.vcproj | 2
-rw-r--r--  V8Binding/v8/tools/visual_studio/v8_shell_sample_x64.vcproj | 2
-rw-r--r--  WEBKIT_MERGE_REVISION | 2
87 files changed, 2342 insertions(+), 707 deletions(-)
diff --git a/V8Binding/v8/ChangeLog b/V8Binding/v8/ChangeLog
index c59661d..7ea34a5 100644
--- a/V8Binding/v8/ChangeLog
+++ b/V8Binding/v8/ChangeLog
@@ -1,3 +1,38 @@
+2009-08-26: Version 1.3.8
+
+ Changed the handling of idle notifications to allow idle
+ notifications when V8 has not yet been initialized.
+
+ Fixed ARM simulator compilation problem on Windows.
+
+
+2009-08-25: Version 1.3.7
+
+ Reduced the size of generated code on ARM platforms by reducing
+ the size of constant pools.
+
+ Changed build files to not include the 'ENV' user environment
+ variable in the build environment.
+
+ Changed the handling of idle notifications.
+
+
+2009-08-21: Version 1.3.6
+
+ Add support for forceful termination of JavaScript execution.
+
+ Add low memory notification to the API. The embedding host can signal
+ a low memory situation to V8.
+
+ Changed the handling of global handles (persistent handles in the API
+ sense) to avoid issues regarding allocation of new global handles
+ during weak handle callbacks.
+
+ Changed the growth policy of the young space.
+
+ Fixed a GC issue introduced in version 1.3.5.
+
+
2009-08-19: Version 1.3.5
Optimize initialization of some arrays in the builtins.
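
The 1.3.6-1.3.8 notes above change the embedder-facing contract: IdleNotification now returns bool and may be called before V8 is initialized, and LowMemoryNotification is new. A minimal sketch of how an embedder might drive both (PumpIdleWork and OnMemoryPressure are illustrative host hooks, not V8 API):

  #include <v8.h>

  // Call from the host's idle handler. Per the 1.3.8 note this is safe
  // even before V8 is initialized; the bool return (new in this commit)
  // means "no more cleanup possible, stop calling until real work runs".
  bool PumpIdleWork() {
    return v8::V8::IdleNotification(false /* is_high_priority */);
  }

  // New in 1.3.6: forward host memory-pressure signals to V8 so it can
  // collect garbage aggressively.
  void OnMemoryPressure() {
    v8::V8::LowMemoryNotification();
  }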
diff --git a/V8Binding/v8/SConstruct b/V8Binding/v8/SConstruct
index efd34db..4d1792f 100644
--- a/V8Binding/v8/SConstruct
+++ b/V8Binding/v8/SConstruct
@@ -105,6 +105,9 @@ LIBRARY_FLAGS = {
'arch:x64' : {
'CPPDEFINES': ['V8_NATIVE_REGEXP']
}
+ },
+ 'mode:debug': {
+ 'CPPDEFINES': ['V8_ENABLE_CHECKS']
}
},
'gcc': {
@@ -178,17 +181,25 @@ LIBRARY_FLAGS = {
},
'msvc': {
'all': {
- 'DIALECTFLAGS': ['/nologo'],
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '/GR-', '/Gy'],
- 'CPPDEFINES': ['WIN32', '_USE_32BIT_TIME_T'],
- 'LINKFLAGS': ['/NOLOGO', '/MACHINE:X86', '/INCREMENTAL:NO',
- '/NXCOMPAT', '/IGNORE:4221'],
- 'ARFLAGS': ['/NOLOGO'],
+ 'CPPDEFINES': ['WIN32'],
+ 'LINKFLAGS': ['/INCREMENTAL:NO', '/NXCOMPAT', '/IGNORE:4221'],
'CCPDBFLAGS': ['/Zi']
},
+ 'verbose:off': {
+ 'DIALECTFLAGS': ['/nologo'],
+ 'ARFLAGS': ['/NOLOGO']
+ },
'arch:ia32': {
- 'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
+ 'CPPDEFINES': ['V8_TARGET_ARCH_IA32', '_USE_32BIT_TIME_T'],
+ 'LINKFLAGS': ['/MACHINE:X86'],
+ 'ARFLAGS': ['/MACHINE:X86']
+ },
+ 'arch:x64': {
+ 'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
+ 'LINKFLAGS': ['/MACHINE:X64'],
+ 'ARFLAGS': ['/MACHINE:X64']
},
'mode:debug': {
'CCFLAGS': ['/Od', '/Gm'],
@@ -250,11 +261,13 @@ V8_EXTRA_FLAGS = {
},
'msvc': {
'all': {
- 'WARNINGFLAGS': ['/W3', '/WX', '/wd4355', '/wd4800']
+ 'WARNINGFLAGS': ['/WX', '/wd4355', '/wd4800']
},
- 'library:shared': {
- 'CPPDEFINES': ['BUILDING_V8_SHARED'],
- 'LIBS': ['winmm', 'ws2_32']
+ 'arch:ia32': {
+ 'WARNINGFLAGS': ['/W3']
+ },
+ 'arch:x64': {
+ 'WARNINGFLAGS': ['/W2']
},
'arch:arm': {
'CPPDEFINES': ['V8_TARGET_ARCH_ARM'],
@@ -352,7 +365,10 @@ CCTEST_EXTRA_FLAGS = {
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
- }
+ },
+ 'arch:x64': {
+ 'CPPDEFINES': ['V8_TARGET_ARCH_X64']
+ },
}
}
@@ -417,10 +433,15 @@ SAMPLE_FLAGS = {
},
'msvc': {
'all': {
- 'CCFLAGS': ['/nologo'],
- 'LINKFLAGS': ['/nologo'],
'LIBS': ['winmm', 'ws2_32']
},
+ 'verbose:off': {
+ 'CCFLAGS': ['/nologo'],
+ 'LINKFLAGS': ['/NOLOGO']
+ },
+ 'verbose:on': {
+ 'LINKFLAGS': ['/VERBOSE']
+ },
'library:shared': {
'CPPDEFINES': ['USING_V8_SHARED']
},
@@ -442,7 +463,12 @@ SAMPLE_FLAGS = {
}
},
'arch:ia32': {
- 'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
+ 'CPPDEFINES': ['V8_TARGET_ARCH_IA32'],
+ 'LINKFLAGS': ['/MACHINE:X86']
+ },
+ 'arch:x64': {
+ 'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
+ 'LINKFLAGS': ['/MACHINE:X64']
},
'mode:debug': {
'CCFLAGS': ['/Od'],
@@ -585,6 +611,11 @@ SIMPLE_OPTIONS = {
'values': ['dumb', 'readline'],
'default': 'dumb',
'help': 'the console to use for the d8 shell'
+ },
+ 'verbose': {
+ 'values': ['on', 'off'],
+ 'default': 'off',
+ 'help': 'more output from compiler and linker'
}
}
@@ -789,12 +820,20 @@ def BuildSpecific(env, mode, env_overrides):
context = BuildContext(options, env_overrides, samples=SplitList(env['sample']))
- library_flags = context.AddRelevantFlags(os.environ, LIBRARY_FLAGS)
+ # Remove variables which can't be imported from the user's external
+ # environment into a construction environment.
+ user_environ = os.environ.copy()
+ try:
+ del user_environ['ENV']
+ except KeyError:
+ pass
+
+ library_flags = context.AddRelevantFlags(user_environ, LIBRARY_FLAGS)
v8_flags = context.AddRelevantFlags(library_flags, V8_EXTRA_FLAGS)
mksnapshot_flags = context.AddRelevantFlags(library_flags, MKSNAPSHOT_EXTRA_FLAGS)
dtoa_flags = context.AddRelevantFlags(library_flags, DTOA_EXTRA_FLAGS)
cctest_flags = context.AddRelevantFlags(v8_flags, CCTEST_EXTRA_FLAGS)
- sample_flags = context.AddRelevantFlags(os.environ, SAMPLE_FLAGS)
+ sample_flags = context.AddRelevantFlags(user_environ, SAMPLE_FLAGS)
d8_flags = context.AddRelevantFlags(library_flags, D8_FLAGS)
context.flags = {
diff --git a/V8Binding/v8/include/v8.h b/V8Binding/v8/include/v8.h
index c7cc315..346050d 100644
--- a/V8Binding/v8/include/v8.h
+++ b/V8Binding/v8/include/v8.h
@@ -127,6 +127,12 @@ class FunctionTemplate;
class ObjectTemplate;
class Data;
+namespace internal {
+
+class Object;
+
+}
+
// --- W e a k H a n d l e s
@@ -227,8 +233,8 @@ template <class T> class V8EXPORT_INLINE Handle {
* The handles' references are not checked.
*/
template <class S> bool operator==(Handle<S> that) const {
- void** a = reinterpret_cast<void**>(**this);
- void** b = reinterpret_cast<void**>(*that);
+ internal::Object** a = reinterpret_cast<internal::Object**>(**this);
+ internal::Object** b = reinterpret_cast<internal::Object**>(*that);
if (a == 0) return b == 0;
if (b == 0) return false;
return *a == *b;
@@ -245,7 +251,11 @@ template <class T> class V8EXPORT_INLINE Handle {
}
template <class S> static inline Handle<T> Cast(Handle<S> that) {
+#ifdef V8_ENABLE_CHECKS
+ // If we're going to perform the type check then we have to check
+ // that the handle isn't empty before doing the checked cast.
if (that.IsEmpty()) return Handle<T>();
+#endif
return Handle<T>(T::Cast(*that));
}
@@ -275,7 +285,11 @@ template <class T> class V8EXPORT_INLINE Local : public Handle<T> {
}
template <class S> inline Local(S* that) : Handle<T>(that) { }
template <class S> static inline Local<T> Cast(Local<S> that) {
+#ifdef V8_ENABLE_CHECKS
+ // If we're going to perform the type check then we have to check
+ // that the handle isn't empty before doing the checked cast.
if (that.IsEmpty()) return Local<T>();
+#endif
return Local<T>(T::Cast(*that));
}
@@ -344,7 +358,11 @@ template <class T> class V8EXPORT_INLINE Persistent : public Handle<T> {
: Handle<T>(*that) { }
template <class S> static inline Persistent<T> Cast(Persistent<S> that) {
+#ifdef V8_ENABLE_CHECKS
+ // If we're going to perform the type check then we have to check
+ // that the handle isn't empty before doing the checked cast.
if (that.IsEmpty()) return Persistent<T>();
+#endif
return Persistent<T>(T::Cast(*that));
}
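
All three Cast specializations above follow the same pattern: under the new V8_ENABLE_CHECKS define (turned on by mode:debug in the SConstruct change), an empty handle is filtered out before T::Cast runs its CheckCast; without the define, Cast is an unchecked pointer cast. An illustrative call site:

  v8::Local<v8::Value> v = ...;  // some value of unknown type
  v8::Local<v8::String> s = v8::Local<v8::String>::Cast(v);
  // checks on:  aborts via ApiCheck ("Could not convert to string")
  //             if v is not a string; an empty v yields an empty s.
  // checks off: compiles to a plain static_cast, no validation.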
@@ -423,7 +441,7 @@ class V8EXPORT HandleScope {
/**
* Creates a new handle with the given value.
*/
- static void** CreateHandle(void* value);
+ static internal::Object** CreateHandle(internal::Object* value);
private:
// Make it impossible to create heap-allocated or illegal handle
@@ -438,8 +456,8 @@ class V8EXPORT HandleScope {
class V8EXPORT Data {
public:
int extensions;
- void** next;
- void** limit;
+ internal::Object** next;
+ internal::Object** limit;
inline void Initialize() {
extensions = -1;
next = limit = NULL;
@@ -451,7 +469,7 @@ class V8EXPORT HandleScope {
// Allow for the active closing of HandleScopes which allows to pass a handle
// from the HandleScope being closed to the next top most HandleScope.
bool is_closed_;
- void** RawClose(void** value);
+ internal::Object** RawClose(internal::Object** value);
friend class ImplementationUtilities;
};
@@ -671,7 +689,7 @@ class V8EXPORT Value : public Data {
* Returns true if this value is an instance of the String type.
* See ECMA-262 8.4.
*/
- bool IsString() const;
+ inline bool IsString() const;
/**
* Returns true if this value is a function.
@@ -737,6 +755,10 @@ class V8EXPORT Value : public Data {
/** JS == */
bool Equals(Handle<Value> that) const;
bool StrictEquals(Handle<Value> that) const;
+
+ private:
+ inline bool QuickIsString() const;
+ bool FullIsString() const;
};
@@ -868,7 +890,7 @@ class V8EXPORT String : public Primitive {
* Get the ExternalStringResource for an external string. Returns
* NULL if IsExternal() doesn't return true.
*/
- ExternalStringResource* GetExternalStringResource() const;
+ inline ExternalStringResource* GetExternalStringResource() const;
/**
* Get the ExternalAsciiStringResource for an external ascii string.
@@ -876,7 +898,7 @@ class V8EXPORT String : public Primitive {
*/
ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
- static String* Cast(v8::Value* obj);
+ static inline String* Cast(v8::Value* obj);
/**
* Allocates a new string from either utf-8 encoded or ascii data.
@@ -1010,6 +1032,10 @@ class V8EXPORT String : public Primitive {
Value(const Value&);
void operator=(const Value&);
};
+
+ private:
+ void VerifyExternalStringResource(ExternalStringResource* val) const;
+ static void CheckCast(v8::Value* obj);
};
@@ -1020,9 +1046,10 @@ class V8EXPORT Number : public Primitive {
public:
double Value() const;
static Local<Number> New(double value);
- static Number* Cast(v8::Value* obj);
+ static inline Number* Cast(v8::Value* obj);
private:
Number();
+ static void CheckCast(v8::Value* obj);
};
@@ -1033,9 +1060,10 @@ class V8EXPORT Integer : public Number {
public:
static Local<Integer> New(int32_t value);
int64_t Value() const;
- static Integer* Cast(v8::Value* obj);
+ static inline Integer* Cast(v8::Value* obj);
private:
Integer();
+ static void CheckCast(v8::Value* obj);
};
@@ -1074,7 +1102,9 @@ class V8EXPORT Date : public Value {
*/
double NumberValue() const;
- static Date* Cast(v8::Value* obj);
+ static inline Date* Cast(v8::Value* obj);
+ private:
+ static void CheckCast(v8::Value* obj);
};
@@ -1153,14 +1183,13 @@ class V8EXPORT Object : public Value {
/** Gets the number of internal fields for this Object. */
int InternalFieldCount();
/** Gets the value in an internal field. */
- Local<Value> GetInternalField(int index);
+ inline Local<Value> GetInternalField(int index);
/** Sets the value in an internal field. */
void SetInternalField(int index, Handle<Value> value);
- // The two functions below do not perform index bounds checks and
- // they do not check that the VM is still running. Use with caution.
/** Gets a native pointer from an internal field. */
- void* GetPointerFromInternalField(int index);
+ inline void* GetPointerFromInternalField(int index);
+
/** Sets a native pointer in an internal field. */
void SetPointerInInternalField(int index, void* value);
@@ -1223,9 +1252,17 @@ class V8EXPORT Object : public Value {
void SetIndexedPropertiesToPixelData(uint8_t* data, int length);
static Local<Object> New();
- static Object* Cast(Value* obj);
+ static inline Object* Cast(Value* obj);
private:
Object();
+ static void CheckCast(Value* obj);
+ Local<Value> CheckedGetInternalField(int index);
+
+ /**
+ * If quick access to the internal field is possible this method
+ * returns the value. Otherwise an empty handle is returned.
+ */
+ inline Local<Value> UncheckedGetInternalField(int index);
};
@@ -1243,9 +1280,10 @@ class V8EXPORT Array : public Object {
Local<Object> CloneElementAt(uint32_t index);
static Local<Array> New(int length = 0);
- static Array* Cast(Value* obj);
+ static inline Array* Cast(Value* obj);
private:
Array();
+ static void CheckCast(Value* obj);
};
@@ -1259,9 +1297,10 @@ class V8EXPORT Function : public Object {
Local<Value> Call(Handle<Object> recv, int argc, Handle<Value> argv[]);
void SetName(Handle<String> name);
Handle<Value> GetName() const;
- static Function* Cast(Value* obj);
+ static inline Function* Cast(Value* obj);
private:
Function();
+ static void CheckCast(Value* obj);
};
@@ -1279,13 +1318,16 @@ class V8EXPORT Function : public Object {
class V8EXPORT External : public Value {
public:
static Local<Value> Wrap(void* data);
- static void* Unwrap(Handle<Value> obj);
+ static inline void* Unwrap(Handle<Value> obj);
static Local<External> New(void* value);
- static External* Cast(Value* obj);
+ static inline External* Cast(Value* obj);
void* Value() const;
private:
External();
+ static void CheckCast(v8::Value* obj);
+ static inline void* QuickUnwrap(Handle<v8::Value> obj);
+ static void* FullUnwrap(Handle<v8::Value> obj);
};
@@ -2280,9 +2322,13 @@ class V8EXPORT V8 {
/**
* Optional notification that the embedder is idle.
* V8 uses the notification to reduce memory footprint.
+ * This call can be used repeatedly if the embedder remains idle.
* \param is_high_priority tells whether the embedder is high priority.
+ * Returns true if the embedder should stop calling IdleNotification
+ * until real work has been done. This indicates that V8 has done
+ * as much cleanup as it will be able to do.
*/
- static void IdleNotification(bool is_high_priority);
+ static bool IdleNotification(bool is_high_priority);
/**
* Optional notification that the system is running low on memory.
@@ -2293,12 +2339,14 @@ class V8EXPORT V8 {
private:
V8();
- static void** GlobalizeReference(void** handle);
- static void DisposeGlobal(void** global_handle);
- static void MakeWeak(void** global_handle, void* data, WeakReferenceCallback);
- static void ClearWeak(void** global_handle);
- static bool IsGlobalNearDeath(void** global_handle);
- static bool IsGlobalWeak(void** global_handle);
+ static internal::Object** GlobalizeReference(internal::Object** handle);
+ static void DisposeGlobal(internal::Object** global_handle);
+ static void MakeWeak(internal::Object** global_handle,
+ void* data,
+ WeakReferenceCallback);
+ static void ClearWeak(internal::Object** global_handle);
+ static bool IsGlobalNearDeath(internal::Object** global_handle);
+ static bool IsGlobalWeak(internal::Object** global_handle);
template <class T> friend class Handle;
template <class T> friend class Local;
@@ -2637,6 +2685,76 @@ class V8EXPORT Locker {
// --- I m p l e m e n t a t i o n ---
+
+namespace internal {
+
+
+// Tag information for HeapObject.
+const int kHeapObjectTag = 1;
+const int kHeapObjectTagSize = 2;
+const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
+
+
+// Tag information for Smi.
+const int kSmiTag = 0;
+const int kSmiTagSize = 1;
+const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
+
+
+/**
+ * This class exports constants and functionality from within v8 that
+ * is necessary to implement inline functions in the v8 api. Don't
+ * depend on functions and constants defined here.
+ */
+class Internals {
+ public:
+
+ // These values match non-compiler-dependent values defined within
+ // the implementation of v8.
+ static const int kHeapObjectMapOffset = 0;
+ static const int kMapInstanceTypeOffset = sizeof(void*) + sizeof(int);
+ static const int kStringResourceOffset = 2 * sizeof(void*);
+ static const int kProxyProxyOffset = sizeof(void*);
+ static const int kJSObjectHeaderSize = 3 * sizeof(void*);
+ static const int kFullStringRepresentationMask = 0x07;
+ static const int kExternalTwoByteRepresentationTag = 0x03;
+ static const int kAlignedPointerShift = 2;
+
+ // These constants are compiler dependent so their values must be
+ // defined within the implementation.
+ static int kJSObjectType;
+ static int kFirstNonstringType;
+ static int kProxyType;
+
+ static inline bool HasHeapObjectTag(internal::Object* value) {
+ return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
+ kHeapObjectTag);
+ }
+
+ static inline bool HasSmiTag(internal::Object* value) {
+ return ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag);
+ }
+
+ static inline int SmiValue(internal::Object* value) {
+ return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> kSmiTagSize;
+ }
+
+ static inline bool IsExternalTwoByteString(int instance_type) {
+ int representation = (instance_type & kFullStringRepresentationMask);
+ return representation == kExternalTwoByteRepresentationTag;
+ }
+
+ template <typename T>
+ static inline T ReadField(Object* ptr, int offset) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag;
+ return *reinterpret_cast<T*>(addr);
+ }
+
+};
+
+}
+
+
template <class T>
Handle<T>::Handle() : val_(0) { }
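
The constants above pin down V8's pointer-tagging scheme for the inline accessors that follow. A worked example of the arithmetic (illustrative; all values follow from the constants just defined):

  // Smi (small integer): low bit 0, payload in the upper bits.
  //   Smi 21 -> raw word 42 (21 << kSmiTagSize)
  //   HasSmiTag(p): (word & 1) == 0,  SmiValue(p): word >> 1 == 21
  // HeapObject: aligned address with the low two bits == 01.
  //   HasHeapObjectTag(p): (word & 3) == 1
  //   ReadField<T>(obj, offset) therefore reads at
  //   (address + offset - kHeapObjectTag) to undo the tag bit.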
@@ -2648,7 +2766,7 @@ Local<T>::Local() : Handle<T>() { }
template <class T>
Local<T> Local<T>::New(Handle<T> that) {
if (that.IsEmpty()) return Local<T>();
- void** p = reinterpret_cast<void**>(*that);
+ internal::Object** p = reinterpret_cast<internal::Object**>(*that);
return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(*p)));
}
@@ -2656,7 +2774,7 @@ Local<T> Local<T>::New(Handle<T> that) {
template <class T>
Persistent<T> Persistent<T>::New(Handle<T> that) {
if (that.IsEmpty()) return Persistent<T>();
- void** p = reinterpret_cast<void**>(*that);
+ internal::Object** p = reinterpret_cast<internal::Object**>(*that);
return Persistent<T>(reinterpret_cast<T*>(V8::GlobalizeReference(p)));
}
@@ -2664,21 +2782,21 @@ Persistent<T> Persistent<T>::New(Handle<T> that) {
template <class T>
bool Persistent<T>::IsNearDeath() const {
if (this->IsEmpty()) return false;
- return V8::IsGlobalNearDeath(reinterpret_cast<void**>(**this));
+ return V8::IsGlobalNearDeath(reinterpret_cast<internal::Object**>(**this));
}
template <class T>
bool Persistent<T>::IsWeak() const {
if (this->IsEmpty()) return false;
- return V8::IsGlobalWeak(reinterpret_cast<void**>(**this));
+ return V8::IsGlobalWeak(reinterpret_cast<internal::Object**>(**this));
}
template <class T>
void Persistent<T>::Dispose() {
if (this->IsEmpty()) return;
- V8::DisposeGlobal(reinterpret_cast<void**>(**this));
+ V8::DisposeGlobal(reinterpret_cast<internal::Object**>(**this));
}
@@ -2687,12 +2805,14 @@ Persistent<T>::Persistent() : Handle<T>() { }
template <class T>
void Persistent<T>::MakeWeak(void* parameters, WeakReferenceCallback callback) {
- V8::MakeWeak(reinterpret_cast<void**>(**this), parameters, callback);
+ V8::MakeWeak(reinterpret_cast<internal::Object**>(**this),
+ parameters,
+ callback);
}
template <class T>
void Persistent<T>::ClearWeak() {
- V8::ClearWeak(reinterpret_cast<void**>(**this));
+ V8::ClearWeak(reinterpret_cast<internal::Object**>(**this));
}
Local<Value> Arguments::operator[](int i) const {
@@ -2748,7 +2868,8 @@ Local<Object> AccessorInfo::Holder() const {
template <class T>
Local<T> HandleScope::Close(Handle<T> value) {
- void** after = RawClose(reinterpret_cast<void**>(*value));
+ internal::Object** before = reinterpret_cast<internal::Object**>(*value);
+ internal::Object** after = RawClose(before);
return Local<T>(reinterpret_cast<T*>(after));
}
@@ -2777,6 +2898,171 @@ void Template::Set(const char* name, v8::Handle<Data> value) {
}
+Local<Value> Object::GetInternalField(int index) {
+#ifndef V8_ENABLE_CHECKS
+ Local<Value> quick_result = UncheckedGetInternalField(index);
+ if (!quick_result.IsEmpty()) return quick_result;
+#endif
+ return CheckedGetInternalField(index);
+}
+
+
+Local<Value> Object::UncheckedGetInternalField(int index) {
+ typedef internal::Object O;
+ typedef internal::Internals I;
+ O* obj = *reinterpret_cast<O**>(this);
+ O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
+ int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
+ if (instance_type == I::kJSObjectType) {
+ // If the object is a plain JSObject, which is the common case,
+ // we know where to find the internal fields and can return the
+ // value directly.
+ int offset = I::kJSObjectHeaderSize + (sizeof(void*) * index);
+ O* value = I::ReadField<O*>(obj, offset);
+ O** result = HandleScope::CreateHandle(value);
+ return Local<Value>(reinterpret_cast<Value*>(result));
+ } else {
+ return Local<Value>();
+ }
+}
+
+
+void* External::Unwrap(Handle<v8::Value> obj) {
+#ifdef V8_ENABLE_CHECKS
+ return FullUnwrap(obj);
+#else
+ return QuickUnwrap(obj);
+#endif
+}
+
+
+void* External::QuickUnwrap(Handle<v8::Value> wrapper) {
+ typedef internal::Object O;
+ typedef internal::Internals I;
+ O* obj = *reinterpret_cast<O**>(const_cast<v8::Value*>(*wrapper));
+ if (I::HasSmiTag(obj)) {
+ int value = I::SmiValue(obj) << I::kAlignedPointerShift;
+ return reinterpret_cast<void*>(value);
+ } else {
+ O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
+ int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
+ if (instance_type == I::kProxyType) {
+ return I::ReadField<void*>(obj, I::kProxyProxyOffset);
+ } else {
+ return NULL;
+ }
+ }
+}
+
+
+void* Object::GetPointerFromInternalField(int index) {
+ return External::Unwrap(GetInternalField(index));
+}
+
+
+String* String::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<String*>(value);
+}
+
+
+String::ExternalStringResource* String::GetExternalStringResource() const {
+ typedef internal::Object O;
+ typedef internal::Internals I;
+ O* obj = *reinterpret_cast<O**>(const_cast<String*>(this));
+ O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
+ int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
+ String::ExternalStringResource* result;
+ if (I::IsExternalTwoByteString(instance_type)) {
+ void* value = I::ReadField<void*>(obj, I::kStringResourceOffset);
+ result = reinterpret_cast<String::ExternalStringResource*>(value);
+ } else {
+ result = NULL;
+ }
+#ifdef V8_ENABLE_CHECKS
+ VerifyExternalStringResource(result);
+#endif
+ return result;
+}
+
+
+bool Value::IsString() const {
+#ifdef V8_ENABLE_CHECKS
+ return FullIsString();
+#else
+ return QuickIsString();
+#endif
+}
+
+bool Value::QuickIsString() const {
+ typedef internal::Object O;
+ typedef internal::Internals I;
+ O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
+ if (!I::HasHeapObjectTag(obj)) return false;
+ O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
+ int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
+ return (instance_type < I::kFirstNonstringType);
+}
+
+
+Number* Number::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Number*>(value);
+}
+
+
+Integer* Integer::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Integer*>(value);
+}
+
+
+Date* Date::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Date*>(value);
+}
+
+
+Object* Object::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Object*>(value);
+}
+
+
+Array* Array::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Array*>(value);
+}
+
+
+Function* Function::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Function*>(value);
+}
+
+
+External* External::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<External*>(value);
+}
+
+
/**
* \example shell.cc
* A simple shell that takes a list of expressions on the
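
The inline bodies above are the payoff of this header reshuffle: release builds answer IsString(), GetExternalStringResource(), GetInternalField() and External::Unwrap() with a couple of inline loads (map, then instance type), while the out-of-line Full*/Checked* twins remain for the V8_ENABLE_CHECKS build, which cross-checks the quick path against the slow one. Roughly (illustrative):

  v8::Local<v8::Value> field = obj->GetInternalField(0);
  // release: UncheckedGetInternalField() reads the slot directly when
  //   the map says plain JS_OBJECT_TYPE; anything else falls back to
  //   CheckedGetInternalField().
  // checks:  always the checked path, which ASSERTs agreement with
  //   the unchecked result.
  bool is_str = field->IsString();  // inline map/instance-type compare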
diff --git a/V8Binding/v8/src/api.cc b/V8Binding/v8/src/api.cc
index 7d97fc6..d92a0e0 100644
--- a/V8Binding/v8/src/api.cc
+++ b/V8Binding/v8/src/api.cc
@@ -100,7 +100,9 @@ static i::HandleScopeImplementer thread_local;
static FatalErrorCallback exception_behavior = NULL;
-
+int i::Internals::kJSObjectType = JS_OBJECT_TYPE;
+int i::Internals::kFirstNonstringType = FIRST_NONSTRING_TYPE;
+int i::Internals::kProxyType = PROXY_TYPE;
static void DefaultFatalErrorHandler(const char* location,
const char* message) {
@@ -223,7 +225,8 @@ ImplementationUtilities::HandleScopeData*
#ifdef DEBUG
-void ImplementationUtilities::ZapHandleRange(void** begin, void** end) {
+void ImplementationUtilities::ZapHandleRange(i::Object** begin,
+ i::Object** end) {
i::HandleScope::ZapRange(begin, end);
}
#endif
@@ -349,49 +352,47 @@ bool SetResourceConstraints(ResourceConstraints* constraints) {
}
-void** V8::GlobalizeReference(void** obj) {
+i::Object** V8::GlobalizeReference(i::Object** obj) {
if (IsDeadCheck("V8::Persistent::New")) return NULL;
LOG_API("Persistent::New");
i::Handle<i::Object> result =
- i::GlobalHandles::Create(*reinterpret_cast<i::Object**>(obj));
- return reinterpret_cast<void**>(result.location());
+ i::GlobalHandles::Create(*obj);
+ return result.location();
}
-void V8::MakeWeak(void** object, void* parameters,
+void V8::MakeWeak(i::Object** object, void* parameters,
WeakReferenceCallback callback) {
LOG_API("MakeWeak");
- i::GlobalHandles::MakeWeak(reinterpret_cast<i::Object**>(object), parameters,
- callback);
+ i::GlobalHandles::MakeWeak(object, parameters, callback);
}
-void V8::ClearWeak(void** obj) {
+void V8::ClearWeak(i::Object** obj) {
LOG_API("ClearWeak");
- i::GlobalHandles::ClearWeakness(reinterpret_cast<i::Object**>(obj));
+ i::GlobalHandles::ClearWeakness(obj);
}
-bool V8::IsGlobalNearDeath(void** obj) {
+bool V8::IsGlobalNearDeath(i::Object** obj) {
LOG_API("IsGlobalNearDeath");
if (!i::V8::IsRunning()) return false;
- return i::GlobalHandles::IsNearDeath(reinterpret_cast<i::Object**>(obj));
+ return i::GlobalHandles::IsNearDeath(obj);
}
-bool V8::IsGlobalWeak(void** obj) {
+bool V8::IsGlobalWeak(i::Object** obj) {
LOG_API("IsGlobalWeak");
if (!i::V8::IsRunning()) return false;
- return i::GlobalHandles::IsWeak(reinterpret_cast<i::Object**>(obj));
+ return i::GlobalHandles::IsWeak(obj);
}
-void V8::DisposeGlobal(void** obj) {
+void V8::DisposeGlobal(i::Object** obj) {
LOG_API("DisposeGlobal");
if (!i::V8::IsRunning()) return;
- i::Object** ptr = reinterpret_cast<i::Object**>(obj);
- if ((*ptr)->IsGlobalContext()) i::Heap::NotifyContextDisposed();
- i::GlobalHandles::Destroy(ptr);
+ if ((*obj)->IsGlobalContext()) i::Heap::NotifyContextDisposed();
+ i::GlobalHandles::Destroy(obj);
}
// --- H a n d l e s ---
@@ -415,9 +416,8 @@ int HandleScope::NumberOfHandles() {
}
-void** v8::HandleScope::CreateHandle(void* value) {
- return reinterpret_cast<void**>(
- i::HandleScope::CreateHandle(reinterpret_cast<i::Object*>(value)));
+i::Object** v8::HandleScope::CreateHandle(i::Object* value) {
+ return i::HandleScope::CreateHandle(value);
}
@@ -481,7 +481,7 @@ v8::Local<v8::Value> Context::GetData() {
}
-void** v8::HandleScope::RawClose(void** value) {
+i::Object** v8::HandleScope::RawClose(i::Object** value) {
if (!ApiCheck(!is_closed_,
"v8::HandleScope::Close()",
"Local scope has already been closed")) {
@@ -490,13 +490,13 @@ void** v8::HandleScope::RawClose(void** value) {
LOG_API("CloseHandleScope");
// Read the result before popping the handle block.
- i::Object* result = reinterpret_cast<i::Object*>(*value);
+ i::Object* result = *value;
is_closed_ = true;
i::HandleScope::Leave(&previous_);
// Allocate a new handle on the previous handle block.
i::Handle<i::Object> handle(result);
- return reinterpret_cast<void**>(handle.location());
+ return handle.location();
}
@@ -1459,9 +1459,11 @@ bool Value::IsFunction() const {
}
-bool Value::IsString() const {
+bool Value::FullIsString() const {
if (IsDeadCheck("v8::Value::IsString()")) return false;
- return Utils::OpenHandle(this)->IsString();
+ bool result = Utils::OpenHandle(this)->IsString();
+ ASSERT_EQ(result, QuickIsString());
+ return result;
}
@@ -1613,83 +1615,75 @@ Local<Integer> Value::ToInteger() const {
}
-External* External::Cast(v8::Value* that) {
- if (IsDeadCheck("v8::External::Cast()")) return 0;
+void External::CheckCast(v8::Value* that) {
+ if (IsDeadCheck("v8::External::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsProxy(),
"v8::External::Cast()",
"Could not convert to external");
- return static_cast<External*>(that);
}
-v8::Object* v8::Object::Cast(Value* that) {
- if (IsDeadCheck("v8::Object::Cast()")) return 0;
+void v8::Object::CheckCast(Value* that) {
+ if (IsDeadCheck("v8::Object::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSObject(),
"v8::Object::Cast()",
"Could not convert to object");
- return static_cast<v8::Object*>(that);
}
-v8::Function* v8::Function::Cast(Value* that) {
- if (IsDeadCheck("v8::Function::Cast()")) return 0;
+void v8::Function::CheckCast(Value* that) {
+ if (IsDeadCheck("v8::Function::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSFunction(),
"v8::Function::Cast()",
"Could not convert to function");
- return static_cast<v8::Function*>(that);
}
-v8::String* v8::String::Cast(v8::Value* that) {
- if (IsDeadCheck("v8::String::Cast()")) return 0;
+void v8::String::CheckCast(v8::Value* that) {
+ if (IsDeadCheck("v8::String::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsString(),
"v8::String::Cast()",
"Could not convert to string");
- return static_cast<v8::String*>(that);
}
-v8::Number* v8::Number::Cast(v8::Value* that) {
- if (IsDeadCheck("v8::Number::Cast()")) return 0;
+void v8::Number::CheckCast(v8::Value* that) {
+ if (IsDeadCheck("v8::Number::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsNumber(),
"v8::Number::Cast()",
"Could not convert to number");
- return static_cast<v8::Number*>(that);
}
-v8::Integer* v8::Integer::Cast(v8::Value* that) {
- if (IsDeadCheck("v8::Integer::Cast()")) return 0;
+void v8::Integer::CheckCast(v8::Value* that) {
+ if (IsDeadCheck("v8::Integer::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsNumber(),
"v8::Integer::Cast()",
"Could not convert to number");
- return static_cast<v8::Integer*>(that);
}
-v8::Array* v8::Array::Cast(Value* that) {
- if (IsDeadCheck("v8::Array::Cast()")) return 0;
+void v8::Array::CheckCast(Value* that) {
+ if (IsDeadCheck("v8::Array::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSArray(),
"v8::Array::Cast()",
"Could not convert to array");
- return static_cast<v8::Array*>(that);
}
-v8::Date* v8::Date::Cast(v8::Value* that) {
- if (IsDeadCheck("v8::Date::Cast()")) return 0;
+void v8::Date::CheckCast(v8::Value* that) {
+ if (IsDeadCheck("v8::Date::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->HasSpecificClassOf(i::Heap::Date_symbol()),
"v8::Date::Cast()",
"Could not convert to date");
- return static_cast<v8::Date*>(that);
}
@@ -2450,16 +2444,17 @@ bool v8::String::IsExternalAscii() const {
}
-v8::String::ExternalStringResource*
-v8::String::GetExternalStringResource() const {
- EnsureInitialized("v8::String::GetExternalStringResource()");
+void v8::String::VerifyExternalStringResource(
+ v8::String::ExternalStringResource* value) const {
i::Handle<i::String> str = Utils::OpenHandle(this);
+ v8::String::ExternalStringResource* expected;
if (i::StringShape(*str).IsExternalTwoByte()) {
void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
- return reinterpret_cast<ExternalStringResource*>(resource);
+ expected = reinterpret_cast<ExternalStringResource*>(resource);
} else {
- return NULL;
+ expected = NULL;
}
+ CHECK_EQ(expected, value);
}
@@ -2519,7 +2514,7 @@ int v8::Object::InternalFieldCount() {
}
-Local<Value> v8::Object::GetInternalField(int index) {
+Local<Value> v8::Object::CheckedGetInternalField(int index) {
if (IsDeadCheck("v8::Object::GetInternalField()")) return Local<Value>();
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
if (!ApiCheck(index < obj->GetInternalFieldCount(),
@@ -2528,7 +2523,12 @@ Local<Value> v8::Object::GetInternalField(int index) {
return Local<Value>();
}
i::Handle<i::Object> value(obj->GetInternalField(index));
- return Utils::ToLocal(value);
+ Local<Value> result = Utils::ToLocal(value);
+#ifdef DEBUG
+ Local<Value> unchecked = UncheckedGetInternalField(index);
+ ASSERT(unchecked.IsEmpty() || (unchecked == result));
+#endif
+ return result;
}
@@ -2546,41 +2546,8 @@ void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
}
-void* v8::Object::GetPointerFromInternalField(int index) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- i::Object* pointer = obj->GetInternalField(index);
- if (pointer->IsSmi()) {
- // Fast case, aligned native pointer.
- return pointer;
- }
-
- // Read from uninitialized field.
- if (!pointer->IsProxy()) {
- // Play safe even if it's something unexpected.
- ASSERT(pointer->IsUndefined());
- return NULL;
- }
-
- // Unaligned native pointer.
- return reinterpret_cast<void*>(i::Proxy::cast(pointer)->proxy());
-}
-
-
void v8::Object::SetPointerInInternalField(int index, void* value) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- i::Object* as_object = reinterpret_cast<i::Object*>(value);
- if (as_object->IsSmi()) {
- // Aligned pointer, store as is.
- obj->SetInternalField(index, as_object);
- } else {
- // Currently internal fields are used by DOM wrappers which only
- // get garbage collected by the mark-sweep collector, so we
- // pretenure the proxy.
- HandleScope scope;
- i::Handle<i::Proxy> proxy =
- i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
- if (!proxy.is_null()) obj->SetInternalField(index, *proxy);
- }
+ SetInternalField(index, External::Wrap(value));
}
@@ -2604,13 +2571,15 @@ bool v8::V8::Dispose() {
}
-void v8::V8::IdleNotification(bool is_high_priority) {
- i::V8::IdleNotification(is_high_priority);
+bool v8::V8::IdleNotification(bool is_high_priority) {
+ if (!i::V8::IsRunning()) return false;
+ return i::V8::IdleNotification(is_high_priority);
}
void v8::V8::LowMemoryNotification() {
#if defined(ANDROID)
+ if (!i::V8::IsRunning()) return;
i::Heap::CollectAllGarbage(true);
#endif
}
@@ -2836,8 +2805,6 @@ static void* ExternalValueImpl(i::Handle<i::Object> obj) {
static const intptr_t kAlignedPointerMask = 3;
-static const int kAlignedPointerShift = 2;
-
Local<Value> v8::External::Wrap(void* data) {
STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
@@ -2847,7 +2814,7 @@ Local<Value> v8::External::Wrap(void* data) {
if ((reinterpret_cast<intptr_t>(data) & kAlignedPointerMask) == 0) {
uintptr_t data_ptr = reinterpret_cast<uintptr_t>(data);
intptr_t data_value =
- static_cast<intptr_t>(data_ptr >> kAlignedPointerShift);
+ static_cast<intptr_t>(data_ptr >> i::Internals::kAlignedPointerShift);
STATIC_ASSERT(sizeof(data_ptr) == sizeof(data_value));
if (i::Smi::IsIntptrValid(data_value)) {
i::Handle<i::Object> obj(i::Smi::FromIntptr(data_value));
@@ -2858,16 +2825,22 @@ Local<Value> v8::External::Wrap(void* data) {
}
-void* v8::External::Unwrap(v8::Handle<v8::Value> value) {
+void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
if (IsDeadCheck("v8::External::Unwrap()")) return 0;
- i::Handle<i::Object> obj = Utils::OpenHandle(*value);
+ i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
+ void* result;
if (obj->IsSmi()) {
// The external value was an aligned pointer.
- uintptr_t result = static_cast<uintptr_t>(
- i::Smi::cast(*obj)->value()) << kAlignedPointerShift;
- return reinterpret_cast<void*>(result);
+ uintptr_t value = static_cast<uintptr_t>(
+ i::Smi::cast(*obj)->value()) << i::Internals::kAlignedPointerShift;
+ result = reinterpret_cast<void*>(value);
+ } else if (obj->IsProxy()) {
+ result = ExternalValueImpl(obj);
+ } else {
+ result = NULL;
}
- return ExternalValueImpl(obj);
+ ASSERT_EQ(result, QuickUnwrap(wrapper));
+ return result;
}
@@ -3335,7 +3308,7 @@ void V8::ResumeProfilerEx(int flags) {
flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU);
const int current_flags = i::Logger::GetActiveProfilerModules();
i::Logger::ResumeProfiler(flags);
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
i::Logger::PauseProfiler(~current_flags & flags);
} else {
i::Logger::ResumeProfiler(flags);
@@ -3729,19 +3702,17 @@ char* HandleScopeImplementer::RestoreThreadHelper(char* storage) {
void HandleScopeImplementer::Iterate(
ObjectVisitor* v,
- List<void**>* blocks,
+ List<i::Object**>* blocks,
v8::ImplementationUtilities::HandleScopeData* handle_data) {
// Iterate over all handles in the blocks except for the last.
for (int i = blocks->length() - 2; i >= 0; --i) {
- Object** block =
- reinterpret_cast<Object**>(blocks->at(i));
+ Object** block = blocks->at(i);
v->VisitPointers(block, &block[kHandleBlockSize]);
}
// Iterate over live handles in the last block (if any).
if (!blocks->is_empty()) {
- v->VisitPointers(reinterpret_cast<Object**>(blocks->last()),
- reinterpret_cast<Object**>(handle_data->next));
+ v->VisitPointers(blocks->last(), handle_data->next);
}
}
@@ -3756,7 +3727,7 @@ void HandleScopeImplementer::Iterate(ObjectVisitor* v) {
char* HandleScopeImplementer::Iterate(ObjectVisitor* v, char* storage) {
HandleScopeImplementer* thread_local =
reinterpret_cast<HandleScopeImplementer*>(storage);
- List<void**>* blocks_of_archived_thread = thread_local->Blocks();
+ List<internal::Object**>* blocks_of_archived_thread = thread_local->Blocks();
v8::ImplementationUtilities::HandleScopeData* handle_data_of_archived_thread =
&thread_local->handle_scope_data_;
Iterate(v, blocks_of_archived_thread, handle_data_of_archived_thread);
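
Two api.cc changes are worth reading together: SetPointerInInternalField is now just SetInternalField(index, External::Wrap(value)), and the old open-coded GetPointerFromInternalField is replaced by the inline External::Unwrap path from v8.h. A sketch of the resulting round trip (MyData is an illustrative embedder type):

  MyData* native = new MyData();
  obj->SetPointerInInternalField(0, native);
  // Aligned pointers are encoded as a Smi (ptr >> kAlignedPointerShift),
  // so the inline QuickUnwrap() recovers them with no VM call;
  // unaligned pointers go through a pretenured Proxy and the slow path.
  MyData* back =
      static_cast<MyData*>(obj->GetPointerFromInternalField(0));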
diff --git a/V8Binding/v8/src/api.h b/V8Binding/v8/src/api.h
index f1057a8..ca8f523 100644
--- a/V8Binding/v8/src/api.h
+++ b/V8Binding/v8/src/api.h
@@ -338,7 +338,7 @@ class HandleScopeImplementer {
static char* Iterate(v8::internal::ObjectVisitor* v, char* data);
- inline void** GetSpareOrNewBlock();
+ inline internal::Object** GetSpareOrNewBlock();
inline void DeleteExtensions(int extensions);
inline void IncrementCallDepth() {call_depth++;}
@@ -356,13 +356,13 @@ class HandleScopeImplementer {
inline Handle<Object> RestoreContext();
inline bool HasSavedContexts();
- inline List<void**>* Blocks() { return &blocks; }
+ inline List<internal::Object**>* Blocks() { return &blocks; }
inline bool IgnoreOutOfMemory() { return ignore_out_of_memory; }
inline void SetIgnoreOutOfMemory(bool value) { ignore_out_of_memory = value; }
private:
- List<void**> blocks;
+ List<internal::Object**> blocks;
Object** spare;
int call_depth;
// Used as a stack to keep track of entered contexts.
@@ -374,7 +374,7 @@ class HandleScopeImplementer {
v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
static void Iterate(ObjectVisitor* v,
- List<void**>* blocks,
+ List<internal::Object**>* blocks,
v8::ImplementationUtilities::HandleScopeData* handle_data);
char* RestoreThreadHelper(char* from);
char* ArchiveThreadHelper(char* to);
@@ -420,10 +420,10 @@ Handle<Object> HandleScopeImplementer::LastEnteredContext() {
// If there's a spare block, use it for growing the current scope.
-void** HandleScopeImplementer::GetSpareOrNewBlock() {
- void** block = (spare != NULL) ?
- reinterpret_cast<void**>(spare) :
- NewArray<void*>(kHandleBlockSize);
+internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
+ internal::Object** block = (spare != NULL) ?
+ spare :
+ NewArray<internal::Object*>(kHandleBlockSize);
spare = NULL;
return block;
}
@@ -435,18 +435,18 @@ void HandleScopeImplementer::DeleteExtensions(int extensions) {
spare = NULL;
}
for (int i = extensions; i > 1; --i) {
- void** block = blocks.RemoveLast();
+ internal::Object** block = blocks.RemoveLast();
#ifdef DEBUG
v8::ImplementationUtilities::ZapHandleRange(block,
&block[kHandleBlockSize]);
#endif
DeleteArray(block);
}
- spare = reinterpret_cast<Object**>(blocks.RemoveLast());
+ spare = blocks.RemoveLast();
#ifdef DEBUG
v8::ImplementationUtilities::ZapHandleRange(
- reinterpret_cast<void**>(spare),
- reinterpret_cast<void**>(&spare[kHandleBlockSize]));
+ spare,
+ &spare[kHandleBlockSize]);
#endif
}
diff --git a/V8Binding/v8/src/apiutils.h b/V8Binding/v8/src/apiutils.h
index 5745343..8c791eb 100644
--- a/V8Binding/v8/src/apiutils.h
+++ b/V8Binding/v8/src/apiutils.h
@@ -60,7 +60,7 @@ class ImplementationUtilities {
static HandleScopeData* CurrentHandleScope();
#ifdef DEBUG
- static void ZapHandleRange(void** begin, void** end);
+ static void ZapHandleRange(internal::Object** begin, internal::Object** end);
#endif
};
diff --git a/V8Binding/v8/src/arm/builtins-arm.cc b/V8Binding/v8/src/arm/builtins-arm.cc
index 28524c8..daf2378 100644
--- a/V8Binding/v8/src/arm/builtins-arm.cc
+++ b/V8Binding/v8/src/arm/builtins-arm.cc
@@ -214,9 +214,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Enter an internal frame.
__ EnterInternalFrame();
- // Setup the context from the function argument.
+ // Set up the context from the function argument.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ // Set up the roots register.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ mov(r10, Operand(roots_address));
+
// Push the function and the receiver onto the stack.
__ push(r1);
__ push(r2);
@@ -239,7 +243,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
- __ mov(r4, Operand(Factory::undefined_value()));
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
__ mov(r7, Operand(r4));
@@ -282,7 +286,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{ Label done;
__ tst(r0, Operand(r0));
__ b(ne, &done);
- __ mov(r2, Operand(Factory::undefined_value()));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ push(r2);
__ add(r0, r0, Operand(1));
__ bind(&done);
@@ -323,10 +327,10 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ tst(r2, Operand(kSmiTagMask));
__ b(eq, &call_to_object);
- __ mov(r3, Operand(Factory::null_value()));
+ __ LoadRoot(r3, Heap::kNullValueRootIndex);
__ cmp(r2, r3);
__ b(eq, &use_global_receiver);
- __ mov(r3, Operand(Factory::undefined_value()));
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ cmp(r2, r3);
__ b(eq, &use_global_receiver);
@@ -492,10 +496,10 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ ldr(r0, MemOperand(fp, kRecvOffset));
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &call_to_object);
- __ mov(r1, Operand(Factory::null_value()));
+ __ LoadRoot(r1, Heap::kNullValueRootIndex);
__ cmp(r0, r1);
__ b(eq, &use_global_receiver);
- __ mov(r1, Operand(Factory::undefined_value()));
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ cmp(r0, r1);
__ b(eq, &use_global_receiver);
@@ -569,7 +573,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
- __ mov(r4, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
__ add(fp, sp, Operand(3 * kPointerSize));
}
@@ -665,7 +669,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r1: function
// r2: expected number of arguments
// r3: code entry to call
- __ mov(ip, Operand(Factory::undefined_value()));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ sub(r2, fp, Operand(r2, LSL, kPointerSizeLog2));
__ sub(r2, r2, Operand(4 * kPointerSize)); // Adjust for frame.
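
The pattern repeated through this file (and the other ARM files below) is the actual size optimization from the 1.3.7 note: root constants are no longer materialized through the constant pool but loaded off the dedicated roots register (r10, initialized in the entry trampoline above). Schematically:

  // before: costs a constant-pool slot per use
  //   __ mov(r4, Operand(Factory::undefined_value()));
  // after:  one indexed load, no pool entry
  //   __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
  //   // i.e. ldr r4, [r10, #kUndefinedValueRootIndex * kPointerSize]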
diff --git a/V8Binding/v8/src/arm/cfg-arm.cc b/V8Binding/v8/src/arm/cfg-arm.cc
index 34e64b3..e0e563c 100644
--- a/V8Binding/v8/src/arm/cfg-arm.cc
+++ b/V8Binding/v8/src/arm/cfg-arm.cc
@@ -67,7 +67,7 @@ void EntryNode::Compile(MacroAssembler* masm) {
__ add(fp, sp, Operand(2 * kPointerSize));
int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
if (count > 0) {
- __ mov(ip, Operand(Factory::undefined_value()));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
for (int i = 0; i < count; i++) {
__ push(ip);
}
diff --git a/V8Binding/v8/src/arm/codegen-arm.cc b/V8Binding/v8/src/arm/codegen-arm.cc
index 71ffaa2..5c8b777 100644
--- a/V8Binding/v8/src/arm/codegen-arm.cc
+++ b/V8Binding/v8/src/arm/codegen-arm.cc
@@ -305,7 +305,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
// sp: stack pointer
// fp: frame pointer
// cp: callee's context
- __ mov(r0, Operand(Factory::undefined_value()));
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
function_return_.Bind();
if (FLAG_trace) {
@@ -478,11 +478,11 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
JumpTarget loaded;
JumpTarget materialize_true;
materialize_true.Branch(cc_reg_);
- __ mov(r0, Operand(Factory::false_value()));
+ __ LoadRoot(r0, Heap::kFalseValueRootIndex);
frame_->EmitPush(r0);
loaded.Jump();
materialize_true.Bind();
- __ mov(r0, Operand(Factory::true_value()));
+ __ LoadRoot(r0, Heap::kTrueValueRootIndex);
frame_->EmitPush(r0);
loaded.Bind();
cc_reg_ = al;
@@ -499,7 +499,7 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
// Load "true" if necessary.
if (true_target.is_linked()) {
true_target.Bind();
- __ mov(r0, Operand(Factory::true_value()));
+ __ LoadRoot(r0, Heap::kTrueValueRootIndex);
frame_->EmitPush(r0);
}
// If both "true" and "false" need to be loaded jump across the code for
@@ -510,7 +510,7 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
// Load "false" if necessary.
if (false_target.is_linked()) {
false_target.Bind();
- __ mov(r0, Operand(Factory::false_value()));
+ __ LoadRoot(r0, Heap::kFalseValueRootIndex);
frame_->EmitPush(r0);
}
// A value is loaded on all paths reaching this point.
@@ -640,15 +640,18 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
// Fast case checks
// Check if the value is 'false'.
- __ cmp(r0, Operand(Factory::false_value()));
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r0, ip);
false_target->Branch(eq);
// Check if the value is 'true'.
- __ cmp(r0, Operand(Factory::true_value()));
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
true_target->Branch(eq);
// Check if the value is 'undefined'.
- __ cmp(r0, Operand(Factory::undefined_value()));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, ip);
false_target->Branch(eq);
// Check if the value is a smi.
@@ -661,7 +664,8 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
frame_->EmitPush(r0);
frame_->CallRuntime(Runtime::kToBool, 1);
// Convert the result (r0) to a condition code.
- __ cmp(r0, Operand(Factory::false_value()));
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r0, ip);
cc_reg_ = ne;
}
@@ -1106,8 +1110,10 @@ void CodeGenerator::CheckStack() {
VirtualFrame::SpilledScope spilled_scope;
if (FLAG_check_stack) {
Comment cmnt(masm_, "[ check stack");
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
StackCheckStub stub;
- frame_->CallStub(&stub, 0);
+ __ CallStub(&stub, lo); // Call the stub if lower.
}
}
@@ -1185,7 +1191,7 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (node->mode() == Variable::CONST) {
- __ mov(r0, Operand(Factory::the_hole_value()));
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
frame_->EmitPush(r0);
} else if (node->fun() != NULL) {
LoadAndSpill(node->fun());
@@ -1725,9 +1731,11 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Both SpiderMonkey and kjs ignore null and undefined in contrast
// to the specification. 12.6.4 mandates a call to ToObject.
frame_->EmitPop(r0);
- __ cmp(r0, Operand(Factory::undefined_value()));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, ip);
exit.Branch(eq);
- __ cmp(r0, Operand(Factory::null_value()));
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r0, ip);
exit.Branch(eq);
// Stack layout in body:
@@ -1759,7 +1767,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Otherwise, we got a FixedArray, and we have to do a slow check.
__ mov(r2, Operand(r0));
__ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ cmp(r1, Operand(Factory::meta_map()));
+ __ LoadRoot(ip, Heap::kMetaMapRootIndex);
+ __ cmp(r1, ip);
fixed_array.Branch(ne);
// Get enum cache
@@ -1833,7 +1842,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
__ mov(r3, Operand(r0));
// If the property has been removed while iterating, we just skip it.
- __ cmp(r3, Operand(Factory::null_value()));
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r3, ip);
node->continue_target()->Branch(eq);
end_del_check.Bind();
@@ -2093,7 +2103,7 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
// Fake a top of stack value (unneeded when FALLING) and set the
// state in r2, then jump around the unlink blocks if any.
- __ mov(r0, Operand(Factory::undefined_value()));
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
frame_->EmitPush(r0);
__ mov(r2, Operand(Smi::FromInt(FALLING)));
if (nof_unlinks > 0) {
@@ -2135,7 +2145,7 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
frame_->EmitPush(r0);
} else {
// Fake TOS for targets that shadowed breaks and continues.
- __ mov(r0, Operand(Factory::undefined_value()));
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
frame_->EmitPush(r0);
}
__ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
@@ -2322,8 +2332,9 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
r2,
&slow));
if (potential_slot->var()->mode() == Variable::CONST) {
- __ cmp(r0, Operand(Factory::the_hole_value()));
- __ mov(r0, Operand(Factory::undefined_value()), LeaveCC, eq);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r0, ip);
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
}
// There is always control flow to slow from
// ContextSlotOperandCheckExtensions so we have to jump around
@@ -2360,8 +2371,9 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
// value.
Comment cmnt(masm_, "[ Unhole const");
frame_->EmitPop(r0);
- __ cmp(r0, Operand(Factory::the_hole_value()));
- __ mov(r0, Operand(Factory::undefined_value()), LeaveCC, eq);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r0, ip);
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
frame_->EmitPush(r0);
}
}
@@ -2404,7 +2416,8 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
__ bind(&next);
// Terminate at global context.
__ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
- __ cmp(tmp2, Operand(Factory::global_context_map()));
+ __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+ __ cmp(tmp2, ip);
__ b(eq, &fast);
// Check that extension is NULL.
__ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
@@ -2501,7 +2514,8 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
__ ldr(r2, FieldMemOperand(r1, literal_offset));
JumpTarget done;
- __ cmp(r2, Operand(Factory::undefined_value()));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r2, ip);
done.Branch(ne);
// If the entry is undefined we call the runtime system to compute
@@ -2583,7 +2597,8 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
// Check whether we need to materialize the object literal boilerplate.
// If so, jump to the deferred code.
- __ cmp(r2, Operand(Factory::undefined_value()));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r2, Operand(ip));
deferred->Branch(eq);
deferred->BindExit();
@@ -2705,7 +2720,8 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
// Check whether we need to materialize the object literal boilerplate.
// If so, jump to the deferred code.
- __ cmp(r2, Operand(Factory::undefined_value()));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r2, Operand(ip));
deferred->Branch(eq);
deferred->BindExit();
@@ -3036,7 +3052,7 @@ void CodeGenerator::VisitCallEval(CallEval* node) {
// Prepare stack for call to resolved function.
LoadAndSpill(function);
- __ mov(r2, Operand(Factory::undefined_value()));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
frame_->EmitPush(r2); // Slot for receiver
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
@@ -3180,7 +3196,7 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
// Non-JS objects have class null.
null.Bind();
- __ mov(r0, Operand(Factory::null_value()));
+ __ LoadRoot(r0, Heap::kNullValueRootIndex);
frame_->EmitPush(r0);
// All done.
@@ -3253,7 +3269,7 @@ void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
__ CallRuntime(Runtime::kLog, 2);
}
#endif
- __ mov(r0, Operand(Factory::undefined_value()));
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
frame_->EmitPush(r0);
}
@@ -3274,7 +3290,7 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
- __ mov(r0, Operand(Factory::undefined_value()));
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
frame_->EmitPush(r0);
}
@@ -3308,7 +3324,7 @@ void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r1, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &check_frame_marker);
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
@@ -3494,14 +3510,14 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
} else {
// Default: Result of deleting non-global, not dynamically
// introduced variables is false.
- __ mov(r0, Operand(Factory::false_value()));
+ __ LoadRoot(r0, Heap::kFalseValueRootIndex);
}
} else {
// Default: Result of deleting expressions is true.
LoadAndSpill(node->expression()); // may have side-effects
frame_->Drop();
- __ mov(r0, Operand(Factory::true_value()));
+ __ LoadRoot(r0, Heap::kTrueValueRootIndex);
}
frame_->EmitPush(r0);
@@ -3554,7 +3570,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
case Token::VOID:
// since the stack top is cached in r0, popping and then
// pushing a value can be done by just writing to r0.
- __ mov(r0, Operand(Factory::undefined_value()));
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
break;
case Token::ADD: {
@@ -3880,14 +3896,16 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
if (left_is_null || right_is_null) {
LoadAndSpill(left_is_null ? right : left);
frame_->EmitPop(r0);
- __ cmp(r0, Operand(Factory::null_value()));
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r0, ip);
// The 'null' value is only equal to 'undefined' if using non-strict
// comparisons.
if (op != Token::EQ_STRICT) {
true_target()->Branch(eq);
- __ cmp(r0, Operand(Factory::undefined_value()));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+      __ cmp(r0, ip);
true_target()->Branch(eq);
__ tst(r0, Operand(kSmiTagMask));
@@ -3924,7 +3942,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
__ tst(r1, Operand(kSmiTagMask));
true_target()->Branch(eq);
__ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r1, Operand(Factory::heap_number_map()));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r1, ip);
cc_reg_ = eq;
} else if (check->Equals(Heap::string_symbol())) {
@@ -3944,13 +3963,16 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
cc_reg_ = lt;
} else if (check->Equals(Heap::boolean_symbol())) {
- __ cmp(r1, Operand(Factory::true_value()));
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r1, ip);
true_target()->Branch(eq);
- __ cmp(r1, Operand(Factory::false_value()));
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r1, ip);
cc_reg_ = eq;
} else if (check->Equals(Heap::undefined_symbol())) {
- __ cmp(r1, Operand(Factory::undefined_value()));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r1, ip);
true_target()->Branch(eq);
__ tst(r1, Operand(kSmiTagMask));
@@ -3975,7 +3997,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
false_target()->Branch(eq);
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r1, Operand(Factory::null_value()));
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r1, ip);
true_target()->Branch(eq);
// It can be an undetectable object.
@@ -4206,7 +4229,8 @@ void Reference::SetValue(InitState init_state) {
// executed, the code is identical to a normal store (see below).
Comment cmnt(masm, "[ Init const");
__ ldr(r2, cgen_->SlotOperand(slot, r2));
- __ cmp(r2, Operand(Factory::the_hole_value()));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r2, ip);
exit.Branch(ne);
}
@@ -4939,7 +4963,7 @@ static void AllocateHeapNumber(
// Tag and adjust back to start of new object.
__ sub(result_reg, result_reg, Operand(HeapNumber::kSize - kHeapObjectTag));
// Get heap number map into scratch2.
- __ mov(scratch2, Operand(Factory::heap_number_map()));
+ __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
// Store heap number map in new object.
__ str(scratch2, FieldMemOperand(result_reg, HeapObject::kMapOffset));
}
@@ -5601,17 +5625,11 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
void StackCheckStub::Generate(MacroAssembler* masm) {
- Label within_limit;
- __ mov(ip, Operand(ExternalReference::address_of_stack_guard_limit()));
- __ ldr(ip, MemOperand(ip));
- __ cmp(sp, Operand(ip));
- __ b(hs, &within_limit);
// Do tail-call to runtime routine. Runtime routines expect at least one
// argument, so give it a Smi.
__ mov(r0, Operand(Smi::FromInt(0)));
__ push(r0);
__ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1);
- __ bind(&within_limit);
__ StubReturn(1);
}
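
Note: the inline limit check deleted above is not lost. The sp-versus-limit comparison now happens in generated code before the stub is invoked (conditionally, via the Condition parameter added to CallStub further down in macro-assembler-arm), so the stub body reduces to the runtime tail call. A minimal sketch of the split, in plain C++ rather than V8 source (names illustrative):

    #include <cassert>
    #include <cstdint>

    // The caller does the cheap unsigned comparison; only when it fails does
    // control reach the equivalent of StackCheckStub's runtime tail call.
    static bool HitStackGuard(uintptr_t sp, uintptr_t jslimit) {
      return sp < jslimit;  // the 'lo' branch the old stub used to emit
    }

    int main() {
      assert(!HitStackGuard(0x2000, 0x1000));  // enough stack: skip the stub
      assert(HitStackGuard(0x0800, 0x1000));   // limit crossed: call runtime
      return 0;
    }
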
@@ -5962,9 +5980,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r2: receiver
// r3: argc
// r4: argv
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
__ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- __ mov(r7, Operand(~ArgumentsAdaptorFrame::SENTINEL));
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ __ mov(r7, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
__ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
__ ldr(r5, MemOperand(r5));
@@ -6090,7 +6108,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ cmp(r2, Operand(r4));
__ b(eq, &is_instance);
- __ cmp(r2, Operand(Factory::null_value()));
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r2, ip);
__ b(eq, &is_not_instance);
__ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
__ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
@@ -6120,7 +6139,7 @@ void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
Label adaptor;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &adaptor);
// Nothing to do: The formal number of parameters has already been
@@ -6149,7 +6168,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
Label adaptor;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &adaptor);
// Check index against formal parameters count limit passed in
@@ -6191,7 +6210,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
Label runtime;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &runtime);
// Patch the arguments.length and the parameters pointer.
diff --git a/V8Binding/v8/src/arm/disasm-arm.cc b/V8Binding/v8/src/arm/disasm-arm.cc
index d193ab9..0abe35b 100644
--- a/V8Binding/v8/src/arm/disasm-arm.cc
+++ b/V8Binding/v8/src/arm/disasm-arm.cc
@@ -842,7 +842,7 @@ static const int kMaxRegisters = 16;
// formatting. See for example the command "objdump -d <binary file>".
static const char* reg_names[kMaxRegisters] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc",
+ "r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc",
};
diff --git a/V8Binding/v8/src/arm/ic-arm.cc b/V8Binding/v8/src/arm/ic-arm.cc
index 8781256..848d04b 100644
--- a/V8Binding/v8/src/arm/ic-arm.cc
+++ b/V8Binding/v8/src/arm/ic-arm.cc
@@ -87,7 +87,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Check that the properties array is a dictionary.
__ ldr(t0, FieldMemOperand(t1, JSObject::kPropertiesOffset));
__ ldr(r3, FieldMemOperand(t0, HeapObject::kMapOffset));
- __ cmp(r3, Operand(Factory::hash_table_map()));
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(r3, ip);
__ b(ne, miss);
// Compute the capacity mask.
@@ -254,9 +255,11 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Check for boolean.
__ bind(&non_string);
- __ cmp(r1, Operand(Factory::true_value()));
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r1, ip);
__ b(eq, &boolean);
- __ cmp(r1, Operand(Factory::false_value()));
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r1, ip);
__ b(ne, &miss);
__ bind(&boolean);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
@@ -582,7 +585,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r3, Operand(Factory::fixed_array_map()));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(r3, ip);
__ b(ne, &slow);
// Check that the key (index) is within bounds.
__ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
@@ -601,7 +605,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&fast);
__ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
- __ cmp(r0, Operand(Factory::the_hole_value()));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r0, ip);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ b(eq, &slow);
@@ -661,7 +666,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ cmp(r2, Operand(Factory::fixed_array_map()));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(r2, ip);
__ b(ne, &slow);
// Untag the key (for checking against untagged length in the fixed array).
__ mov(r1, Operand(r1, ASR, kSmiTagSize));
@@ -710,7 +716,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&array);
__ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset));
__ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ cmp(r1, Operand(Factory::fixed_array_map()));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(r1, ip);
__ b(ne, &slow);
// Check the key against the length in the array, compute the
diff --git a/V8Binding/v8/src/arm/macro-assembler-arm.cc b/V8Binding/v8/src/arm/macro-assembler-arm.cc
index 4b02e2d..65c2a3e 100644
--- a/V8Binding/v8/src/arm/macro-assembler-arm.cc
+++ b/V8Binding/v8/src/arm/macro-assembler-arm.cc
@@ -174,6 +174,13 @@ void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
}
+void MacroAssembler::LoadRoot(Register destination,
+ Heap::RootListIndex index,
+ Condition cond) {
+ ldr(destination, MemOperand(r10, index << kPointerSizeLog2), cond);
+}
+
+
// Will clobber 4 registers: object, offset, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
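
Note: LoadRoot turns each former Factory::*_value() immediate into a single indexed load off the roots array whose base is kept in r10 (which is why the disassembler change below renames "sl" to "r10"). A standalone model of the addressing, in plain C++ rather than V8 source (the index value is illustrative):

    #include <cassert>

    static const int kPointerSizeLog2 = 2;  // 32-bit ARM

    // Equivalent of: ldr destination, [r10, index << kPointerSizeLog2]
    static void* LoadRootModel(void* const* roots_base, int index) {
      const char* base = reinterpret_cast<const char*>(roots_base);
      return *reinterpret_cast<void* const*>(base + (index << kPointerSizeLog2));
    }

    int main() {
      int undefined_marker = 0;
      void* roots[4] = { 0, &undefined_marker, 0, 0 };
      assert(LoadRootModel(roots, 1) == &undefined_marker);  // index 1 illustrative
      return 0;
    }
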
@@ -714,7 +721,8 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
push(holder_reg); // Temporarily save holder on the stack.
// Read the first word and compare to the global_context_map.
ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- cmp(holder_reg, Operand(Factory::global_context_map()));
+ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+ cmp(holder_reg, ip);
Check(eq, "JSGlobalObject::global_context should be a global context.");
pop(holder_reg); // Restore holder.
}
@@ -731,11 +739,13 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// that ip is clobbered as part of cmp with an object Operand.
push(holder_reg); // Temporarily save holder on the stack.
mov(holder_reg, ip); // Move ip to its holding place.
- cmp(holder_reg, Operand(Factory::null_value()));
+ LoadRoot(ip, Heap::kNullValueRootIndex);
+ cmp(holder_reg, ip);
Check(ne, "JSGlobalProxy::context() should not be null.");
ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- cmp(holder_reg, Operand(Factory::global_context_map()));
+ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+ cmp(holder_reg, ip);
Check(eq, "JSGlobalObject::global_context should be a global context.");
// Restore ip is not needed. ip is reloaded below.
pop(holder_reg); // Restore holder.
@@ -792,7 +802,8 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// If the prototype or initial map is the hole, don't return it and
// simply miss the cache instead. This will allow us to allocate a
// prototype object on-demand in the runtime system.
- cmp(result, Operand(Factory::the_hole_value()));
+ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ cmp(result, ip);
b(eq, miss);
// If the function does not have an initial map, we're done.
@@ -814,9 +825,9 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
}
-void MacroAssembler::CallStub(CodeStub* stub) {
+void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
- Call(stub->GetCode(), RelocInfo::CODE_TARGET);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
@@ -832,7 +843,7 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
add(sp, sp, Operand(num_arguments * kPointerSize));
}
- mov(r0, Operand(Factory::undefined_value()));
+ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
}
diff --git a/V8Binding/v8/src/arm/macro-assembler-arm.h b/V8Binding/v8/src/arm/macro-assembler-arm.h
index ab74805..e4758cc 100644
--- a/V8Binding/v8/src/arm/macro-assembler-arm.h
+++ b/V8Binding/v8/src/arm/macro-assembler-arm.h
@@ -89,6 +89,10 @@ class MacroAssembler: public Assembler {
void Ret(Condition cond = al);
// Jumps to the label at the index given by the Smi in "index".
void SmiJumpTable(Register index, Vector<Label*> targets);
+ // Load an object from the root table.
+ void LoadRoot(Register destination,
+ Heap::RootListIndex index,
+ Condition cond = al);
// Sets the remembered set bit for [address+offset], where address is the
// address of the heap object 'object'. The address must be in the first 8K
@@ -227,7 +231,7 @@ class MacroAssembler: public Assembler {
// Runtime calls
// Call a code stub.
- void CallStub(CodeStub* stub);
+ void CallStub(CodeStub* stub, Condition cond = al);
void CallJSExitStub(CodeStub* stub);
// Return from a code stub after popping its arguments.
diff --git a/V8Binding/v8/src/arm/simulator-arm.cc b/V8Binding/v8/src/arm/simulator-arm.cc
index e5500aa..d12ddbf 100644
--- a/V8Binding/v8/src/arm/simulator-arm.cc
+++ b/V8Binding/v8/src/arm/simulator-arm.cc
@@ -1051,7 +1051,6 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
}
set_register(r0, lo_res);
set_register(r1, hi_res);
- set_register(r0, result);
}
set_register(lr, saved_lr);
set_pc(get_register(lr));
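
Note: the deleted store was overwriting r0 after a 64-bit runtime result had already been split into the r0/r1 pair, clobbering the low word. A standalone sketch of the convention (plain C++, not V8 source):

    #include <cassert>
    #include <cstdint>

    // 32-bit ARM EABI: a 64-bit result travels as lo word in r0, hi word in
    // r1, so any later write of the whole value into r0 destroys the lo word.
    static void SplitResult(int64_t result, uint32_t* r0, uint32_t* r1) {
      *r0 = static_cast<uint32_t>(result);        // lo_res
      *r1 = static_cast<uint32_t>(result >> 32);  // hi_res
    }

    int main() {
      uint32_t r0, r1;
      SplitResult(0x0123456789abcdefLL, &r0, &r1);
      assert(r0 == 0x89abcdefu && r1 == 0x01234567u);
      return 0;
    }
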
diff --git a/V8Binding/v8/src/arm/stub-cache-arm.cc b/V8Binding/v8/src/arm/stub-cache-arm.cc
index 393db59..1581428 100644
--- a/V8Binding/v8/src/arm/stub-cache-arm.cc
+++ b/V8Binding/v8/src/arm/stub-cache-arm.cc
@@ -395,7 +395,8 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
__ mov(scratch, Operand(Handle<Object>(cell)));
__ ldr(scratch,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- __ cmp(scratch, Operand(Factory::the_hole_value()));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch, ip);
__ b(ne, miss);
}
object = JSObject::cast(object->GetPrototype());
@@ -667,9 +668,11 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case BOOLEAN_CHECK: {
Label fast;
// Check that the object is a boolean.
- __ cmp(r1, Operand(Factory::true_value()));
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r1, ip);
__ b(eq, &fast);
- __ cmp(r1, Operand(Factory::false_value()));
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r1, ip);
__ b(ne, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
@@ -688,7 +691,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ cmp(r2, Operand(Factory::fixed_array_map()));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(r2, ip);
__ b(ne, &miss);
break;
@@ -1108,7 +1112,8 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
- __ cmp(r0, Operand(Factory::the_hole_value()));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r0, ip);
__ b(eq, &miss);
}
@@ -1337,6 +1342,18 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
}
+Object* ConstructStubCompiler::CompileConstructStub(
+ SharedFunctionInfo* shared) {
+  // Not implemented yet - just jump to the generic stub.
+ Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Handle<Code> generic_construct_stub(code);
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode();
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/virtual-frame-arm.cc b/V8Binding/v8/src/arm/virtual-frame-arm.cc
index d3dabf8..9795860 100644
--- a/V8Binding/v8/src/arm/virtual-frame-arm.cc
+++ b/V8Binding/v8/src/arm/virtual-frame-arm.cc
@@ -102,7 +102,8 @@ void VirtualFrame::Enter() {
#ifdef DEBUG
// Verify that r1 contains a JS function. The following code relies
// on r2 being available for use.
- { Label map_check, done;
+ if (FLAG_debug_code) {
+ Label map_check, done;
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &map_check);
__ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
@@ -139,7 +140,7 @@ void VirtualFrame::AllocateStackSlots() {
Comment cmnt(masm(), "[ Allocate space for locals");
Adjust(count);
// Initialize stack slots with 'undefined' value.
- __ mov(ip, Operand(Factory::undefined_value()));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
for (int i = 0; i < count; i++) {
__ push(ip);
}
diff --git a/V8Binding/v8/src/assembler.cc b/V8Binding/v8/src/assembler.cc
index 5d0310d..546490e 100644
--- a/V8Binding/v8/src/assembler.cc
+++ b/V8Binding/v8/src/assembler.cc
@@ -563,6 +563,11 @@ ExternalReference ExternalReference::the_hole_value_location() {
}
+ExternalReference ExternalReference::roots_address() {
+ return ExternalReference(Heap::roots_address());
+}
+
+
ExternalReference ExternalReference::address_of_stack_guard_limit() {
return ExternalReference(StackGuard::address_of_jslimit());
}
diff --git a/V8Binding/v8/src/assembler.h b/V8Binding/v8/src/assembler.h
index 1ddc8a3..e217918 100644
--- a/V8Binding/v8/src/assembler.h
+++ b/V8Binding/v8/src/assembler.h
@@ -401,6 +401,9 @@ class ExternalReference BASE_EMBEDDED {
// Static variable Factory::the_hole_value.location()
static ExternalReference the_hole_value_location();
+ // Static variable Heap::roots_address()
+ static ExternalReference roots_address();
+
// Static variable StackGuard::address_of_jslimit()
static ExternalReference address_of_stack_guard_limit();
diff --git a/V8Binding/v8/src/builtins.cc b/V8Binding/v8/src/builtins.cc
index dbd18f8..4262dd2 100644
--- a/V8Binding/v8/src/builtins.cc
+++ b/V8Binding/v8/src/builtins.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "api.h"
+#include "arguments.h"
#include "bootstrapper.h"
#include "builtins.h"
#include "ic-inl.h"
@@ -47,17 +48,13 @@ namespace internal {
// BUILTIN_END
//
// In the body of the builtin function, the variable 'receiver' is visible.
-// The arguments can be accessed through:
+// The arguments can be accessed through the Arguments object args.
//
-// BUILTIN_ARG(0): Receiver (also available as 'receiver')
-// BUILTIN_ARG(1): First argument
+// args[0]: Receiver (also available as 'receiver')
+// args[1]: First argument
// ...
-// BUILTIN_ARG(n): Last argument
-//
-// and they evaluate to undefined values if too few arguments were
-// passed to the builtin function invocation.
-//
-// __argc__ is the number of arguments including the receiver.
+// args[n]: Last argument
+// args.length(): Number of arguments including the receiver.
// ----------------------------------------------------------------------------
@@ -65,21 +62,8 @@ namespace internal {
// builtin was invoked as a constructor as part of the
// arguments. Maybe we also want to pass the called function?
#define BUILTIN(name) \
- static Object* Builtin_##name(int __argc__, Object** __argv__) { \
- Handle<Object> receiver(&__argv__[0]);
-
-
-// Use an inline function to avoid evaluating the index (n) more than
-// once in the BUILTIN_ARG macro.
-static inline Object* __builtin_arg__(int n, int argc, Object** argv) {
- ASSERT(n >= 0);
- return (argc > n) ? argv[-n] : Heap::undefined_value();
-}
-
-
-// NOTE: Argument 0 is the receiver. The first 'real' argument is
-// argument 1 - BUILTIN_ARG(1).
-#define BUILTIN_ARG(n) (__builtin_arg__(n, __argc__, __argv__))
+ static Object* Builtin_##name(Arguments args) { \
+ Handle<Object> receiver = args.at<Object>(0);
#define BUILTIN_END \
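
Note: the BUILTIN body now sees a single Arguments object instead of the raw __argc__/__argv__ pair. A minimal model of the indexing it wraps, in plain C++ rather than the real v8::internal::Arguments: slots sit at decreasing addresses below the receiver, which is what the old argv[-n] access encoded.

    #include <cassert>

    class ArgumentsModel {
     public:
      ArgumentsModel(int length, void** arguments)
          : length_(length), arguments_(arguments) {}
      // args[0] is the receiver; higher indices walk toward lower addresses,
      // mirroring the argv[-n] access the removed __builtin_arg__ performed.
      void*& operator[](int index) {
        assert(0 <= index && index < length_);
        return arguments_[-index];
      }
      int length() const { return length_; }
     private:
      int length_;
      void** arguments_;
    };

    int main() {
      void* slots[3] = { (void*)0x1, (void*)0x2, (void*)0x3 };
      ArgumentsModel args(3, &slots[2]);  // base points at the receiver slot
      assert(args[0] == (void*)0x3);      // receiver
      assert(args[2] == (void*)0x1);      // last argument
      assert(args.length() == 3);
      return 0;
    }

One behavioral difference is visible in the removed lines: BUILTIN_ARG silently yielded undefined for out-of-range indices, while the Arguments accessor asserts instead.
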
@@ -168,8 +152,8 @@ BUILTIN(ArrayCode) {
// Optimize the case where there is one argument and the argument is a
// small smi.
- if (__argc__ == 2) {
- Object* obj = BUILTIN_ARG(1);
+ if (args.length() == 2) {
+ Object* obj = args[1];
if (obj->IsSmi()) {
int len = Smi::cast(obj)->value();
if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
@@ -182,14 +166,14 @@ BUILTIN(ArrayCode) {
// Take the argument as the length.
obj = array->Initialize(0);
if (obj->IsFailure()) return obj;
- if (__argc__ == 2) return array->SetElementsLength(BUILTIN_ARG(1));
+ if (args.length() == 2) return array->SetElementsLength(args[1]);
}
// Optimize the case where there are no parameters passed.
- if (__argc__ == 1) return array->Initialize(4);
+ if (args.length() == 1) return array->Initialize(4);
// Take the arguments as elements.
- int number_of_elements = __argc__ - 1;
+ int number_of_elements = args.length() - 1;
Smi* len = Smi::FromInt(number_of_elements);
Object* obj = Heap::AllocateFixedArrayWithHoles(len->value());
if (obj->IsFailure()) return obj;
@@ -197,7 +181,7 @@ BUILTIN(ArrayCode) {
WriteBarrierMode mode = elms->GetWriteBarrierMode();
// Fill in the content
for (int index = 0; index < number_of_elements; index++) {
- elms->set(index, BUILTIN_ARG(index+1), mode);
+ elms->set(index, args[index+1], mode);
}
// Set length and elements on the array.
@@ -217,13 +201,13 @@ BUILTIN(ArrayPush) {
int len = Smi::cast(array->length())->value();
// Set new length.
- int new_length = len + __argc__ - 1;
+ int new_length = len + args.length() - 1;
FixedArray* elms = FixedArray::cast(array->elements());
if (new_length <= elms->length()) {
// Backing storage has extra space for the provided values.
- for (int index = 0; index < __argc__ - 1; index++) {
- elms->set(index + len, BUILTIN_ARG(index+1));
+ for (int index = 0; index < args.length() - 1; index++) {
+ elms->set(index + len, args[index+1]);
}
} else {
// New backing storage is needed.
@@ -235,8 +219,8 @@ BUILTIN(ArrayPush) {
// Fill out the new array with old elements.
for (int i = 0; i < len; i++) new_elms->set(i, elms->get(i), mode);
// Add the provided values.
- for (int index = 0; index < __argc__ - 1; index++) {
- new_elms->set(index + len, BUILTIN_ARG(index+1), mode);
+ for (int index = 0; index < args.length() - 1; index++) {
+ new_elms->set(index + len, args[index+1], mode);
}
// Set the new backing storage.
array->set_elements(new_elms);
@@ -353,7 +337,7 @@ BUILTIN(HandleApiCall) {
FunctionTemplateInfo* fun_data =
FunctionTemplateInfo::cast(function->shared()->function_data());
- Object* raw_holder = TypeCheck(__argc__, __argv__, fun_data);
+ Object* raw_holder = TypeCheck(args.length(), &args[0], fun_data);
if (raw_holder->IsNull()) {
// This function cannot be called with the given receiver. Abort!
@@ -380,19 +364,19 @@ BUILTIN(HandleApiCall) {
Handle<JSObject> holder_handle(JSObject::cast(raw_holder));
v8::Local<v8::Object> holder = v8::Utils::ToLocal(holder_handle);
LOG(ApiObjectAccess("call", JSObject::cast(*receiver)));
- v8::Arguments args = v8::ImplementationUtilities::NewArguments(
+ v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
data,
holder,
callee,
is_construct,
- reinterpret_cast<void**>(__argv__ - 1),
- __argc__ - 1);
+ reinterpret_cast<void**>(&args[0] - 1),
+ args.length() - 1);
v8::Handle<v8::Value> value;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
- value = callback(args);
+ value = callback(new_args);
}
if (value.IsEmpty()) {
result = Heap::undefined_value();
@@ -413,13 +397,12 @@ BUILTIN_END
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).
static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
- int __argc__,
- Object** __argv__) {
+ Arguments args) {
// Non-functions are never called as constructors. Even if this is an object
// called as a constructor the delegate call is not a construct call.
ASSERT(!CalledAsConstructor());
- Handle<Object> receiver(&__argv__[0]);
+ Handle<Object> receiver = args.at<Object>(0);
// Get the object called.
JSObject* obj = JSObject::cast(*receiver);
@@ -448,18 +431,18 @@ static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
Handle<JSFunction> callee_handle(constructor);
v8::Local<v8::Function> callee = v8::Utils::ToLocal(callee_handle);
LOG(ApiObjectAccess("call non-function", JSObject::cast(*receiver)));
- v8::Arguments args = v8::ImplementationUtilities::NewArguments(
+ v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
data,
self,
callee,
is_construct_call,
- reinterpret_cast<void**>(__argv__ - 1),
- __argc__ - 1);
+ reinterpret_cast<void**>(&args[0] - 1),
+ args.length() - 1);
v8::Handle<v8::Value> value;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
- value = callback(args);
+ value = callback(new_args);
}
if (value.IsEmpty()) {
result = Heap::undefined_value();
@@ -476,7 +459,7 @@ static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
// Handle calls to non-function objects created through the API. This delegate
// function is used when the call is a normal function call.
BUILTIN(HandleApiCallAsFunction) {
- return HandleApiCallAsFunctionOrConstructor(false, __argc__, __argv__);
+ return HandleApiCallAsFunctionOrConstructor(false, args);
}
BUILTIN_END
@@ -484,7 +467,7 @@ BUILTIN_END
// Handle calls to non-function objects created through the API. This delegate
// function is used when the call is a construct call.
BUILTIN(HandleApiCallAsConstructor) {
- return HandleApiCallAsFunctionOrConstructor(true, __argc__, __argv__);
+ return HandleApiCallAsFunctionOrConstructor(true, args);
}
BUILTIN_END
diff --git a/V8Binding/v8/src/compiler.cc b/V8Binding/v8/src/compiler.cc
index feff492..15f6479 100644
--- a/V8Binding/v8/src/compiler.cc
+++ b/V8Binding/v8/src/compiler.cc
@@ -425,6 +425,13 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
// Set the expected number of properties for instances.
SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
+  // Set the optimization hints after performing lazy compilation, as these are
+ // not set when the function is set up as a lazily compiled function.
+ shared->SetThisPropertyAssignmentsInfo(
+ lit->has_only_this_property_assignments(),
+ lit->has_only_simple_this_property_assignments(),
+ *lit->this_property_assignments());
+
// Check the function has compiled code.
ASSERT(shared->is_compiled());
return true;
diff --git a/V8Binding/v8/src/debug-delay.js b/V8Binding/v8/src/debug-delay.js
index 4f60851..ce70c75 100644
--- a/V8Binding/v8/src/debug-delay.js
+++ b/V8Binding/v8/src/debug-delay.js
@@ -466,9 +466,14 @@ Debug.source = function(f) {
return %FunctionGetSourceCode(f);
};
-Debug.assembler = function(f) {
+Debug.disassemble = function(f) {
if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %FunctionGetAssemblerCode(f);
+ return %DebugDisassembleFunction(f);
+};
+
+Debug.disassembleConstructor = function(f) {
+ if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+ return %DebugDisassembleConstructor(f);
};
Debug.sourcePosition = function(f) {
diff --git a/V8Binding/v8/src/debug.cc b/V8Binding/v8/src/debug.cc
index f2a2814..faeb29b 100644
--- a/V8Binding/v8/src/debug.cc
+++ b/V8Binding/v8/src/debug.cc
@@ -1548,8 +1548,8 @@ void Debug::CreateScriptCache() {
// Perform two GCs to get rid of all unreferenced scripts. The first GC gets
// rid of all the cached script wrappers and the second gets rid of the
  // scripts which are no longer referenced.
- Heap::CollectAllGarbage();
- Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage(false);
+ Heap::CollectAllGarbage(false);
ASSERT(script_cache_ == NULL);
script_cache_ = new ScriptCache();
@@ -1599,7 +1599,7 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
// Perform GC to get unreferenced scripts evicted from the cache before
// returning the content.
- Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage(false);
// Get the scripts from the cache.
return script_cache_->GetScripts();
diff --git a/V8Binding/v8/src/execution.cc b/V8Binding/v8/src/execution.cc
index 0ad55bd..18c5bb8 100644
--- a/V8Binding/v8/src/execution.cc
+++ b/V8Binding/v8/src/execution.cc
@@ -234,8 +234,9 @@ StackGuard::StackGuard() {
(thread_local_.climit_ == kInterruptLimit &&
thread_local_.interrupt_flags_ != 0));
- thread_local_.initial_jslimit_ = thread_local_.jslimit_ =
- GENERATED_CODE_STACK_LIMIT(kLimitSize);
+ uintptr_t limit = GENERATED_CODE_STACK_LIMIT(kLimitSize);
+ thread_local_.initial_jslimit_ = thread_local_.jslimit_ = limit;
+ Heap::SetStackLimit(limit);
// NOTE: The check for overflow is not safe as there is no guarantee that
// the running thread has its stack in all memory up to address 0x00000000.
thread_local_.initial_climit_ = thread_local_.climit_ =
@@ -283,6 +284,7 @@ void StackGuard::SetStackLimit(uintptr_t limit) {
// leave them alone.
if (thread_local_.jslimit_ == thread_local_.initial_jslimit_) {
thread_local_.jslimit_ = limit;
+ Heap::SetStackLimit(limit);
}
if (thread_local_.climit_ == thread_local_.initial_climit_) {
thread_local_.climit_ = limit;
@@ -397,6 +399,7 @@ char* StackGuard::ArchiveStackGuard(char* to) {
char* StackGuard::RestoreStackGuard(char* from) {
ExecutionAccess access;
memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+ Heap::SetStackLimit(thread_local_.jslimit_);
return from + sizeof(ThreadLocal);
}
@@ -677,7 +680,7 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
// All allocation spaces other than NEW_SPACE have the same effect.
- Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage(false);
return v8::Undefined();
}
diff --git a/V8Binding/v8/src/execution.h b/V8Binding/v8/src/execution.h
index 456cbe7..4cdfd2b 100644
--- a/V8Binding/v8/src/execution.h
+++ b/V8Binding/v8/src/execution.h
@@ -175,6 +175,10 @@ class StackGuard BASE_EMBEDDED {
#endif
static void Continue(InterruptFlag after_what);
+ static uintptr_t jslimit() {
+ return thread_local_.jslimit_;
+ }
+
private:
// You should hold the ExecutionAccess lock when calling this method.
static bool IsSet(const ExecutionAccess& lock);
@@ -188,6 +192,7 @@ class StackGuard BASE_EMBEDDED {
// You should hold the ExecutionAccess lock when calling this method.
static void set_limits(uintptr_t value, const ExecutionAccess& lock) {
+ Heap::SetStackLimit(value);
thread_local_.jslimit_ = value;
thread_local_.climit_ = value;
}
@@ -200,6 +205,7 @@ class StackGuard BASE_EMBEDDED {
set_limits(kIllegalLimit, lock);
} else {
thread_local_.jslimit_ = thread_local_.initial_jslimit_;
+ Heap::SetStackLimit(thread_local_.jslimit_);
thread_local_.climit_ = thread_local_.initial_climit_;
}
}
@@ -220,13 +226,15 @@ class StackGuard BASE_EMBEDDED {
class ThreadLocal {
public:
ThreadLocal()
- : initial_jslimit_(kIllegalLimit),
- jslimit_(kIllegalLimit),
- initial_climit_(kIllegalLimit),
- climit_(kIllegalLimit),
- nesting_(0),
- postpone_interrupts_nesting_(0),
- interrupt_flags_(0) {}
+ : initial_jslimit_(kIllegalLimit),
+ jslimit_(kIllegalLimit),
+ initial_climit_(kIllegalLimit),
+ climit_(kIllegalLimit),
+ nesting_(0),
+ postpone_interrupts_nesting_(0),
+ interrupt_flags_(0) {
+ Heap::SetStackLimit(kIllegalLimit);
+ }
uintptr_t initial_jslimit_;
uintptr_t jslimit_;
uintptr_t initial_climit_;
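
Note: every path that writes thread_local_.jslimit_ now mirrors the value into the heap's stack-limit root slot, so the copy that generated code reads can never go stale. A minimal model of the invariant (plain C++, illustrative names):

    #include <cassert>
    #include <cstdint>

    struct LimitsModel {
      uintptr_t jslimit;
      uintptr_t heap_root_copy;  // stands in for the Heap::SetStackLimit slot
      void set_limits(uintptr_t value) {
        heap_root_copy = value;  // Heap::SetStackLimit(value)
        jslimit = value;
      }
    };

    int main() {
      LimitsModel limits;
      limits.set_limits(0x10000);
      assert(limits.jslimit == limits.heap_root_copy);
      return 0;
    }
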
diff --git a/V8Binding/v8/src/frames-inl.h b/V8Binding/v8/src/frames-inl.h
index b04cf50..c5f2f1a 100644
--- a/V8Binding/v8/src/frames-inl.h
+++ b/V8Binding/v8/src/frames-inl.h
@@ -128,8 +128,9 @@ inline Address StandardFrame::ComputePCAddress(Address fp) {
inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
- int context = Memory::int_at(fp + StandardFrameConstants::kContextOffset);
- return context == ArgumentsAdaptorFrame::SENTINEL;
+ Object* marker =
+ Memory::Object_at(fp + StandardFrameConstants::kContextOffset);
+ return marker == Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR);
}
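
Note: the old SENTINEL (removed from frames.h below) was the fixed Smi 1, reserved solely for adaptor frames; the marker is now Smi::FromInt of the frame's StackFrame::Type, so the same context slot also tags ENTRY and ENTRY_CONSTRUCT frames (see the JSEntryStub changes above). A sketch of why a Smi is safe in a slot that otherwise holds a context pointer (plain C++, 32-bit layout, enum value illustrative):

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;
    const intptr_t kSmiTag = 0;

    static intptr_t SmiFromInt(int value) {
      return static_cast<intptr_t>(value) << kSmiTagSize;
    }

    int main() {
      const int ARGUMENTS_ADAPTOR = 4;  // illustrative enum value only
      intptr_t marker = SmiFromInt(ARGUMENTS_ADAPTOR);
      // Low bit clear: a Smi, never mistakable for a tagged heap pointer, so
      // the GC ignores it and IsArgumentsAdaptorFrame can compare directly.
      assert((marker & ((1 << kSmiTagSize) - 1)) == kSmiTag);
      return 0;
    }
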
diff --git a/V8Binding/v8/src/frames.h b/V8Binding/v8/src/frames.h
index f002e12..768196d 100644
--- a/V8Binding/v8/src/frames.h
+++ b/V8Binding/v8/src/frames.h
@@ -434,15 +434,6 @@ class JavaScriptFrame: public StandardFrame {
// match the formal number of parameters.
class ArgumentsAdaptorFrame: public JavaScriptFrame {
public:
- // This sentinel value is temporarily used to distinguish arguments
- // adaptor frames from ordinary JavaScript frames. If a frame has
- // the sentinel as its context, it is an arguments adaptor frame. It
- // must be tagged as a small integer to avoid GC issues. Crud.
- enum {
- SENTINEL = (1 << kSmiTagSize) | kSmiTag,
- NON_SENTINEL = ~SENTINEL
- };
-
virtual Type type() const { return ARGUMENTS_ADAPTOR; }
// Determine the code for the frame.
diff --git a/V8Binding/v8/src/global-handles.cc b/V8Binding/v8/src/global-handles.cc
index f868974..e51c4aa 100644
--- a/V8Binding/v8/src/global-handles.cc
+++ b/V8Binding/v8/src/global-handles.cc
@@ -144,8 +144,8 @@ class GlobalHandles::Node : public Malloced {
// Returns the callback for this weak handle.
WeakReferenceCallback callback() { return callback_; }
- void PostGarbageCollectionProcessing() {
- if (state_ != Node::PENDING) return;
+ bool PostGarbageCollectionProcessing() {
+ if (state_ != Node::PENDING) return false;
LOG(HandleEvent("GlobalHandle::Processing", handle().location()));
void* par = parameter();
state_ = NEAR_DEATH;
@@ -153,18 +153,19 @@ class GlobalHandles::Node : public Malloced {
// The callback function is resolved as late as possible to preserve old
// behavior.
WeakReferenceCallback func = callback();
- if (func != NULL) {
- v8::Persistent<v8::Object> object = ToApi<v8::Object>(handle());
- {
- // Forbid reuse of destroyed nodes as they might be already deallocated.
- // It's fine though to reuse nodes that were destroyed in weak callback
- // as those cannot be deallocated until we are back from the callback.
- set_first_free(NULL);
- // Leaving V8.
- VMState state(EXTERNAL);
- func(object, par);
- }
+ if (func == NULL) return false;
+
+ v8::Persistent<v8::Object> object = ToApi<v8::Object>(handle());
+ {
+ // Forbid reuse of destroyed nodes as they might be already deallocated.
+ // It's fine though to reuse nodes that were destroyed in weak callback
+ // as those cannot be deallocated until we are back from the callback.
+ set_first_free(NULL);
+ // Leaving V8.
+ VMState state(EXTERNAL);
+ func(object, par);
}
+ return true;
}
// Place the handle address first to avoid offset computation.
@@ -275,15 +276,26 @@ void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
}
+int post_gc_processing_count = 0;
+
void GlobalHandles::PostGarbageCollectionProcessing() {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
// At the same time deallocate all DESTROYED nodes
ASSERT(Heap::gc_state() == Heap::NOT_IN_GC);
+ const int initial_post_gc_processing_count = ++post_gc_processing_count;
Node** p = &head_;
while (*p != NULL) {
- (*p)->PostGarbageCollectionProcessing();
+ if ((*p)->PostGarbageCollectionProcessing()) {
+ if (initial_post_gc_processing_count != post_gc_processing_count) {
+ // Weak callback triggered another GC and another round of
+ // PostGarbageCollection processing. The current node might
+ // have been deleted in that round, so we need to bail out (or
+ // restart the processing).
+ break;
+ }
+ }
if ((*p)->state_ == Node::DESTROYED) {
// Delete the link.
Node* node = *p;
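
Note: the counter guards against a weak callback triggering a nested GC whose own processing round mutates the node list under the outer loop. A standalone model of the bail-out (plain C++, not V8 source):

    #include <cassert>

    static int post_gc_processing_count = 0;

    // Returns true when a nested round ran and the caller must stop walking
    // its (possibly stale) node list, mirroring the added break above.
    static bool ProcessRound(bool callback_triggers_nested_round) {
      const int initial = ++post_gc_processing_count;
      // ... a weak callback runs here; it may re-enter GC processing ...
      if (callback_triggers_nested_round) ProcessRound(false);
      return initial != post_gc_processing_count;
    }

    int main() {
      assert(!ProcessRound(false));  // quiet callback: keep walking the list
      assert(ProcessRound(true));    // re-entrant callback: bail out
      return 0;
    }
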
diff --git a/V8Binding/v8/src/globals.h b/V8Binding/v8/src/globals.h
index 195a2e2..3b8ee92 100644
--- a/V8Binding/v8/src/globals.h
+++ b/V8Binding/v8/src/globals.h
@@ -134,17 +134,6 @@ const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
-// Tag information for HeapObject.
-const int kHeapObjectTag = 1;
-const int kHeapObjectTagSize = 2;
-const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
-
-
-// Tag information for Smi.
-const int kSmiTag = 0;
-const int kSmiTagSize = 1;
-const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
-
// Tag information for Failure.
const int kFailureTag = 3;
@@ -429,9 +418,6 @@ enum StateTag {
#define HAS_FAILURE_TAG(value) \
((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
-#define HAS_HEAP_OBJECT_TAG(value) \
- ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) == kHeapObjectTag)
-
// OBJECT_SIZE_ALIGN returns the value aligned HeapObject size
#define OBJECT_SIZE_ALIGN(value) \
(((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
diff --git a/V8Binding/v8/src/handles.cc b/V8Binding/v8/src/handles.cc
index 6345d41..fae006a 100644
--- a/V8Binding/v8/src/handles.cc
+++ b/V8Binding/v8/src/handles.cc
@@ -53,8 +53,8 @@ int HandleScope::NumberOfHandles() {
}
-void** HandleScope::Extend() {
- void** result = current_.next;
+Object** HandleScope::Extend() {
+ Object** result = current_.next;
ASSERT(result == current_.limit);
// Make sure there's at least one scope on the stack and that the
@@ -68,7 +68,7 @@ void** HandleScope::Extend() {
// If there's more room in the last block, we use that. This is used
// for fast creation of scopes after scope barriers.
if (!impl->Blocks()->is_empty()) {
- void** limit = &impl->Blocks()->last()[kHandleBlockSize];
+ Object** limit = &impl->Blocks()->last()[kHandleBlockSize];
if (current_.limit != limit) {
current_.limit = limit;
}
@@ -96,10 +96,10 @@ void HandleScope::DeleteExtensions() {
}
-void HandleScope::ZapRange(void** start, void** end) {
+void HandleScope::ZapRange(Object** start, Object** end) {
if (start == NULL) return;
- for (void** p = start; p < end; p++) {
- *p = reinterpret_cast<void*>(v8::internal::kHandleZapValue);
+ for (Object** p = start; p < end; p++) {
+ *reinterpret_cast<Address*>(p) = v8::internal::kHandleZapValue;
}
}
diff --git a/V8Binding/v8/src/handles.h b/V8Binding/v8/src/handles.h
index 8c9cbeb..847aebb 100644
--- a/V8Binding/v8/src/handles.h
+++ b/V8Binding/v8/src/handles.h
@@ -121,7 +121,7 @@ class HandleScope {
// Creates a new handle with the given value.
template <typename T>
static inline T** CreateHandle(T* value) {
- void** cur = current_.next;
+ internal::Object** cur = current_.next;
if (cur == current_.limit) cur = Extend();
// Update the current next field, set the value in the created
// handle, and return the result.
@@ -164,13 +164,13 @@ class HandleScope {
}
// Extend the handle scope making room for more handles.
- static void** Extend();
+ static internal::Object** Extend();
// Deallocates any extensions used by the current scope.
static void DeleteExtensions();
// Zaps the handles in the half-open interval [start, end).
- static void ZapRange(void** start, void** end);
+ static void ZapRange(internal::Object** start, internal::Object** end);
friend class v8::HandleScope;
friend class v8::ImplementationUtilities;
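
Note: typing handle slots as Object** instead of void** lets the rest of the heap treat handle blocks uniformly; ZapRange correspondingly writes the zap word through an Address alias rather than storing a fake pointer. A compilable sketch (plain C++; the zap constant is illustrative, not V8's kHandleZapValue):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    typedef uintptr_t Address;
    struct Object;  // opaque heap object type

    static const Address kZapValue = static_cast<Address>(0xbaddead);

    static void ZapRange(Object** start, Object** end) {
      if (start == NULL) return;
      for (Object** p = start; p < end; p++)
        *reinterpret_cast<Address*>(p) = kZapValue;  // word-wise overwrite
    }

    int main() {
      Object* slots[4] = { NULL, NULL, NULL, NULL };
      ZapRange(slots, slots + 4);
      assert(*reinterpret_cast<Address*>(&slots[3]) == kZapValue);
      return 0;
    }
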
diff --git a/V8Binding/v8/src/heap-inl.h b/V8Binding/v8/src/heap-inl.h
index 114ae0d..0646878 100644
--- a/V8Binding/v8/src/heap-inl.h
+++ b/V8Binding/v8/src/heap-inl.h
@@ -238,7 +238,7 @@ int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
amount_of_external_allocated_memory_ -
amount_of_external_allocated_memory_at_last_global_gc_;
if (amount_since_last_global_gc > external_allocation_limit_) {
- CollectAllGarbage();
+ CollectAllGarbage(false);
}
} else {
// Avoid underflow.
@@ -285,7 +285,7 @@ void Heap::SetLastScriptId(Object* last_script_id) {
} \
if (!__object__->IsRetryAfterGC()) RETURN_EMPTY; \
Counters::gc_last_resort_from_handles.Increment(); \
- Heap::CollectAllGarbage(); \
+ Heap::CollectAllGarbage(false); \
{ \
AlwaysAllocateScope __scope__; \
__object__ = FUNCTION_CALL; \
diff --git a/V8Binding/v8/src/heap.cc b/V8Binding/v8/src/heap.cc
index 9b55e07..3b2bd40 100644
--- a/V8Binding/v8/src/heap.cc
+++ b/V8Binding/v8/src/heap.cc
@@ -332,7 +332,7 @@ void Heap::CollectAllGarbageIfContextDisposed() {
// informed decisions about when to force a collection.
if (!FLAG_expose_gc && context_disposed_pending_) {
HistogramTimerScope scope(&Counters::gc_context);
- CollectAllGarbage();
+ CollectAllGarbage(false);
}
context_disposed_pending_ = false;
}
@@ -467,6 +467,7 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
old_gen_exhausted_ = false;
}
Scavenge();
+
Counters::objs_since_last_young.Set(0);
PostGarbageCollectionProcessing();
@@ -487,7 +488,10 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
void Heap::PostGarbageCollectionProcessing() {
// Process weak handles post gc.
- GlobalHandles::PostGarbageCollectionProcessing();
+ {
+ DisableAssertNoAllocation allow_allocation;
+ GlobalHandles::PostGarbageCollectionProcessing();
+ }
// Update flat string readers.
FlatStringReader::PostGarbageCollectionProcessing();
}
@@ -665,8 +669,6 @@ void Heap::Scavenge() {
survived_since_last_expansion_ > new_space_.Capacity()) {
// Grow the size of new space if there is room to grow and enough
// data has survived scavenge since the last expansion.
- // TODO(1240712): NewSpace::Grow has a return value which is
- // ignored here.
new_space_.Grow();
survived_since_last_expansion_ = 0;
}
@@ -2089,8 +2091,9 @@ Object* Heap::AllocateInitialMap(JSFunction* fun) {
if (count > in_object_properties) {
count = in_object_properties;
}
- DescriptorArray* descriptors = *Factory::NewDescriptorArray(count);
- if (descriptors->IsFailure()) return descriptors;
+ Object* descriptors_obj = DescriptorArray::Allocate(count);
+ if (descriptors_obj->IsFailure()) return descriptors_obj;
+ DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
for (int i = 0; i < count; i++) {
String* name = fun->shared()->GetThisPropertyAssignmentName(i);
ASSERT(name->IsSymbol());
@@ -2776,6 +2779,41 @@ STRUCT_LIST(MAKE_CASE)
}
+bool Heap::IdleNotification() {
+ static const int kIdlesBeforeCollection = 7;
+ static int number_idle_notifications = 0;
+ static int last_gc_count = gc_count_;
+
+ bool finished = false;
+
+ if (last_gc_count == gc_count_) {
+ number_idle_notifications++;
+ } else {
+ number_idle_notifications = 0;
+ last_gc_count = gc_count_;
+ }
+
+ if (number_idle_notifications >= kIdlesBeforeCollection) {
+ // The first time through we collect without forcing compaction.
+ // The second time through we force compaction and quit.
+ bool force_compaction =
+ number_idle_notifications > kIdlesBeforeCollection;
+ CollectAllGarbage(force_compaction);
+ last_gc_count = gc_count_;
+ if (force_compaction) {
+ // Shrink new space.
+ new_space_.Shrink();
+ number_idle_notifications = 0;
+ finished = true;
+ }
+ }
+
+ // Uncommit unused memory in new space.
+ Heap::UncommitFromSpace();
+ return finished;
+}
+
+
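
Note: a standalone model of the heuristic just added (plain C++; only the counting logic, with the collector calls stubbed out): seven consecutive idle rounds without an intervening GC trigger a plain collection, and the eighth forces compaction, shrinks new space, and reports the cycle finished.

    #include <cassert>

    static const int kIdlesBeforeCollection = 7;

    struct IdleModel {
      int idle_rounds;
      IdleModel() : idle_rounds(0) {}
      bool Notify() {
        ++idle_rounds;
        if (idle_rounds < kIdlesBeforeCollection) return false;
        bool force_compaction = idle_rounds > kIdlesBeforeCollection;
        // CollectAllGarbage(force_compaction) would run here.
        if (force_compaction) {  // second pass: compact, shrink, finish
          idle_rounds = 0;
          return true;
        }
        return false;            // first pass: collect without compaction
      }
    };

    int main() {
      IdleModel model;
      for (int i = 0; i < 7; ++i) assert(!model.Notify());
      assert(model.Notify());  // eighth idle round completes the cycle
      return 0;
    }
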
#ifdef DEBUG
void Heap::Print() {
@@ -2941,7 +2979,7 @@ bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
#ifdef DEBUG
void Heap::ZapFromSpace() {
- ASSERT(HAS_HEAP_OBJECT_TAG(kFromSpaceZapValue));
+ ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
for (Address a = new_space_.FromSpaceLow();
a < new_space_.FromSpaceHigh();
a += kPointerSize) {
@@ -3214,6 +3252,19 @@ bool Heap::Setup(bool create_heap_objects) {
}
+void Heap::SetStackLimit(intptr_t limit) {
+ // We don't use the stack limit in the roots array on x86-64 yet, but since
+  // pointers are generally out of range of Smis we should set the value anyway.
+#if !V8_HOST_ARCH_64_BIT
+ // Set up the special root array entry containing the stack guard.
+ // This is actually an address, but the tag makes the GC ignore it.
+ set_stack_limit(Smi::FromInt(limit >> kSmiTagSize));
+#else
+ set_stack_limit(Smi::FromInt(0));
+#endif
+}
+
+
void Heap::TearDown() {
GlobalHandles::TearDown();
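
Note: SetStackLimit smuggles an address into a root slot declared as a Smi. Shifting right by the tag size and re-tagging clears the low bit, so the GC skips the entry while generated code still sees (nearly) the real limit. A sketch of the encoding (plain C++, 32-bit Smi layout assumed, address illustrative):

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;

    // Equivalent of set_stack_limit(Smi::FromInt(limit >> kSmiTagSize)).
    static intptr_t StackLimitAsSmi(intptr_t limit) {
      return (limit >> kSmiTagSize) << kSmiTagSize;
    }

    int main() {
      intptr_t limit = 0x7ffff00c;                // illustrative address
      intptr_t encoded = StackLimitAsSmi(limit);
      assert((encoded & 1) == 0);                 // tagged as a Smi
      assert(encoded == (limit & ~intptr_t(1)));  // off by at most one bit
      return 0;
    }
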
diff --git a/V8Binding/v8/src/heap.h b/V8Binding/v8/src/heap.h
index a9d44c6..212dfa7 100644
--- a/V8Binding/v8/src/heap.h
+++ b/V8Binding/v8/src/heap.h
@@ -132,7 +132,8 @@ namespace internal {
V(FixedArray, number_string_cache, NumberStringCache) \
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
- V(Object, last_script_id, LastScriptId)
+ V(Object, last_script_id, LastScriptId) \
+ V(Smi, stack_limit, StackLimit)
#define ROOT_LIST(V) \
@@ -227,6 +228,11 @@ class Heap : public AllStatic {
// Destroys all memory allocated by the heap.
static void TearDown();
+ // Sets the stack limit in the roots_ array. Some architectures generate code
+  // that reads the limit from here, because it is faster than loading it from
+  // the static jslimit_ variable.
+ static void SetStackLimit(intptr_t limit);
+
// Returns whether Setup has been called.
static bool HasBeenSetup();
@@ -629,7 +635,7 @@ class Heap : public AllStatic {
// Performs a full garbage collection. Force compaction if the
// parameter is true.
- static void CollectAllGarbage(bool force_compaction = false);
+ static void CollectAllGarbage(bool force_compaction);
// Performs a full garbage collection if a context has been disposed
// since the last time the check was performed.
@@ -733,6 +739,9 @@ class Heap : public AllStatic {
// Update the next script id.
static inline void SetLastScriptId(Object* last_script_id);
+ // Generated code can embed this address to get access to the roots.
+ static Object** roots_address() { return roots_; }
+
#ifdef DEBUG
static void Print();
static void PrintHandles();
@@ -839,6 +848,29 @@ class Heap : public AllStatic {
> old_gen_allocation_limit_;
}
+ // Can be called when the embedding application is idle.
+ static bool IdleNotification();
+
+ // Declare all the root indices.
+ enum RootListIndex {
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+ STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
+
+// Utility type maps
+#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
+ STRUCT_LIST(DECLARE_STRUCT_MAP)
+#undef DECLARE_STRUCT_MAP
+
+#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
+ SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
+#undef SYMBOL_DECLARATION
+
+ kSymbolTableRootIndex,
+ kStrongRootListLength = kSymbolTableRootIndex,
+ kRootListLength
+ };
+
private:
static int semispace_size_;
static int initial_semispace_size_;
@@ -923,26 +955,6 @@ class Heap : public AllStatic {
// last GC.
static int old_gen_exhausted_;
- // Declare all the root indices.
- enum RootListIndex {
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
- STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
-
-// Utility type maps
-#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
- STRUCT_LIST(DECLARE_STRUCT_MAP)
-#undef DECLARE_STRUCT_MAP
-
-#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
- SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
-#undef SYMBOL_DECLARATION
-
- kSymbolTableRootIndex,
- kStrongRootListLength = kSymbolTableRootIndex,
- kRootListLength
- };
-
static Object* roots_[kRootListLength];
struct StringTypeTable {
@@ -1388,6 +1400,20 @@ class AssertNoAllocation {
bool old_state_;
};
+class DisableAssertNoAllocation {
+ public:
+ DisableAssertNoAllocation() {
+ old_state_ = Heap::allow_allocation(true);
+ }
+
+ ~DisableAssertNoAllocation() {
+ Heap::allow_allocation(old_state_);
+ }
+
+ private:
+ bool old_state_;
+};
+
#else // ndef DEBUG
class AssertNoAllocation {
@@ -1396,6 +1422,12 @@ class AssertNoAllocation {
~AssertNoAllocation() { }
};
+class DisableAssertNoAllocation {
+ public:
+ DisableAssertNoAllocation() { }
+ ~DisableAssertNoAllocation() { }
+};
+
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
diff --git a/V8Binding/v8/src/ia32/builtins-ia32.cc b/V8Binding/v8/src/ia32/builtins-ia32.cc
index 6de9de6..55dc92d 100644
--- a/V8Binding/v8/src/ia32/builtins-ia32.cc
+++ b/V8Binding/v8/src/ia32/builtins-ia32.cc
@@ -132,15 +132,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Make sure that the maximum heap object size will never cause us
  // problems here, because it is always greater than the maximum
// instance size that can be represented in a byte.
- ASSERT(Heap::MaxObjectSizeInPagedSpace() >= (1 << kBitsPerByte));
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
- __ mov(ebx, Operand::StaticVariable(new_space_allocation_top));
- __ add(edi, Operand(ebx)); // Calculate new top
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
- __ cmp(edi, Operand::StaticVariable(new_space_allocation_limit));
- __ j(above_equal, &rt_call);
+ ASSERT(Heap::MaxObjectSizeInPagedSpace() >= JSObject::kMaxInstanceSize);
+ __ AllocateObjectInNewSpace(edi, ebx, edi, no_reg, &rt_call, false);
// Allocated the JSObject, now initialize the fields.
// eax: initial map
// ebx: JSObject
@@ -165,15 +158,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ j(less, &loop);
}
- // Mostly done with the JSObject. Add the heap tag and store the new top, so
- // that we can continue and jump into the continuation code at any time from
- // now on. Any failures need to undo the setting of the new top, so that the
- // heap is in a consistent state and verifiable.
+ // Add the object tag to make the JSObject real, so that we can continue and
+ // jump into the continuation code at any time from now on. Any failures
+ // need to undo the allocation, so that the heap is in a consistent state
+ // and verifiable.
// eax: initial map
// ebx: JSObject
// edi: start of next object
__ or_(Operand(ebx), Immediate(kHeapObjectTag));
- __ mov(Operand::StaticVariable(new_space_allocation_top), edi);
// Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is.
@@ -198,10 +190,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// edx: number of elements in properties array
ASSERT(Heap::MaxObjectSizeInPagedSpace() >
(FixedArray::kHeaderSize + 255*kPointerSize));
- __ lea(ecx, Operand(edi, edx, times_pointer_size, FixedArray::kHeaderSize));
- __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
- __ j(above_equal, &undo_allocation);
- __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
+ __ AllocateObjectInNewSpace(FixedArray::kHeaderSize,
+ times_pointer_size,
+ edx,
+ edi,
+ ecx,
+ no_reg,
+ &undo_allocation,
+ true);
// Initialize the FixedArray.
// ebx: JSObject
@@ -245,8 +241,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// allocated objects unused properties.
// ebx: JSObject (previous new top)
__ bind(&undo_allocation);
- __ xor_(Operand(ebx), Immediate(kHeapObjectTag)); // clear the heap tag
- __ mov(Operand::StaticVariable(new_space_allocation_top), ebx);
+ __ UndoAllocationInNewSpace(ebx);
}
// Allocate the new receiver object using the runtime call.
@@ -669,7 +664,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ mov(ebp, Operand(esp));
// Store the arguments adaptor context sentinel.
- __ push(Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Push the function on the stack.
__ push(edi);
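
Note: the open-coded top/limit sequences this file used to contain collapse into AllocateObjectInNewSpace, which is bump-pointer allocation: grab top, advance, compare against the limit, and either commit the new top or branch to the GC path. A standalone model (plain C++, not V8 source):

    #include <cassert>
    #include <cstddef>

    struct NewSpaceModel {
      char* top;    // new_space_allocation_top
      char* limit;  // new_space_allocation_limit
      // Returns NULL when the caller must take the gc_required path.
      void* Allocate(size_t size) {
        if (top + size > limit) return NULL;  // j(above, gc_required)
        void* result = top;
        top += size;                          // UpdateAllocationTopHelper
        return result;
      }
    };

    int main() {
      char arena[64];
      NewSpaceModel space = { arena, arena + sizeof(arena) };
      assert(space.Allocate(48) == arena);  // fast path: bump and return
      assert(space.Allocate(32) == NULL);   // exhausted: runtime fallback
      return 0;
    }
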
diff --git a/V8Binding/v8/src/ia32/codegen-ia32.cc b/V8Binding/v8/src/ia32/codegen-ia32.cc
index bf1f81b..a9face1 100644
--- a/V8Binding/v8/src/ia32/codegen-ia32.cc
+++ b/V8Binding/v8/src/ia32/codegen-ia32.cc
@@ -2139,7 +2139,8 @@ void CodeGenerator::CallApplyLazy(Property* apply,
Label invoke, adapted;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+ __ cmp(Operand(ecx),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adapted);
// No arguments adaptor frame. Copy fixed number of arguments.
@@ -4912,7 +4913,7 @@ void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &check_frame_marker);
__ mov(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
@@ -6947,21 +6948,18 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register result) {
- ExternalReference allocation_top =
- ExternalReference::new_space_allocation_top_address();
- ExternalReference allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
- __ mov(Operand(scratch1), Immediate(allocation_top));
- __ mov(result, Operand(scratch1, 0));
- __ lea(scratch2, Operand(result, HeapNumber::kSize)); // scratch2: new top
- __ cmp(scratch2, Operand::StaticVariable(allocation_limit));
- __ j(above, need_gc, not_taken);
-
- __ mov(Operand(scratch1, 0), scratch2); // store new top
+ // Allocate heap number in new space.
+ __ AllocateObjectInNewSpace(HeapNumber::kSize,
+ result,
+ scratch1,
+ scratch2,
+ need_gc,
+ false);
+
+ // Set the map and tag the result.
__ mov(Operand(result, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
- // Tag old top and use as result.
- __ add(Operand(result), Immediate(kHeapObjectTag));
+ __ or_(Operand(result), Immediate(kHeapObjectTag));
}
@@ -7109,7 +7107,7 @@ void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
Label adaptor;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor);
// Nothing to do: The formal number of parameters has already been
@@ -7141,7 +7139,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
Label adaptor;
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor);
// Check index against formal parameters count limit passed in
@@ -7192,7 +7190,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
Label runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &runtime);
// Patch the arguments.length and the parameters pointer.
@@ -7724,11 +7722,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ push(ebp);
__ mov(ebp, Operand(esp));
- // Save callee-saved registers (C calling conventions).
+  // Push the frame marker in both the context and the function slot.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- // Push something that is not an arguments adaptor.
- __ push(Immediate(~ArgumentsAdaptorFrame::SENTINEL));
- __ push(Immediate(Smi::FromInt(marker))); // @ function offset
+ __ push(Immediate(Smi::FromInt(marker))); // context slot
+ __ push(Immediate(Smi::FromInt(marker))); // function slot
+ // Save callee-saved registers (C calling conventions).
__ push(edi);
__ push(esi);
__ push(ebx);
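
A note on the sentinel change running through this file: arguments adaptor frames are now marked by storing a Smi-encoded StackFrame type in the context slot. A Smi can never collide with a real context pointer, since heap object pointers always carry the heap-object tag in their low bit. The sketch below shows the tagging arithmetic under the standard 32-bit encoding (kSmiTagSize == 1, tag bit zero); the marker value is hypothetical, the real one comes from the StackFrame::Type enum.

    #include <cassert>
    #include <cstdint>

    // Smi encoding on ia32: the payload sits in the upper 31 bits and the
    // low bit is the tag (0 for Smis, 1 for heap object pointers).
    const int kSmiTagSize = 1;

    intptr_t SmiFromInt(int value) {
      return static_cast<intptr_t>(value) << kSmiTagSize;
    }

    int SmiValue(intptr_t smi) {
      return static_cast<int>(smi >> kSmiTagSize);
    }

    int main() {
      const int kArgumentsAdaptorMarker = 4;  // hypothetical enum value
      intptr_t marker = SmiFromInt(kArgumentsAdaptorMarker);
      assert((marker & 1) == 0);  // looks like a Smi, never like a pointer
      assert(SmiValue(marker) == kArgumentsAdaptorMarker);
      return 0;
    }
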
diff --git a/V8Binding/v8/src/ia32/macro-assembler-ia32.cc b/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
index e362cd3..754b74a 100644
--- a/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
+++ b/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
@@ -620,6 +620,146 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
+void MacroAssembler::LoadAllocationTopHelper(
+ Register result,
+ Register result_end,
+ Register scratch,
+ bool result_contains_top_on_entry) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+
+ // Just return if allocation top is already known.
+ if (result_contains_top_on_entry) {
+ // No use of scratch if allocation top is provided.
+ ASSERT(scratch.is(no_reg));
+ return;
+ }
+
+ // Move address of new object to result. Use scratch register if available.
+ if (scratch.is(no_reg)) {
+ mov(result, Operand::StaticVariable(new_space_allocation_top));
+ } else {
+ ASSERT(!scratch.is(result_end));
+ mov(Operand(scratch), Immediate(new_space_allocation_top));
+ mov(result, Operand(scratch, 0));
+ }
+}
+
+
+void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
+ Register scratch) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+
+ // Update new top. Use scratch if available.
+ if (scratch.is(no_reg)) {
+ mov(Operand::StaticVariable(new_space_allocation_top), result_end);
+ } else {
+ mov(Operand(scratch, 0), result_end);
+ }
+}
+
+void MacroAssembler::AllocateObjectInNewSpace(
+ int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry) {
+ ASSERT(!result.is(result_end));
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result,
+ result_end,
+ scratch,
+ result_contains_top_on_entry);
+
+ // Calculate new top and bail out if new space is exhausted.
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ lea(result_end, Operand(result, object_size));
+ cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
+ j(above, gc_required, not_taken);
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
+}
+
+
+void MacroAssembler::AllocateObjectInNewSpace(
+ int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry) {
+ ASSERT(!result.is(result_end));
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result,
+ result_end,
+ scratch,
+ result_contains_top_on_entry);
+
+ // Calculate new top and bail out if new space is exhausted.
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ lea(result_end, Operand(result, element_count, element_size, header_size));
+ cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
+ j(above, gc_required);
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
+}
+
+
+void MacroAssembler::AllocateObjectInNewSpace(
+ Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry) {
+ ASSERT(!result.is(result_end));
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result,
+ result_end,
+ scratch,
+ result_contains_top_on_entry);
+
+
+ // Calculate new top and bail out if new space is exhausted.
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ if (!object_size.is(result_end)) {
+ mov(result_end, object_size);
+ }
+ add(result_end, Operand(result));
+ cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
+ j(above, gc_required, not_taken);
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+
+ // Make sure the object has no tag before resetting top.
+ and_(Operand(object), Immediate(~kHeapObjectTagMask));
+#ifdef DEBUG
+ cmp(object, Operand::StaticVariable(new_space_allocation_top));
+ Check(below, "Undo allocation of non allocated memory");
+#endif
+ mov(Operand::StaticVariable(new_space_allocation_top), object);
+}
+
+
void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
Register result,
Register op,
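
The helpers added above put V8's new-space bump-pointer allocation in one place: load the current allocation top, compute the proposed object end, take the gc_required path if it crosses the limit, and otherwise publish the new top. A minimal C++ model of that control flow, with illustrative names rather than V8 API:

    #include <cstddef>
    #include <cstdint>

    // Stand-ins for the new_space_allocation_top/limit external references.
    struct NewSpaceModel {
      uintptr_t top;
      uintptr_t limit;
    };

    // Returns the untagged start address of the new object, or 0 when the
    // caller must take the gc_required path instead.
    uintptr_t AllocateRaw(NewSpaceModel* space, size_t object_size) {
      uintptr_t result = space->top;                // LoadAllocationTopHelper
      uintptr_t result_end = result + object_size;  // lea result_end, [result + size]
      if (result_end > space->limit) return 0;      // cmp limit; j(above, gc_required)
      space->top = result_end;                      // UpdateAllocationTopHelper
      return result;                                // not yet heap-tagged
    }

    // Mirrors UndoAllocationInNewSpace: winding the top back to an object's
    // start frees it and everything allocated after it in one step.
    void UndoAllocation(NewSpaceModel* space, uintptr_t object_start) {
      space->top = object_start;
    }
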
diff --git a/V8Binding/v8/src/ia32/macro-assembler-ia32.h b/V8Binding/v8/src/ia32/macro-assembler-ia32.h
index 42620dd..f10ec16 100644
--- a/V8Binding/v8/src/ia32/macro-assembler-ia32.h
+++ b/V8Binding/v8/src/ia32/macro-assembler-ia32.h
@@ -184,6 +184,48 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
+ // Allocation support
+
+  // Allocate an object in new space. If the new space is exhausted, control
+  // continues at the gc_required label. The allocated object is returned in
+  // result and the end of the new object in result_end. The register
+  // scratch can be passed as no_reg, in which case an additional object
+  // reference will be added to the reloc info. The returned pointers in result
+  // and result_end have not yet been tagged as heap objects. If
+  // result_contains_top_on_entry is true, the content of result is known to be
+  // the allocation top on entry (could be result_end from a previous call to
+  // AllocateObjectInNewSpace). If result_contains_top_on_entry is true, scratch
+  // should be no_reg as it is never used.
+ void AllocateObjectInNewSpace(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry);
+
+ void AllocateObjectInNewSpace(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry);
+
+ void AllocateObjectInNewSpace(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry);
+
+ // Undo allocation in new space. The object passed and objects allocated after
+ // it will no longer be allocated. Make sure that no pointers are left to the
+  // object(s) no longer allocated, as they would be invalid when allocation is
+  // undone.
+ void UndoAllocationInNewSpace(Register object);
+
+ // ---------------------------------------------------------------------------
// Support functions.
// Check if result is zero and op is negative.
@@ -303,6 +345,13 @@ class MacroAssembler: public Assembler {
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
+
+ // Allocation support helpers.
+ void LoadAllocationTopHelper(Register result,
+ Register result_end,
+ Register scratch,
+ bool result_contains_top_on_entry);
+ void UpdateAllocationTopHelper(Register result_end, Register scratch);
};
diff --git a/V8Binding/v8/src/ia32/stub-cache-ia32.cc b/V8Binding/v8/src/ia32/stub-cache-ia32.cc
index a626377..f599f79 100644
--- a/V8Binding/v8/src/ia32/stub-cache-ia32.cc
+++ b/V8Binding/v8/src/ia32/stub-cache-ia32.cc
@@ -1740,6 +1740,136 @@ Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
}
+// Specialized stub for constructing objects from functions which have only
+// simple assignments of the form this.x = ...; in their body.
+Object* ConstructStubCompiler::CompileConstructStub(
+ SharedFunctionInfo* shared) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- edi : constructor
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+ Label generic_stub_call;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Check to see whether there are any break points in the function code. If
+  // there are, jump to the generic constructor stub, which calls the actual
+  // code for the function, thereby hitting the break points.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kDebugInfoOffset));
+ __ cmp(ebx, Factory::undefined_value());
+ __ j(not_equal, &generic_stub_call, not_taken);
+#endif
+
+ // Load the initial map and verify that it is in fact a map.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+  // The smi tag check below covers both a NULL and a Smi.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &generic_stub_call);
+ __ CmpObjectType(ebx, MAP_TYPE, ecx);
+ __ j(not_equal, &generic_stub_call);
+
+#ifdef DEBUG
+ // Cannot construct functions this way.
+ // edi: constructor
+ // ebx: initial map
+ __ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
+ __ Assert(not_equal, "Function constructed by construct stub.");
+#endif
+
+ // Now allocate the JSObject on the heap by moving the new space allocation
+ // top forward.
+ // edi: constructor
+ // ebx: initial map
+ __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
+ __ shl(ecx, kPointerSizeLog2);
+ // Make sure that the maximum heap object size will never cause us
+ // problems here.
+ ASSERT(Heap::MaxObjectSizeInPagedSpace() >= JSObject::kMaxInstanceSize);
+ __ AllocateObjectInNewSpace(ecx, edx, ecx, no_reg, &generic_stub_call, false);
+
+ // Allocated the JSObject, now initialize the fields and add the heap tag.
+ // ebx: initial map
+ // edx: JSObject
+ __ mov(Operand(edx, JSObject::kMapOffset), ebx);
+ __ mov(ebx, Factory::empty_fixed_array());
+ __ mov(Operand(edx, JSObject::kPropertiesOffset), ebx);
+ __ mov(Operand(edx, JSObject::kElementsOffset), ebx);
+ __ or_(Operand(edx), Immediate(kHeapObjectTag));
+
+ // Push the allocated object to the stack. This is the object that will be
+ // returned.
+ __ push(edx);
+
+ // eax: argc
+ // edx: JSObject
+ // Load the address of the first in-object property into edx.
+ __ lea(edx, Operand(edx, JSObject::kHeaderSize));
+ __ xor_(Operand(edx), Immediate(kHeapObjectTag)); // Clear heap object tag.
+ // Calculate the location of the first argument. The stack contains the
+ // allocated object and the return address on top of the argc arguments.
+ __ lea(ecx, Operand(esp, eax, times_4, 1 * kPointerSize));
+
+ // Use edi for holding undefined which is used in several places below.
+ __ mov(edi, Factory::undefined_value());
+
+ // eax: argc
+ // ecx: first argument
+ // edx: first in-object property of the JSObject
+ // edi: undefined
+ // Fill the initialized properties with a constant value or a passed argument
+ // depending on the this.x = ...; assignment in the function.
+ for (int i = 0; i < shared->this_property_assignments_count(); i++) {
+ if (shared->IsThisPropertyAssignmentArgument(i)) {
+ Label not_passed;
+ // Set the property to undefined.
+ __ mov(Operand(edx, i * kPointerSize), edi);
+ // Check if the argument assigned to the property is actually passed.
+ int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+ __ cmp(eax, arg_number);
+ __ j(below_equal, &not_passed);
+ // Argument passed - find it on the stack.
+ __ mov(ebx, Operand(ecx, arg_number * -kPointerSize));
+ __ mov(Operand(edx, i * kPointerSize), ebx);
+ __ bind(&not_passed);
+ } else {
+ // Set the property to the constant value.
+ Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+ __ mov(Operand(edx, i * kPointerSize), Immediate(constant));
+ }
+ }
+
+ // Fill the unused in-object property fields with undefined.
+ for (int i = shared->this_property_assignments_count();
+ i < shared->CalculateInObjectProperties();
+ i++) {
+ __ mov(Operand(edx, i * kPointerSize), edi);
+ }
+
+  // Move argc to ebx and retrieve the JSObject to return.
+ __ mov(ebx, eax);
+ __ pop(eax);
+
+ // Remove caller arguments and receiver from the stack and return.
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
+ __ push(ecx);
+ __ IncrementCounter(&Counters::constructed_objects, 1);
+ __ IncrementCounter(&Counters::constructed_objects_stub, 1);
+ __ ret(0);
+
+ // Jump to the generic stub in case the specialized code cannot handle the
+ // construction.
+ __ bind(&generic_stub_call);
+ Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Handle<Code> generic_construct_stub(code);
+ __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode();
+}
+
+
#undef __
} } // namespace v8::internal
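
The fill loop in the stub above is easier to follow against a scalar model. Each this.x = ...; assignment either copies a passed argument into an in-object property, defaults it to undefined when the caller supplied too few arguments, or stores a compile-time constant; remaining in-object fields are cleared to undefined. A sketch with plain ints standing in for tagged values:

    #include <cstddef>
    #include <vector>

    // One 'this.x = ...;' assignment: arg_index >= 0 copies a parameter,
    // arg_index == -1 stores the constant (see the FixedArray triples
    // added to SharedFunctionInfo in objects.cc below).
    struct Assignment {
      int arg_index;
      int constant;
    };

    void FillInObjectProperties(const std::vector<Assignment>& assignments,
                                const std::vector<int>& args,
                                size_t total_property_count,
                                std::vector<int>* properties,
                                int undefined) {
      // Unused fields and not-passed arguments both end up undefined.
      properties->assign(total_property_count, undefined);
      for (size_t i = 0; i < assignments.size(); i++) {
        const Assignment& a = assignments[i];
        if (a.arg_index >= 0) {
          // Mirrors the stub's cmp(eax, arg_number); j(below_equal, ...).
          if (static_cast<size_t>(a.arg_index) < args.size()) {
            (*properties)[i] = args[a.arg_index];
          }
        } else {
          (*properties)[i] = a.constant;
        }
      }
    }
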
diff --git a/V8Binding/v8/src/mark-compact.cc b/V8Binding/v8/src/mark-compact.cc
index 10e81ac..d139093 100644
--- a/V8Binding/v8/src/mark-compact.cc
+++ b/V8Binding/v8/src/mark-compact.cc
@@ -76,7 +76,7 @@ void MarkCompactCollector::CollectGarbage() {
SweepLargeObjectSpace();
- if (compacting_collection_) {
+ if (IsCompacting()) {
EncodeForwardingAddresses();
UpdatePointers();
diff --git a/V8Binding/v8/src/mksnapshot.cc b/V8Binding/v8/src/mksnapshot.cc
index 4891f37..80789eb 100644
--- a/V8Binding/v8/src/mksnapshot.cc
+++ b/V8Binding/v8/src/mksnapshot.cc
@@ -171,7 +171,7 @@ int main(int argc, char** argv) {
}
}
// Get rid of unreferenced scripts with a global GC.
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
i::Serializer ser;
ser.Serialize();
v8::internal::byte* bytes;
diff --git a/V8Binding/v8/src/objects-debug.cc b/V8Binding/v8/src/objects-debug.cc
index f713171..ef4aae5 100644
--- a/V8Binding/v8/src/objects-debug.cc
+++ b/V8Binding/v8/src/objects-debug.cc
@@ -463,6 +463,8 @@ void Map::MapPrint() {
PrintF(" - type: %s\n", TypeToString(instance_type()));
PrintF(" - instance size: %d\n", instance_size());
PrintF(" - inobject properties: %d\n", inobject_properties());
+ PrintF(" - pre-allocated property fields: %d\n",
+ pre_allocated_property_fields());
PrintF(" - unused property fields: %d\n", unused_property_fields());
if (is_hidden_prototype()) {
PrintF(" - hidden_prototype\n");
diff --git a/V8Binding/v8/src/objects-inl.h b/V8Binding/v8/src/objects-inl.h
index 91aae2f..cabc8a2 100644
--- a/V8Binding/v8/src/objects-inl.h
+++ b/V8Binding/v8/src/objects-inl.h
@@ -131,7 +131,7 @@ bool Object::IsSmi() {
bool Object::IsHeapObject() {
- return HAS_HEAP_OBJECT_TAG(this);
+ return Internals::HasHeapObjectTag(this);
}
@@ -300,6 +300,10 @@ uint32_t StringShape::full_representation_tag() {
}
+STATIC_CHECK((kStringRepresentationMask | kStringEncodingMask) ==
+ Internals::kFullStringRepresentationMask);
+
+
uint32_t StringShape::size_tag() {
return (type_ & kStringSizeMask);
}
@@ -325,6 +329,10 @@ bool StringShape::IsExternalTwoByte() {
}
+STATIC_CHECK((kExternalStringTag | kTwoByteStringTag) ==
+ Internals::kExternalTwoByteRepresentationTag);
+
+
uc32 FlatStringReader::Get(int index) {
ASSERT(0 <= index && index <= length_);
if (is_ascii_) {
@@ -730,7 +738,7 @@ Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
int Smi::value() {
- return static_cast<int>(reinterpret_cast<intptr_t>(this)) >> kSmiTagSize;
+ return Internals::SmiValue(this);
}
@@ -814,15 +822,13 @@ Failure* Failure::RetryAfterGC(int requested_bytes) {
Failure* Failure::Construct(Type type, int value) {
int info = (value << kFailureTypeTagSize) | type;
- // TODO(X64): Stop using Smi validation for non-smi checks, even if they
- // happen to be identical at the moment.
- ASSERT(Smi::IsValid(info)); // Same validation check as in Smi
+ ASSERT(((info << kFailureTagSize) >> kFailureTagSize) == info);
return reinterpret_cast<Failure*>(
(static_cast<intptr_t>(info) << kFailureTagSize) | kFailureTag);
}
-bool Smi::IsValid(int value) {
+bool Smi::IsValid(intptr_t value) {
#ifdef DEBUG
bool in_range = (value >= kMinValue) && (value <= kMaxValue);
#endif
@@ -937,12 +943,13 @@ MapWord MapWord::EncodeAddress(Address map_address, int offset) {
Address MapWord::DecodeMapAddress(MapSpace* map_space) {
- int map_page_index = (value_ & kMapPageIndexMask) >> kMapPageIndexShift;
+ int map_page_index =
+ static_cast<int>((value_ & kMapPageIndexMask) >> kMapPageIndexShift);
ASSERT_MAP_PAGE_INDEX(map_page_index);
- int map_page_offset =
+ int map_page_offset = static_cast<int>(
((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift)
- << kObjectAlignmentBits;
+ << kObjectAlignmentBits);
return (map_space->PageAddress(map_page_index) + map_page_offset);
}
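
The rewritten Failure::Construct assert is a shift round-trip: a value fits in the available payload bits exactly when shifting the tag width out and back reproduces it, and unlike the old Smi validation this stays meaningful on 64-bit targets. A small model, with an illustrative tag width:

    #include <cassert>
    #include <cstdint>

    const int kTagSize = 2;  // illustrative width, not V8's constant

    bool FitsInPayload(intptr_t info) {
      // Shift through an unsigned type so the round trip is well defined
      // even when the top bits would overflow a signed shift.
      uintptr_t shifted = static_cast<uintptr_t>(info) << kTagSize;
      return (static_cast<intptr_t>(shifted) >> kTagSize) == info;
    }

    int main() {
      assert(FitsInPayload(42));
      assert(FitsInPayload(-42));
      assert(!FitsInPayload(INTPTR_MAX));  // top bits would be lost
      return 0;
    }
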
diff --git a/V8Binding/v8/src/objects.cc b/V8Binding/v8/src/objects.cc
index e4a3a67..9ea131f 100644
--- a/V8Binding/v8/src/objects.cc
+++ b/V8Binding/v8/src/objects.cc
@@ -2923,6 +2923,20 @@ Object* Map::CopyDropDescriptors() {
// Please note instance_type and instance_size are set when allocated.
Map::cast(result)->set_inobject_properties(inobject_properties());
Map::cast(result)->set_unused_property_fields(unused_property_fields());
+
+ // If the map has pre-allocated properties always start out with a descriptor
+ // array describing these properties.
+ if (pre_allocated_property_fields() > 0) {
+ ASSERT(constructor()->IsJSFunction());
+ JSFunction* ctor = JSFunction::cast(constructor());
+ Object* descriptors =
+ ctor->initial_map()->instance_descriptors()->RemoveTransitions();
+ if (descriptors->IsFailure()) return descriptors;
+ Map::cast(result)->set_instance_descriptors(
+ DescriptorArray::cast(descriptors));
+ Map::cast(result)->set_pre_allocated_property_fields(
+ pre_allocated_property_fields());
+ }
Map::cast(result)->set_bit_field(bit_field());
Map::cast(result)->set_bit_field2(bit_field2());
Map::cast(result)->ClearCodeCache();
@@ -4800,7 +4814,6 @@ void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
bool only_this_property_assignments,
bool only_simple_this_property_assignments,
FixedArray* assignments) {
- ASSERT(this_property_assignments()->IsUndefined());
set_compiler_hints(BooleanBit::set(compiler_hints(),
kHasOnlyThisPropertyAssignments,
only_this_property_assignments));
@@ -4812,6 +4825,18 @@ void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
}
+void SharedFunctionInfo::ClearThisPropertyAssignmentsInfo() {
+ set_compiler_hints(BooleanBit::set(compiler_hints(),
+ kHasOnlyThisPropertyAssignments,
+ false));
+ set_compiler_hints(BooleanBit::set(compiler_hints(),
+ kHasOnlySimpleThisPropertyAssignments,
+ false));
+ set_this_property_assignments(Heap::undefined_value());
+ set_this_property_assignments_count(0);
+}
+
+
String* SharedFunctionInfo::GetThisPropertyAssignmentName(int index) {
Object* obj = this_property_assignments();
ASSERT(obj->IsFixedArray());
@@ -4822,6 +4847,32 @@ String* SharedFunctionInfo::GetThisPropertyAssignmentName(int index) {
}
+bool SharedFunctionInfo::IsThisPropertyAssignmentArgument(int index) {
+ Object* obj = this_property_assignments();
+ ASSERT(obj->IsFixedArray());
+ ASSERT(index < this_property_assignments_count());
+ obj = FixedArray::cast(obj)->get(index * 3 + 1);
+ return Smi::cast(obj)->value() != -1;
+}
+
+
+int SharedFunctionInfo::GetThisPropertyAssignmentArgument(int index) {
+ ASSERT(IsThisPropertyAssignmentArgument(index));
+ Object* obj =
+ FixedArray::cast(this_property_assignments())->get(index * 3 + 1);
+ return Smi::cast(obj)->value();
+}
+
+
+Object* SharedFunctionInfo::GetThisPropertyAssignmentConstant(int index) {
+ ASSERT(!IsThisPropertyAssignmentArgument(index));
+ Object* obj =
+ FixedArray::cast(this_property_assignments())->get(index * 3 + 2);
+ return obj;
+}
+
+
+
// Support function for printing the source code to a StringStream
// without any allocation in the heap.
void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
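
The accessors above read a flat FixedArray in which every this.x = ...; assignment takes three consecutive slots: the property name, the argument index as a Smi (-1 when the right-hand side is a constant), and the constant value. A minimal decoder over that stride-3 layout, with ints standing in for the tagged slots:

    // Hypothetical flat decode of the this_property_assignments layout.
    struct AssignmentRecord {
      int name;       // slot 3i:     property name
      int arg_index;  // slot 3i + 1: parameter position, or -1
      int constant;   // slot 3i + 2: constant value when arg_index == -1
    };

    AssignmentRecord DecodeAssignment(const int* fixed_array, int index) {
      const int* slot = fixed_array + index * 3;
      AssignmentRecord r = { slot[0], slot[1], slot[2] };
      return r;
    }

    bool IsArgumentAssignment(const int* fixed_array, int index) {
      return fixed_array[index * 3 + 1] != -1;  // mirrors the Smi value check
    }
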
diff --git a/V8Binding/v8/src/objects.h b/V8Binding/v8/src/objects.h
index a402961..4b89899 100644
--- a/V8Binding/v8/src/objects.h
+++ b/V8Binding/v8/src/objects.h
@@ -905,7 +905,7 @@ class Smi: public Object {
static inline Smi* FromIntptr(intptr_t value);
// Returns whether value can be represented in a Smi.
- static inline bool IsValid(int value);
+ static inline bool IsValid(intptr_t value);
static inline bool IsIntptrValid(intptr_t);
@@ -1234,6 +1234,8 @@ class HeapObject: public Object {
static const int kMapOffset = Object::kHeaderSize;
static const int kHeaderSize = kMapOffset + kPointerSize;
+ STATIC_CHECK(kMapOffset == Internals::kHeapObjectMapOffset);
+
protected:
// helpers for calling an ObjectVisitor to iterate over pointers in the
// half-open range [start, end) specified as integer offsets
@@ -1664,6 +1666,8 @@ class JSObject: public HeapObject {
static const int kElementsOffset = kPropertiesOffset + kPointerSize;
static const int kHeaderSize = kElementsOffset + kPointerSize;
+ STATIC_CHECK(kHeaderSize == Internals::kJSObjectHeaderSize);
+
Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
private:
@@ -2897,6 +2901,8 @@ class Map: public HeapObject {
static const int kBitFieldOffset = kInstanceAttributesOffset + 2;
static const int kBitField2Offset = kInstanceAttributesOffset + 3;
+ STATIC_CHECK(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset);
+
// Bit positions for bit field.
static const int kUnused = 0; // To be used for marking recently used maps.
static const int kHasNonInstancePrototype = 1;
@@ -3108,6 +3114,9 @@ class SharedFunctionInfo: public HeapObject {
bool has_only_simple_this_property_assignments,
FixedArray* this_property_assignments);
+ // Clear information on assignments of the form this.x = ...;
+ void ClearThisPropertyAssignmentsInfo();
+
// Indicate that this function only consists of assignments of the form
// this.x = ...;.
inline bool has_only_this_property_assignments();
@@ -3122,6 +3131,9 @@ class SharedFunctionInfo: public HeapObject {
inline int this_property_assignments_count();
inline void set_this_property_assignments_count(int value);
String* GetThisPropertyAssignmentName(int index);
+ bool IsThisPropertyAssignmentArgument(int index);
+ int GetThisPropertyAssignmentArgument(int index);
+ Object* GetThisPropertyAssignmentConstant(int index);
// [source code]: Source code for the function.
bool HasSourceCode();
@@ -4128,6 +4140,8 @@ class ExternalString: public String {
static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
static const int kSize = kResourceOffset + kPointerSize;
+ STATIC_CHECK(kResourceOffset == Internals::kStringResourceOffset);
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalString);
};
@@ -4341,6 +4355,8 @@ class Proxy: public HeapObject {
static const int kProxyOffset = HeapObject::kHeaderSize;
static const int kSize = kProxyOffset + kPointerSize;
+ STATIC_CHECK(kProxyOffset == Internals::kProxyProxyOffset);
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Proxy);
};
diff --git a/V8Binding/v8/src/platform-win32.cc b/V8Binding/v8/src/platform-win32.cc
index 0a2d990..d4a183d 100644
--- a/V8Binding/v8/src/platform-win32.cc
+++ b/V8Binding/v8/src/platform-win32.cc
@@ -54,10 +54,6 @@
#define _WIN32_WINNT 0x500
#endif
-#ifdef _WIN64
-#error Windows 64-bit blatforms not supported
-#endif
-
#include <windows.h>
#include <time.h> // For LocalOffset() implementation.
@@ -1190,6 +1186,9 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
memset(&context, 0, sizeof(context));
context.ContextFlags = CONTEXT_CONTROL;
context.ContextFlags = CONTEXT_CONTROL;
+#ifdef _WIN64
+ // TODO(X64): Implement context capture.
+#else
__asm call x
__asm x: pop eax
__asm mov context.Eip, eax
@@ -1199,15 +1198,22 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
// capture the context instead of inline assembler. However it is
// only available on XP, Vista, Server 2003 and Server 2008 which
// might not be sufficient.
+#endif
// Initialize the stack walking
STACKFRAME64 stack_frame;
memset(&stack_frame, 0, sizeof(stack_frame));
+#ifdef _WIN64
+ stack_frame.AddrPC.Offset = context.Rip;
+ stack_frame.AddrFrame.Offset = context.Rbp;
+ stack_frame.AddrStack.Offset = context.Rsp;
+#else
stack_frame.AddrPC.Offset = context.Eip;
- stack_frame.AddrPC.Mode = AddrModeFlat;
stack_frame.AddrFrame.Offset = context.Ebp;
- stack_frame.AddrFrame.Mode = AddrModeFlat;
stack_frame.AddrStack.Offset = context.Esp;
+#endif
+ stack_frame.AddrPC.Mode = AddrModeFlat;
+ stack_frame.AddrFrame.Mode = AddrModeFlat;
stack_frame.AddrStack.Mode = AddrModeFlat;
int frames_count = 0;
@@ -1311,8 +1317,11 @@ double OS::nan_value() {
int OS::ActivationFrameAlignment() {
- // Floating point code runs faster if the stack is 8-byte aligned.
- return 8;
+#ifdef _WIN64
+ return 16; // Windows 64-bit ABI requires the stack to be 16-byte aligned.
+#else
+ return 8; // Floating-point math runs faster with 8-byte alignment.
+#endif
}
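
The one behavioral change in this file is the frame alignment: the Windows 64-bit ABI requires the stack to be 16-byte aligned at call sites, whereas the 8 bytes used on ia32 is purely a floating-point performance choice. Rounding a stack pointer down to such a power-of-two alignment is a single mask, as in this sketch:

    #include <cassert>
    #include <cstdint>

    uintptr_t AlignStackPointer(uintptr_t sp, uintptr_t alignment) {
      assert((alignment & (alignment - 1)) == 0);  // must be a power of two
      return sp & ~(alignment - 1);                // round down
    }

    int main() {
      assert(AlignStackPointer(0x1007, 16) == 0x1000);  // Win64 frames
      assert(AlignStackPointer(0x100C, 8) == 0x1008);   // ia32 frames
      return 0;
    }
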
diff --git a/V8Binding/v8/src/runtime.cc b/V8Binding/v8/src/runtime.cc
index b3e8aa4..213d9a3 100644
--- a/V8Binding/v8/src/runtime.cc
+++ b/V8Binding/v8/src/runtime.cc
@@ -45,6 +45,7 @@
#include "v8threads.h"
#include "smart-pointer.h"
#include "parser.h"
+#include "stub-cache.h"
namespace v8 {
namespace internal {
@@ -1235,6 +1236,9 @@ static Object* Runtime_SetCode(Arguments args) {
// Array, and Object, and some web code
// doesn't like seeing source code for constructors.
target->shared()->set_script(Heap::undefined_value());
+ // Clear the optimization hints related to the compiled code as these are no
+ // longer valid when the code is overwritten.
+ target->shared()->ClearThisPropertyAssignmentsInfo();
context = Handle<Context>(fun->context());
// Make sure we get a fresh copy of the literal vector to avoid
@@ -4326,11 +4330,21 @@ static Object* Runtime_NewClosure(Arguments args) {
}
-static Handle<Code> ComputeConstructStub(Handle<Map> map) {
+static Code* ComputeConstructStub(Handle<SharedFunctionInfo> shared) {
// TODO(385): Change this to create a construct stub specialized for
// the given map to make allocation of simple objects - and maybe
// arrays - much faster.
- return Handle<Code>(Builtins::builtin(Builtins::JSConstructStubGeneric));
+ if (FLAG_inline_new
+ && shared->has_only_simple_this_property_assignments()) {
+ ConstructStubCompiler compiler;
+ Object* code = compiler.CompileConstructStub(*shared);
+ if (code->IsFailure()) {
+ return Builtins::builtin(Builtins::JSConstructStubGeneric);
+ }
+ return Code::cast(code);
+ }
+
+ return Builtins::builtin(Builtins::JSConstructStubGeneric);
}
@@ -4373,15 +4387,25 @@ static Object* Runtime_NewObject(Arguments args) {
}
}
+ // The function should be compiled for the optimization hints to be available.
+ if (!function->shared()->is_compiled()) {
+ CompileLazyShared(Handle<SharedFunctionInfo>(function->shared()),
+ CLEAR_EXCEPTION,
+ 0);
+ }
+
bool first_allocation = !function->has_initial_map();
Handle<JSObject> result = Factory::NewJSObject(function);
if (first_allocation) {
Handle<Map> map = Handle<Map>(function->initial_map());
- Handle<Code> stub = ComputeConstructStub(map);
+ Handle<Code> stub = Handle<Code>(
+ ComputeConstructStub(Handle<SharedFunctionInfo>(function->shared())));
function->shared()->set_construct_stub(*stub);
}
+
Counters::constructed_objects.Increment();
Counters::constructed_objects_runtime.Increment();
+
return *result;
}
@@ -7263,7 +7287,7 @@ static Object* Runtime_DebugReferencedBy(Arguments args) {
ASSERT(args.length() == 3);
// First perform a full GC in order to avoid references from dead objects.
- Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage(false);
// Check parameters.
CONVERT_CHECKED(JSObject, target, args[0]);
@@ -7339,7 +7363,7 @@ static Object* Runtime_DebugConstructedBy(Arguments args) {
ASSERT(args.length() == 2);
// First perform a full GC in order to avoid dead objects.
- Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage(false);
// Check parameters.
CONVERT_CHECKED(JSFunction, constructor, args[0]);
@@ -7386,7 +7410,7 @@ static Object* Runtime_SystemBreak(Arguments args) {
}
-static Object* Runtime_FunctionGetAssemblerCode(Arguments args) {
+static Object* Runtime_DebugDisassembleFunction(Arguments args) {
#ifdef DEBUG
HandleScope scope;
ASSERT(args.length() == 1);
@@ -7401,6 +7425,21 @@ static Object* Runtime_FunctionGetAssemblerCode(Arguments args) {
}
+static Object* Runtime_DebugDisassembleConstructor(Arguments args) {
+#ifdef DEBUG
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ // Get the function and make sure it is compiled.
+ CONVERT_ARG_CHECKED(JSFunction, func, 0);
+ if (!func->is_compiled() && !CompileLazy(func, KEEP_EXCEPTION)) {
+ return Failure::Exception();
+ }
+ func->shared()->construct_stub()->PrintLn();
+#endif // DEBUG
+ return Heap::undefined_value();
+}
+
+
static Object* Runtime_FunctionGetInferredName(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -7633,7 +7672,7 @@ void Runtime::PerformGC(Object* result) {
// Handle last resort GC and make sure to allow future allocations
// to grow the heap without causing GCs (if possible).
Counters::gc_last_resort_from_js.Increment();
- Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage(false);
}
}
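
ComputeConstructStub now follows a try-then-fall-back shape: compile the specialized stub only when the optimization hints permit, and on any compile failure quietly use the always-correct generic builtin instead of propagating the error. A simplified model of that decision, with hypothetical types in place of V8's Object*/Failure encoding:

    struct CompileResult {
      bool failed;
      int code;  // stand-in for the generated Code object
    };

    CompileResult CompileSpecialized() {
      CompileResult r = { false, 42 };  // stub: pretend codegen succeeded
      return r;
    }

    int GenericStub() { return -1; }  // always-available fallback

    int SelectConstructStub(bool inline_new, bool simple_assignments) {
      if (inline_new && simple_assignments) {
        CompileResult r = CompileSpecialized();
        if (!r.failed) return r.code;
        // A failed specialized compile is not an error: fall through.
      }
      return GenericStub();
    }
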
diff --git a/V8Binding/v8/src/runtime.h b/V8Binding/v8/src/runtime.h
index d47ca18..1be677a 100644
--- a/V8Binding/v8/src/runtime.h
+++ b/V8Binding/v8/src/runtime.h
@@ -303,7 +303,8 @@ namespace internal {
F(DebugConstructedBy, 2) \
F(DebugGetPrototype, 1) \
F(SystemBreak, 0) \
- F(FunctionGetAssemblerCode, 1) \
+ F(DebugDisassembleFunction, 1) \
+ F(DebugDisassembleConstructor, 1) \
F(FunctionGetInferredName, 1)
#else
#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
diff --git a/V8Binding/v8/src/scanner.cc b/V8Binding/v8/src/scanner.cc
index 720dc35..3dae414 100644
--- a/V8Binding/v8/src/scanner.cc
+++ b/V8Binding/v8/src/scanner.cc
@@ -183,7 +183,8 @@ uc32 TwoByteStringUTF16Buffer::Advance() {
void TwoByteStringUTF16Buffer::PushBack(uc32 ch) {
pos_--;
- ASSERT(pos_ >= 0 && raw_data_[pos_] == ch);
+ ASSERT(pos_ >= Scanner::kCharacterLookaheadBufferSize);
+ ASSERT(raw_data_[pos_ - Scanner::kCharacterLookaheadBufferSize] == ch);
}
diff --git a/V8Binding/v8/src/scanner.h b/V8Binding/v8/src/scanner.h
index 340da86..a201d0e 100644
--- a/V8Binding/v8/src/scanner.h
+++ b/V8Binding/v8/src/scanner.h
@@ -212,6 +212,8 @@ class Scanner {
static unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
static unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
+ static const int kCharacterLookaheadBufferSize = 1;
+
private:
CharacterStreamUTF16Buffer char_stream_buffer_;
TwoByteStringUTF16Buffer two_byte_string_buffer_;
@@ -242,8 +244,6 @@ class Scanner {
bool has_line_terminator_before_next_;
bool is_pre_parsing_;
- static const int kCharacterLookaheadBufferSize = 1;
-
// Literal buffer support
void StartLiteral();
void AddChar(uc32 ch);
diff --git a/V8Binding/v8/src/serialize.cc b/V8Binding/v8/src/serialize.cc
index 963138e..d2fd1e4 100644
--- a/V8Binding/v8/src/serialize.cc
+++ b/V8Binding/v8/src/serialize.cc
@@ -672,13 +672,17 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
2,
"Factory::the_hole_value().location()");
- Add(ExternalReference::address_of_stack_guard_limit().address(),
+ Add(ExternalReference::roots_address().address(),
UNCLASSIFIED,
3,
+ "Heap::roots_address()");
+ Add(ExternalReference::address_of_stack_guard_limit().address(),
+ UNCLASSIFIED,
+ 4,
"StackGuard::address_of_jslimit()");
Add(ExternalReference::address_of_regexp_stack_limit().address(),
UNCLASSIFIED,
- 4,
+ 5,
"RegExpStack::limit_address()");
Add(ExternalReference::new_space_start().address(),
UNCLASSIFIED,
@@ -699,36 +703,36 @@ void ExternalReferenceTable::PopulateTable() {
#ifdef ENABLE_DEBUGGER_SUPPORT
Add(ExternalReference::debug_break().address(),
UNCLASSIFIED,
- 5,
+ 10,
"Debug::Break()");
Add(ExternalReference::debug_step_in_fp_address().address(),
UNCLASSIFIED,
- 10,
+ 11,
"Debug::step_in_fp_addr()");
#endif
Add(ExternalReference::double_fp_operation(Token::ADD).address(),
UNCLASSIFIED,
- 11,
+ 12,
"add_two_doubles");
Add(ExternalReference::double_fp_operation(Token::SUB).address(),
UNCLASSIFIED,
- 12,
+ 13,
"sub_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MUL).address(),
UNCLASSIFIED,
- 13,
+ 14,
"mul_two_doubles");
Add(ExternalReference::double_fp_operation(Token::DIV).address(),
UNCLASSIFIED,
- 14,
+ 15,
"div_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MOD).address(),
UNCLASSIFIED,
- 15,
+ 16,
"mod_two_doubles");
Add(ExternalReference::compare_doubles().address(),
UNCLASSIFIED,
- 16,
+ 17,
"compare_doubles");
}
diff --git a/V8Binding/v8/src/spaces.cc b/V8Binding/v8/src/spaces.cc
index 9227a87..45e82f4 100644
--- a/V8Binding/v8/src/spaces.cc
+++ b/V8Binding/v8/src/spaces.cc
@@ -951,15 +951,43 @@ void NewSpace::Flip() {
}
-bool NewSpace::Grow() {
+void NewSpace::Grow() {
ASSERT(Capacity() < MaximumCapacity());
- // TODO(1240712): Failure to double the from space can result in
- // semispaces of different sizes. In the event of that failure, the
- // to space doubling should be rolled back before returning false.
- if (!to_space_.Grow() || !from_space_.Grow()) return false;
+ if (to_space_.Grow()) {
+ // Only grow from space if we managed to grow to space.
+ if (!from_space_.Grow()) {
+ // If we managed to grow to space but couldn't grow from space,
+ // attempt to shrink to space.
+ if (!to_space_.ShrinkTo(from_space_.Capacity())) {
+ // We are in an inconsistent state because we could not
+ // commit/uncommit memory from new space.
+ V8::FatalProcessOutOfMemory("Failed to grow new space.");
+ }
+ }
+ }
+ allocation_info_.limit = to_space_.high();
+ ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+void NewSpace::Shrink() {
+ int new_capacity = Max(InitialCapacity(), 2 * Size());
+ int rounded_new_capacity = RoundUp(new_capacity, OS::AllocateAlignment());
+ if (rounded_new_capacity < Capacity() &&
+ to_space_.ShrinkTo(rounded_new_capacity)) {
+ // Only shrink from space if we managed to shrink to space.
+ if (!from_space_.ShrinkTo(rounded_new_capacity)) {
+ // If we managed to shrink to space but couldn't shrink from
+ // space, attempt to grow to space again.
+ if (!to_space_.GrowTo(from_space_.Capacity())) {
+ // We are in an inconsistent state because we could not
+ // commit/uncommit memory from new space.
+ V8::FatalProcessOutOfMemory("Failed to shrink new space.");
+ }
+ }
+ }
allocation_info_.limit = to_space_.high();
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- return true;
}
@@ -1058,6 +1086,7 @@ bool SemiSpace::Setup(Address start,
// otherwise. In the mark-compact collector, the memory region of the from
// space is used as the marking stack. It requires contiguous memory
// addresses.
+ initial_capacity_ = initial_capacity;
capacity_ = initial_capacity;
maximum_capacity_ = maximum_capacity;
committed_ = false;
@@ -1079,9 +1108,9 @@ void SemiSpace::TearDown() {
bool SemiSpace::Grow() {
- // Commit 50% extra space but only up to maximum capacity.
+ // Double the semispace size but only up to maximum capacity.
int maximum_extra = maximum_capacity_ - capacity_;
- int extra = Min(RoundUp(capacity_ / 2, OS::AllocateAlignment()),
+ int extra = Min(RoundUp(capacity_, OS::AllocateAlignment()),
maximum_extra);
if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
return false;
@@ -1091,6 +1120,32 @@ bool SemiSpace::Grow() {
}
+bool SemiSpace::GrowTo(int new_capacity) {
+ ASSERT(new_capacity <= maximum_capacity_);
+ ASSERT(new_capacity > capacity_);
+ size_t delta = new_capacity - capacity_;
+ ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+ if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
+ return false;
+ }
+ capacity_ = new_capacity;
+ return true;
+}
+
+
+bool SemiSpace::ShrinkTo(int new_capacity) {
+ ASSERT(new_capacity >= initial_capacity_);
+ ASSERT(new_capacity < capacity_);
+ size_t delta = capacity_ - new_capacity;
+ ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+ if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
+ return false;
+ }
+ capacity_ = new_capacity;
+ return true;
+}
+
+
#ifdef DEBUG
void SemiSpace::Print() { }
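
Grow and Shrink above share an invariant: the two semispaces must end with equal capacity, so a half-applied resize is rolled back, and a failed rollback is treated as fatal because the heap would be left inconsistent. A toy model of the grow path under those rules (commit/uncommit failures are simulated by the capacity cap):

    #include <cstdio>
    #include <cstdlib>

    struct SemiSpaceModel {
      int capacity;
      int maximum;

      bool GrowTo(int new_capacity) {
        if (new_capacity > maximum) return false;  // simulated commit failure
        capacity = new_capacity;
        return true;
      }
      bool ShrinkTo(int new_capacity) {
        capacity = new_capacity;  // uncommit assumed to succeed in this model
        return true;
      }
    };

    void GrowNewSpace(SemiSpaceModel* to_space, SemiSpaceModel* from_space,
                      int target) {
      if (!to_space->GrowTo(target)) return;  // nothing changed, still consistent
      if (!from_space->GrowTo(target)) {
        // Roll to-space back so both sides stay the same size.
        if (!to_space->ShrinkTo(from_space->capacity)) {
          fprintf(stderr, "Failed to grow new space.\n");
          abort();  // mirrors V8::FatalProcessOutOfMemory
        }
      }
    }
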
diff --git a/V8Binding/v8/src/spaces.h b/V8Binding/v8/src/spaces.h
index f12e0e4..98663db 100644
--- a/V8Binding/v8/src/spaces.h
+++ b/V8Binding/v8/src/spaces.h
@@ -1010,6 +1010,15 @@ class SemiSpace : public Space {
// address range to grow).
bool Grow();
+ // Grow the semispace to the new capacity. The new capacity
+ // requested must be larger than the current capacity.
+ bool GrowTo(int new_capacity);
+
+ // Shrinks the semispace to the new capacity. The new capacity
+ // requested must be more than the amount of used memory in the
+ // semispace and less than the current capacity.
+ bool ShrinkTo(int new_capacity);
+
// Returns the start address of the space.
Address low() { return start_; }
// Returns one past the end address of the space.
@@ -1057,11 +1066,14 @@ class SemiSpace : public Space {
// Returns the maximum capacity of the semi space.
int MaximumCapacity() { return maximum_capacity_; }
+ // Returns the initial capacity of the semi space.
+ int InitialCapacity() { return initial_capacity_; }
private:
// The current and maximum capacity of the space.
int capacity_;
int maximum_capacity_;
+ int initial_capacity_;
// The start address of the space.
Address start_;
@@ -1152,8 +1164,11 @@ class NewSpace : public Space {
void Flip();
// Grow the capacity of the semispaces. Assumes that they are not at
- // their maximum capacity. Returns a flag indicating success or failure.
- bool Grow();
+ // their maximum capacity.
+ void Grow();
+
+ // Shrink the capacity of the semispaces.
+ void Shrink();
// True if the address or object lies in the address range of either
// semispace (not necessarily below the allocation pointer).
@@ -1181,6 +1196,12 @@ class NewSpace : public Space {
return to_space_.MaximumCapacity();
}
+ // Returns the initial capacity of a semispace.
+ int InitialCapacity() {
+ ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
+ return to_space_.InitialCapacity();
+ }
+
// Return the address of the allocation pointer in the active semispace.
Address top() { return allocation_info_.top; }
// Return the address of the first object in the active semispace.
diff --git a/V8Binding/v8/src/stub-cache.cc b/V8Binding/v8/src/stub-cache.cc
index b25f5b4..a719f29 100644
--- a/V8Binding/v8/src/stub-cache.cc
+++ b/V8Binding/v8/src/stub-cache.cc
@@ -1097,4 +1097,11 @@ Object* CallStubCompiler::GetCode(PropertyType type, String* name) {
}
+Object* ConstructStubCompiler::GetCode() {
+ Code::Flags flags = Code::ComputeFlags(Code::STUB);
+ return GetCodeWithFlags(flags, "ConstructStub");
+}
+
+
+
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/stub-cache.h b/V8Binding/v8/src/stub-cache.h
index 3b3caad..e268920 100644
--- a/V8Binding/v8/src/stub-cache.h
+++ b/V8Binding/v8/src/stub-cache.h
@@ -561,6 +561,17 @@ class CallStubCompiler: public StubCompiler {
};
+class ConstructStubCompiler: public StubCompiler {
+ public:
+ explicit ConstructStubCompiler() {}
+
+ Object* CompileConstructStub(SharedFunctionInfo* shared);
+
+ private:
+ Object* GetCode();
+};
+
+
} } // namespace v8::internal
#endif // V8_STUB_CACHE_H_
diff --git a/V8Binding/v8/src/utils.h b/V8Binding/v8/src/utils.h
index 91662ee..275dbb5 100644
--- a/V8Binding/v8/src/utils.h
+++ b/V8Binding/v8/src/utils.h
@@ -114,8 +114,10 @@ static inline bool IsAligned(T value, T alignment) {
// Returns true if (addr + offset) is aligned.
-static inline bool IsAddressAligned(Address addr, int alignment, int offset) {
- int offs = OffsetFrom(addr + offset);
+static inline bool IsAddressAligned(Address addr,
+ intptr_t alignment,
+ int offset) {
+ intptr_t offs = OffsetFrom(addr + offset);
return IsAligned(offs, alignment);
}
@@ -446,15 +448,15 @@ class ScopedVector : public Vector<T> {
inline Vector<const char> CStrVector(const char* data) {
- return Vector<const char>(data, strlen(data));
+ return Vector<const char>(data, static_cast<int>(strlen(data)));
}
inline Vector<char> MutableCStrVector(char* data) {
- return Vector<char>(data, strlen(data));
+ return Vector<char>(data, static_cast<int>(strlen(data)));
}
inline Vector<char> MutableCStrVector(char* data, int max) {
- int length = strlen(data);
+ int length = static_cast<int>(strlen(data));
return Vector<char>(data, (length < max) ? length : max);
}
diff --git a/V8Binding/v8/src/v8-counters.h b/V8Binding/v8/src/v8-counters.h
index 43cd5e3..0b941f6 100644
--- a/V8Binding/v8/src/v8-counters.h
+++ b/V8Binding/v8/src/v8-counters.h
@@ -141,6 +141,7 @@ namespace internal {
SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \
SC(constructed_objects, V8.ConstructedObjects) \
SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
+ SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
SC(for_in, V8.ForIn) \
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
diff --git a/V8Binding/v8/src/v8.cc b/V8Binding/v8/src/v8.cc
index 00e0e6e..a204158 100644
--- a/V8Binding/v8/src/v8.cc
+++ b/V8Binding/v8/src/v8.cc
@@ -98,6 +98,10 @@ bool V8::Initialize(Deserializer *des) {
StubCache::Clear();
}
+ // Deserializing may put strange things in the root array's copy of the
+ // stack guard.
+ Heap::SetStackLimit(StackGuard::jslimit());
+
// Setup the CPU support. Must be done after heap setup and after
// any deserialization because we have to have the initial heap
// objects in place for creating the code object used for probing.
@@ -156,13 +160,14 @@ uint32_t V8::Random() {
return (hi << 16) + (lo & 0xFFFF);
}
-void V8::IdleNotification(bool is_high_priority) {
- if (!FLAG_use_idle_notification) return;
+
+bool V8::IdleNotification(bool is_high_priority) {
+ if (!FLAG_use_idle_notification) return false;
// Ignore high priority instances of V8.
- if (is_high_priority) return;
+ if (is_high_priority) return false;
- // Uncommit unused memory in new space.
- Heap::UncommitFromSpace();
+ // Tell the heap that it may want to adjust.
+ return Heap::IdleNotification();
}
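
IdleNotification now reports back to the embedder instead of unconditionally uncommitting from-space; the meaning of the return value is defined by Heap::IdleNotification, which is outside this diff. A hedged sketch of the new shape, with a hypothetical stub in place of the heap call:

    static bool use_idle_notification = true;

    bool HeapIdleNotification() { return true; }  // hypothetical stand-in

    bool IdleNotificationModel(bool is_high_priority) {
      if (!use_idle_notification) return false;  // feature disabled
      if (is_high_priority) return false;        // busy instances are ignored
      return HeapIdleNotification();             // let the heap decide
    }
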
diff --git a/V8Binding/v8/src/v8.h b/V8Binding/v8/src/v8.h
index 1ca3245..50be6df 100644
--- a/V8Binding/v8/src/v8.h
+++ b/V8Binding/v8/src/v8.h
@@ -100,7 +100,7 @@ class V8 : public AllStatic {
static Smi* RandomPositiveSmi();
// Idle notification directly from the API.
- static void IdleNotification(bool is_high_priority);
+ static bool IdleNotification(bool is_high_priority);
private:
// True if engine is currently running
diff --git a/V8Binding/v8/src/version.cc b/V8Binding/v8/src/version.cc
index 9609938..d140470 100644
--- a/V8Binding/v8/src/version.cc
+++ b/V8Binding/v8/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1
#define MINOR_VERSION 3
-#define BUILD_NUMBER 6
+#define BUILD_NUMBER 9
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION true
diff --git a/V8Binding/v8/src/x64/builtins-x64.cc b/V8Binding/v8/src/x64/builtins-x64.cc
index 6988d72..882b32d 100644
--- a/V8Binding/v8/src/x64/builtins-x64.cc
+++ b/V8Binding/v8/src/x64/builtins-x64.cc
@@ -53,7 +53,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ movq(rbp, rsp);
// Store the arguments adaptor context sentinel.
- __ push(Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Push the function on the stack.
__ push(rdi);
@@ -542,16 +542,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// problem here, because it is always greater than the maximum
// instance size that can be represented in a byte.
ASSERT(Heap::MaxObjectSizeInPagedSpace() >= (1 << kBitsPerByte));
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
- __ movq(kScratchRegister, new_space_allocation_top);
- __ movq(rbx, Operand(kScratchRegister, 0));
- __ addq(rdi, rbx); // Calculate new top
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
- __ movq(kScratchRegister, new_space_allocation_limit);
- __ cmpq(rdi, Operand(kScratchRegister, 0));
- __ j(above_equal, &rt_call);
+ __ AllocateObjectInNewSpace(rdi, rbx, rdi, no_reg, &rt_call, false);
// Allocated the JSObject, now initialize the fields.
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
@@ -576,16 +567,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ j(less, &loop);
}
- // Mostly done with the JSObject. Add the heap tag and store the new top, so
- // that we can continue and jump into the continuation code at any time from
- // now on. Any failures need to undo the setting of the new top, so that the
- // heap is in a consistent state and verifiable.
+ // Add the object tag to make the JSObject real, so that we can continue and
+ // jump into the continuation code at any time from now on. Any failures
+ // need to undo the allocation, so that the heap is in a consistent state
+ // and verifiable.
// rax: initial map
// rbx: JSObject
// rdi: start of next object
__ or_(rbx, Immediate(kHeapObjectTag));
- __ movq(kScratchRegister, new_space_allocation_top);
- __ movq(Operand(kScratchRegister, 0), rdi);
// Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is.
@@ -610,11 +599,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// rdx: number of elements in properties array
ASSERT(Heap::MaxObjectSizeInPagedSpace() >
(FixedArray::kHeaderSize + 255*kPointerSize));
- __ lea(rax, Operand(rdi, rdx, times_pointer_size, FixedArray::kHeaderSize));
- __ movq(kScratchRegister, new_space_allocation_limit);
- __ cmpq(rax, Operand(kScratchRegister, 0));
- __ j(above_equal, &undo_allocation);
- __ store_rax(new_space_allocation_top);
+ __ AllocateObjectInNewSpace(FixedArray::kHeaderSize,
+ times_pointer_size,
+ rdx,
+ rdi,
+ rax,
+ no_reg,
+ &undo_allocation,
+ true);
// Initialize the FixedArray.
// rbx: JSObject
@@ -659,9 +651,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// allocated objects unused properties.
// rbx: JSObject (previous new top)
__ bind(&undo_allocation);
- __ xor_(rbx, Immediate(kHeapObjectTag)); // clear the heap tag
- __ movq(kScratchRegister, new_space_allocation_top);
- __ movq(Operand(kScratchRegister, 0), rbx);
+ __ UndoAllocationInNewSpace(rbx);
}
// Allocate the new receiver object using the runtime call.
@@ -756,7 +746,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// an internal frame and the pushed function and receiver, and
// register rax and rbx holds the argument count and argument array,
// while rdi holds the function pointer and rsi the context.
-#ifdef __MSVC__
+#ifdef _WIN64
// MSVC parameters in:
// rcx : entry (ignored)
// rdx : function
@@ -766,7 +756,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Clear the context before we push it when entering the JS frame.
__ xor_(rsi, rsi);
- // Enter an internal frame.
__ EnterInternalFrame();
// Load the function context into rsi.
@@ -783,7 +772,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
// Load the function pointer into rdi.
__ movq(rdi, rdx);
-#else // !defined(__MSVC__)
+#else // !defined(_WIN64)
// GCC parameters in:
// rdi : entry (ignored)
// rsi : function
@@ -807,7 +796,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Load the number of arguments and setup pointer to the arguments.
__ movq(rax, rcx);
__ movq(rbx, r8);
-#endif // __MSVC__
+#endif // _WIN64
// Current stack contents:
// [rsp + 2 * kPointerSize ... ]: Internal frame
// [rsp + kPointerSize] : function
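
The rewritten generic construct stub shows why AllocateObjectInNewSpace takes a result_contains_top_on_entry flag: the JSObject allocation leaves the new allocation top in a register, so the follow-up properties-array allocation can start from that value instead of reloading it, and a failure in the second allocation is undone by winding the top back to the object's start. A toy model of that back-to-back pattern:

    #include <cstddef>
    #include <cstdint>

    struct NewSpaceTop {
      uintptr_t top;
      uintptr_t limit;
    };

    // Bump allocation from an explicit start address; returns 0 on failure.
    uintptr_t Allocate(NewSpaceTop* s, uintptr_t start, size_t size) {
      uintptr_t end = start + size;
      if (end > s->limit) return 0;
      s->top = end;
      return start;
    }

    bool AllocateObjectWithProperties(NewSpaceTop* s, size_t object_size,
                                      size_t properties_size) {
      uintptr_t object = Allocate(s, s->top, object_size);
      if (object == 0) return false;
      // Second allocation starts at the top just computed, which is what
      // result_contains_top_on_entry == true expresses in the stub.
      uintptr_t props = Allocate(s, object + object_size, properties_size);
      if (props == 0) {
        s->top = object;  // UndoAllocationInNewSpace: frees both at once
        return false;
      }
      return true;
    }
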
diff --git a/V8Binding/v8/src/x64/codegen-x64.cc b/V8Binding/v8/src/x64/codegen-x64.cc
index 462b960..f915a0c 100644
--- a/V8Binding/v8/src/x64/codegen-x64.cc
+++ b/V8Binding/v8/src/x64/codegen-x64.cc
@@ -649,6 +649,196 @@ void DeferredReferenceSetKeyedValue::Generate() {
}
+class CallFunctionStub: public CodeStub {
+ public:
+ CallFunctionStub(int argc, InLoopFlag in_loop)
+ : argc_(argc), in_loop_(in_loop) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+ InLoopFlag in_loop_;
+
+#ifdef DEBUG
+ void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
+#endif
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() { return argc_; }
+ InLoopFlag InLoop() { return in_loop_; }
+};
+
+
+void CodeGenerator::CallApplyLazy(Property* apply,
+ Expression* receiver,
+ VariableProxy* arguments,
+ int position) {
+ ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
+ ASSERT(arguments->IsArguments());
+
+ JumpTarget slow, done;
+
+ // Load the apply function onto the stack. This will usually
+ // give us a megamorphic load site. Not super, but it works.
+ Reference ref(this, apply);
+ ref.GetValue(NOT_INSIDE_TYPEOF);
+ ASSERT(ref.type() == Reference::NAMED);
+
+ // Load the receiver and the existing arguments object onto the
+ // expression stack. Avoid allocating the arguments object here.
+ Load(receiver);
+ LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+
+ // Emit the source position information after having loaded the
+ // receiver and the arguments.
+ CodeForSourcePosition(position);
+
+ // Check if the arguments object has been lazily allocated
+ // already. If so, just use that instead of copying the arguments
+ // from the stack. This also deals with cases where a local variable
+ // named 'arguments' has been introduced.
+ frame_->Dup();
+ Result probe = frame_->Pop();
+ bool try_lazy = true;
+ if (probe.is_constant()) {
+ try_lazy = probe.handle()->IsTheHole();
+ } else {
+ __ Cmp(probe.reg(), Factory::the_hole_value());
+ probe.Unuse();
+ slow.Branch(not_equal);
+ }
+
+ if (try_lazy) {
+ JumpTarget build_args;
+
+ // Get rid of the arguments object probe.
+ frame_->Drop();
+
+ // Before messing with the execution stack, we sync all
+ // elements. This is bound to happen anyway because we're
+ // about to call a function.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ // Check that the receiver really is a JavaScript object.
+ { frame_->PushElementAt(0);
+ Result receiver = frame_->Pop();
+ receiver.ToRegister();
+ __ testl(receiver.reg(), Immediate(kSmiTagMask));
+ build_args.Branch(zero);
+ // We allow all JSObjects including JSFunctions. As long as
+ // JS_FUNCTION_TYPE is the last instance type and it is right
+ // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
+ // bound.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ build_args.Branch(below);
+ }
+
+ // Verify that we're invoking Function.prototype.apply.
+ { frame_->PushElementAt(1);
+ Result apply = frame_->Pop();
+ apply.ToRegister();
+ __ testl(apply.reg(), Immediate(kSmiTagMask));
+ build_args.Branch(zero);
+ Result tmp = allocator_->Allocate();
+ __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
+ build_args.Branch(not_equal);
+ __ movq(tmp.reg(),
+ FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+ Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
+ __ Cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
+ apply_code);
+ build_args.Branch(not_equal);
+ }
+
+ // Get the function receiver from the stack. Check that it
+ // really is a function.
+ __ movq(rdi, Operand(rsp, 2 * kPointerSize));
+ __ testl(rdi, Immediate(kSmiTagMask));
+ build_args.Branch(zero);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ build_args.Branch(not_equal);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ Label invoke, adapted;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adapted);
+
+ // No arguments adaptor frame. Copy fixed number of arguments.
+ __ movq(rax, Immediate(scope_->num_parameters()));
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ __ push(frame_->ParameterAt(i));
+ }
+ __ jmp(&invoke);
+
+  // Arguments adaptor frame present. Copy arguments from there, but cap
+  // the number copied to avoid overflowing the stack.
+ __ bind(&adapted);
+ static const uint32_t kArgumentsLimit = 1 * KB;
+ __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ shrl(rax, Immediate(kSmiTagSize));
+ __ movq(rcx, rax);
+ __ cmpq(rax, Immediate(kArgumentsLimit));
+ build_args.Branch(above);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack. We don't inform the virtual frame of the push, so we don't
+ // have to worry about getting rid of the elements from the virtual
+ // frame.
+ Label loop;
+ __ bind(&loop);
+ __ testl(rcx, rcx);
+ __ j(zero, &invoke);
+ __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
+ __ decl(rcx);
+ __ jmp(&loop);
+
+  // Invoke the function. The virtual frame knows about the receiver,
+  // so make sure to forget it explicitly.
+ __ bind(&invoke);
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ frame_->Forget(1);
+ Result result = allocator()->Allocate(rax);
+ frame_->SetElementAt(0, &result);
+ done.Jump();
+
+  // Slow case: allocate the arguments object since we know it isn't
+  // there, and fall through to the code that calls
+  // Function.prototype.apply.
+ build_args.Bind();
+ Result arguments_object = StoreArgumentsObject(false);
+ frame_->Push(&arguments_object);
+ slow.Bind();
+ }
+
+ // Flip the apply function and the function to call on the stack, so
+ // the function looks like the receiver of the apply call. This way,
+ // the generic Function.prototype.apply implementation can deal with
+ // the call like it usually does.
+ Result a2 = frame_->Pop();
+ Result a1 = frame_->Pop();
+ Result ap = frame_->Pop();
+ Result fn = frame_->Pop();
+ frame_->Push(&ap);
+ frame_->Push(&fn);
+ frame_->Push(&a1);
+ frame_->Push(&a2);
+ CallFunctionStub call_function(2, NOT_IN_LOOP);
+ Result res = frame_->CallStub(&call_function, 3);
+ frame_->Push(&res);
+
+ // All done. Restore context register after call.
+ if (try_lazy) done.Bind();
+ frame_->RestoreContextRegister();
+}
+
+
class DeferredStackCheck: public DeferredCode {
public:
DeferredStackCheck() {
@@ -678,27 +868,6 @@ void CodeGenerator::CheckStack() {
}
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#ifdef DEBUG
- void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
-#endif
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
void CodeGenerator::VisitAndSpill(Statement* statement) {
// TODO(X64): No architecture specific code. Move to shared location.
ASSERT(in_spilled_code());
@@ -2612,27 +2781,40 @@ void CodeGenerator::VisitCall(Call* node) {
// JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
// ------------------------------------------------------------------
- // TODO(X64): Consider optimizing Function.prototype.apply calls
- // with arguments object. Requires lazy arguments allocation;
- // see http://codereview.chromium.org/147075.
+ Handle<String> name = Handle<String>::cast(literal->handle());
- // Push the name of the function and the receiver onto the stack.
- frame_->Push(literal->handle());
- Load(property->obj());
+ if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
+ name->IsEqualTo(CStrVector("apply")) &&
+ args->length() == 2 &&
+ args->at(1)->AsVariableProxy() != NULL &&
+ args->at(1)->AsVariableProxy()->IsArguments()) {
+    // Use the optimized Function.prototype.apply path that avoids
+    // materializing the lazily allocated arguments object.
+ CallApplyLazy(property,
+ args->at(0),
+ args->at(1)->AsVariableProxy(),
+ node->position());
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
+ } else {
+ // Push the name of the function and the receiver onto the stack.
+ frame_->Push(name);
+ Load(property->obj());
+
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result =
- frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count, loop_nesting());
- frame_->RestoreContextRegister();
- // Replace the function on the stack with the result.
- frame_->SetElementAt(0, &result);
+ // Call the IC initialization code.
+ CodeForSourcePosition(node->position());
+ Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
+ arg_count,
+ loop_nesting());
+ frame_->RestoreContextRegister();
+ // Replace the function on the stack with the result.
+ frame_->SetElementAt(0, &result);
+ }
} else {
// -------------------------------------------
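For context, the optimized path above fires only when all of the conditions hold: the enclosing function uses lazy arguments allocation, the property name is "apply", the call passes exactly two arguments, and the second argument is the calling function's own arguments object — for example, a forwarding call site of the form return target.apply(this, arguments).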
@@ -3473,7 +3655,7 @@ void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ cmpq(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &check_frame_marker);
__ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
@@ -3564,7 +3746,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
// If the index is negative or non-smi trigger the slow case.
ASSERT(kSmiTag == 0);
__ testl(index.reg(),
- Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000U)));
+ Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
__ j(not_zero, &slow_case);
// Untag the index.
__ sarl(index.reg(), Immediate(kSmiTagSize));
@@ -4586,7 +4768,7 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
: RelocInfo::CODE_TARGET_CONTEXT;
Result answer = frame_->CallLoadIC(mode);
// A test rax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test eax
+ // property case was inlined. Ensure that there is not a test rax
// instruction here.
masm_->nop();
// Discard the global object. The result is in answer.
@@ -5354,7 +5536,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
overwrite_mode);
// Check for negative or non-Smi left hand side.
__ testl(operand->reg(),
- Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000)));
+ Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000)));
deferred->Branch(not_zero);
if (int_value < 0) int_value = -int_value;
if (int_value == 1) {
@@ -5894,7 +6076,7 @@ void Reference::GetValue(TypeofState typeof_state) {
// Check that the key is a non-negative smi.
__ testl(key.reg(),
- Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000u)));
+ Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000u)));
deferred->Branch(not_zero);
// Get the elements array from the receiver and check that it
@@ -6264,8 +6446,8 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
} else {
unsigned_left >>= shift_amount;
}
- ASSERT(Smi::IsValid(unsigned_left)); // Converted to signed.
- answer_object = Smi::FromInt(unsigned_left); // Converted to signed.
+ ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
+ answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
break;
}
default:
@@ -6618,7 +6800,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
Label runtime;
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &runtime);
// Value in rcx is Smi encoded.
@@ -6651,7 +6833,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
Label adaptor;
__ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ movq(rcx, Operand(rbx, StandardFrameConstants::kContextOffset));
- __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor);
// Check index against formal parameters count limit passed in
@@ -6701,7 +6883,7 @@ void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
Label adaptor;
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor);
// Nothing to do: The formal number of parameters has already been
@@ -6763,10 +6945,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
if (do_gc) {
// Pass failure code returned from last attempt as first argument to GC.
-#ifdef __MSVC__
- __ movq(rcx, rax); // argc.
-#else // ! defined(__MSVC__)
- __ movq(rdi, rax); // argv.
+#ifdef _WIN64
+ __ movq(rcx, rax);
+#else // ! defined(_WIN64)
+ __ movq(rdi, rax);
#endif
__ movq(kScratchRegister,
FUNCTION_ADDR(Runtime::PerformGC),
@@ -6782,11 +6964,14 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
}
// Call C function.
-#ifdef __MSVC__
- // MSVC passes arguments in rcx, rdx, r8, r9
- __ movq(rcx, r14); // argc.
- __ movq(rdx, r15); // argv.
-#else // ! defined(__MSVC__)
+#ifdef _WIN64
+ // The Windows 64-bit ABI passes arguments in rcx, rdx, r8 and r9.
+ // Store the Arguments object on the stack.
+ __ movq(Operand(rsp, 1 * kPointerSize), r14); // argc.
+ __ movq(Operand(rsp, 2 * kPointerSize), r15); // argv.
+ // Pass a pointer to the Arguments object as the first argument.
+ __ lea(rcx, Operand(rsp, 1 * kPointerSize));
+#else // ! defined(_WIN64)
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
__ movq(rdi, r14); // argc.
__ movq(rsi, r15); // argv.
@@ -7012,11 +7197,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ push(rbp);
__ movq(rbp, rsp);
- // Save callee-saved registers (X64 calling conventions).
+ // Push the stack frame type marker twice.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- // Push something that is not an arguments adaptor.
- __ push(Immediate(ArgumentsAdaptorFrame::NON_SENTINEL));
- __ push(Immediate(Smi::FromInt(marker))); // @ function offset
+ __ push(Immediate(Smi::FromInt(marker))); // context slot
+ __ push(Immediate(Smi::FromInt(marker))); // function slot
+ // Save callee-saved registers (X64 calling conventions).
__ push(r12);
__ push(r13);
__ push(r14);
@@ -7139,24 +7324,20 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
Label* need_gc,
Register scratch,
Register result) {
- ExternalReference allocation_top =
- ExternalReference::new_space_allocation_top_address();
- ExternalReference allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
- __ movq(scratch, allocation_top); // scratch: address of allocation top.
- __ movq(result, Operand(scratch, 0));
- __ addq(result, Immediate(HeapNumber::kSize)); // New top.
- __ movq(kScratchRegister, allocation_limit);
- __ cmpq(result, Operand(kScratchRegister, 0));
- __ j(above, need_gc);
-
- __ movq(Operand(scratch, 0), result); // store new top
- __ addq(result, Immediate(kHeapObjectTag - HeapNumber::kSize));
+ // Allocate heap number in new space.
+ __ AllocateObjectInNewSpace(HeapNumber::kSize,
+ result,
+ scratch,
+ no_reg,
+ need_gc,
+ false);
+
+ // Set the map and tag the result.
+ __ addq(result, Immediate(kHeapObjectTag));
__ movq(kScratchRegister,
Factory::heap_number_map(),
RelocInfo::EMBEDDED_OBJECT);
__ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
- // Tag old top and use as result.
}
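For readers following along: AllocateObjectInNewSpace, introduced by this patch (implemented below in macro-assembler-x64.cc), centralizes the bump-pointer protocol that the deleted lines performed by hand. A minimal stand-alone C++ sketch of that protocol, using hypothetical names that are not part of the V8 API:

    #include <cstddef>
    #include <cstdint>

    struct NewSpace {
      uintptr_t top;    // current allocation top
      uintptr_t limit;  // end of the allocatable region
    };

    // Bump 'top' by object_size. Where this sketch returns 0, the emitted
    // code jumps to the gc_required label instead.
    uintptr_t AllocateRaw(NewSpace* space, size_t object_size) {
      uintptr_t result = space->top;
      uintptr_t result_end = result + object_size;
      if (result_end > space->limit) return 0;  // new space exhausted
      space->top = result_end;                  // publish the new top
      return result;  // untagged; caller adds kHeapObjectTag and sets the map
    }

    // Counterpart of UndoAllocationInNewSpace: resetting top to an object's
    // untagged start discards it and everything allocated after it.
    void UndoAllocation(NewSpace* space, uintptr_t object_start) {
      space->top = object_start;
    }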
diff --git a/V8Binding/v8/src/x64/codegen-x64.h b/V8Binding/v8/src/x64/codegen-x64.h
index bfdff56..2ae8145 100644
--- a/V8Binding/v8/src/x64/codegen-x64.h
+++ b/V8Binding/v8/src/x64/codegen-x64.h
@@ -481,6 +481,14 @@ class CodeGenerator: public AstVisitor {
void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+ // Use an optimized version of Function.prototype.apply that avoids
+ // allocating the arguments object and instead copies the arguments
+ // directly from the stack.
+ void CallApplyLazy(Property* apply,
+ Expression* receiver,
+ VariableProxy* arguments,
+ int position);
+
void CheckStack();
struct InlineRuntimeLUT {
diff --git a/V8Binding/v8/src/x64/frames-x64.h b/V8Binding/v8/src/x64/frames-x64.h
index 24c78da..5442be9 100644
--- a/V8Binding/v8/src/x64/frames-x64.h
+++ b/V8Binding/v8/src/x64/frames-x64.h
@@ -60,6 +60,7 @@ class StackHandlerConstants : public AllStatic {
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset = -10 * kPointerSize;
+ static const int kArgvOffset = 6 * kPointerSize;
};
@@ -90,10 +91,12 @@ class StandardFrameConstants : public AllStatic {
class JavaScriptFrameConstants : public AllStatic {
public:
+ // FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kSavedRegistersOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+ // Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
};
diff --git a/V8Binding/v8/src/x64/ic-x64.cc b/V8Binding/v8/src/x64/ic-x64.cc
index 1c74a44..d41a56c 100644
--- a/V8Binding/v8/src/x64/ic-x64.cc
+++ b/V8Binding/v8/src/x64/ic-x64.cc
@@ -785,7 +785,19 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, rax, rdx, rbx, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
@@ -805,7 +817,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
StubCache::GenerateProbe(masm, flags, rax, rcx, rbx, rdx);
// Cache miss: Jump to runtime.
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
@@ -819,7 +831,52 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}
+
void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ Label miss, probe, global;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ // Check that the receiver is a valid JS object.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+ __ j(less, &miss);
+
+  // If this assert fails, we have to check the upper bound too.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+ // Check for access to global object (unlikely).
+ __ CmpInstanceType(rbx, JS_GLOBAL_PROXY_TYPE);
+ __ j(equal, &global);
+
+ // Check for non-global object that requires access check.
+ __ testl(FieldOperand(rbx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &miss);
+
+  // Search the dictionary, placing the result in rax.
+ __ bind(&probe);
+ GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx);
+ GenerateCheckNonObjectOrLoaded(masm, &miss, rax);
+ __ ret(0);
+
+ // Global object access: Check access rights.
+ __ bind(&global);
+ __ CheckAccessGlobalProxy(rax, rdx, &miss);
+ __ jmp(&probe);
+
+ // Cache miss: Restore receiver from stack and jump to runtime.
+ __ bind(&miss);
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}
@@ -906,6 +963,21 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ // Get the receiver from the stack and probe the stub cache.
+ __ movq(rdx, Operand(rsp, kPointerSize));
+ Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC);
+ StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
+
+ // Cache miss: Jump to runtime.
Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
}
diff --git a/V8Binding/v8/src/x64/macro-assembler-x64.cc b/V8Binding/v8/src/x64/macro-assembler-x64.cc
index 8f8398d..10d4503 100644
--- a/V8Binding/v8/src/x64/macro-assembler-x64.cc
+++ b/V8Binding/v8/src/x64/macro-assembler-x64.cc
@@ -994,8 +994,11 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
}
#endif
- // Reserve space for two arguments: argc and argv
- subq(rsp, Immediate(2 * kPointerSize));
+ // Reserve space for the Arguments object. The Windows 64-bit ABI
+ // requires us to pass this structure as a pointer to its location on
+ // the stack. We also need backing space for the pointer, even though
+ // it is passed in a register.
+ subq(rsp, Immediate(3 * kPointerSize));
// Get the required frame alignment for the OS.
static const int kFrameAlignment = OS::ActivationFrameAlignment();
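Schematically, the three slots reserved here are used as follows (matching CEntryStub::GenerateCore earlier in this patch):

    rsp[0 * kPointerSize] : backing space for the Arguments pointer (rcx)
    rsp[1 * kPointerSize] : argc, the first field of the Arguments structure
    rsp[2 * kPointerSize] : argv, the second field of the Arguments structure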
@@ -1201,4 +1204,156 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
+void MacroAssembler::LoadAllocationTopHelper(
+ Register result,
+ Register result_end,
+ Register scratch,
+ bool result_contains_top_on_entry) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+
+ // Just return if allocation top is already known.
+ if (result_contains_top_on_entry) {
+ // No use of scratch if allocation top is provided.
+ ASSERT(scratch.is(no_reg));
+ return;
+ }
+
+ // Move address of new object to result. Use scratch register if available.
+ if (scratch.is(no_reg)) {
+ movq(kScratchRegister, new_space_allocation_top);
+ movq(result, Operand(kScratchRegister, 0));
+ } else {
+ ASSERT(!scratch.is(result_end));
+ movq(scratch, new_space_allocation_top);
+ movq(result, Operand(scratch, 0));
+ }
+}
+
+
+void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
+ Register scratch) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+
+ // Update new top.
+ if (result_end.is(rax)) {
+ // rax can be stored directly to a memory location.
+ store_rax(new_space_allocation_top);
+ } else {
+ // Register required - use scratch provided if available.
+ if (scratch.is(no_reg)) {
+ movq(kScratchRegister, new_space_allocation_top);
+ movq(Operand(kScratchRegister, 0), result_end);
+ } else {
+ movq(Operand(scratch, 0), result_end);
+ }
+ }
+}
+
+
+void MacroAssembler::AllocateObjectInNewSpace(
+ int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry) {
+ ASSERT(!result.is(result_end));
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result,
+ result_end,
+ scratch,
+ result_contains_top_on_entry);
+
+ // Calculate new top and bail out if new space is exhausted.
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ lea(result_end, Operand(result, object_size));
+ movq(kScratchRegister, new_space_allocation_limit);
+ cmpq(result_end, Operand(kScratchRegister, 0));
+ j(above, gc_required);
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
+}
+
+
+void MacroAssembler::AllocateObjectInNewSpace(
+ int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry) {
+ ASSERT(!result.is(result_end));
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result,
+ result_end,
+ scratch,
+ result_contains_top_on_entry);
+
+ // Calculate new top and bail out if new space is exhausted.
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ lea(result_end, Operand(result, element_count, element_size, header_size));
+ movq(kScratchRegister, new_space_allocation_limit);
+ cmpq(result_end, Operand(kScratchRegister, 0));
+ j(above, gc_required);
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
+}
+
+
+void MacroAssembler::AllocateObjectInNewSpace(
+ Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry) {
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result,
+ result_end,
+ scratch,
+ result_contains_top_on_entry);
+
+ // Calculate new top and bail out if new space is exhausted.
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ if (!object_size.is(result_end)) {
+ movq(result_end, object_size);
+ }
+ addq(result_end, result);
+ movq(kScratchRegister, new_space_allocation_limit);
+ cmpq(result_end, Operand(kScratchRegister, 0));
+ j(above, gc_required);
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+
+ // Make sure the object has no tag before resetting top.
+ and_(object, Immediate(~kHeapObjectTagMask));
+ movq(kScratchRegister, new_space_allocation_top);
+#ifdef DEBUG
+ cmpq(object, Operand(kScratchRegister, 0));
+ Check(below, "Undo allocation of non allocated memory");
+#endif
+ movq(Operand(kScratchRegister, 0), object);
+}
+
+
} } // namespace v8::internal
diff --git a/V8Binding/v8/src/x64/macro-assembler-x64.h b/V8Binding/v8/src/x64/macro-assembler-x64.h
index cba55eb..31135d9 100644
--- a/V8Binding/v8/src/x64/macro-assembler-x64.h
+++ b/V8Binding/v8/src/x64/macro-assembler-x64.h
@@ -223,6 +223,48 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
+ // Allocation support
+
+ // Allocate an object in new space. If the new space is exhausted, control
+ // continues at the gc_required label. The allocated object is returned in
+ // result and the end of the new object is returned in result_end. The
+ // register scratch can be passed as no_reg, in which case an additional
+ // object reference will be added to the reloc info. The returned pointers
+ // in result and result_end have not yet been tagged as heap objects. If
+ // result_contains_top_on_entry is true, the content of result is known to
+ // be the allocation top on entry (it could be result_end from a previous
+ // call to AllocateObjectInNewSpace), and scratch should be no_reg as it
+ // is never used.
+ void AllocateObjectInNewSpace(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry);
+
+ void AllocateObjectInNewSpace(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry);
+
+ void AllocateObjectInNewSpace(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry);
+
+ // Undo allocation in new space. The object passed, and any objects
+ // allocated after it, will no longer be allocated. Make sure that no
+ // pointers are left to the deallocated object(s), as they would be
+ // invalid once the allocation is undone.
+ void UndoAllocationInNewSpace(Register object);
+
+ // ---------------------------------------------------------------------------
// Support functions.
// Check if result is zero and op is negative.
@@ -341,6 +383,13 @@ class MacroAssembler: public Assembler {
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
+
+ // Allocation support helpers.
+ void LoadAllocationTopHelper(Register result,
+ Register result_end,
+ Register scratch,
+ bool result_contains_top_on_entry);
+ void UpdateAllocationTopHelper(Register result_end, Register scratch);
};
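A schematic usage sketch of the fixed-size variant declared above, following the AllocateHeapNumber path rewritten earlier in this patch. kBoxSize and the register choices are illustrative, and the snippet assumes the usual '#define __ masm->' shorthand:

    Label gc_required;
    __ AllocateObjectInNewSpace(kBoxSize,      // object_size (hypothetical)
                                rax,           // result, untagged on return
                                rbx,           // result_end
                                rcx,           // scratch
                                &gc_required,  // taken when new space is full
                                false);        // result does not hold top yet
    __ addq(rax, Immediate(kHeapObjectTag));   // tag the result as a heap object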
diff --git a/V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc b/V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc
index 1e38d6d..4c6a84d 100644
--- a/V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -319,7 +319,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
} else {
ASSERT(mode_ == UC16);
// Save important/volatile registers before calling C function.
-#ifndef __MSVC__
+#ifndef _WIN64
// Callee save on Win64
__ push(rsi);
__ push(rdi);
@@ -333,7 +333,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Address byte_offset1 - Address captured substring's start.
// Address byte_offset2 - Address of current character position.
// size_t byte_length - length of capture in bytes(!)
-#ifdef __MSVC__
+#ifdef _WIN64
// Compute and set byte_offset1 (start of capture).
__ lea(rcx, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
@@ -356,7 +356,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Restore original values before reacting on result value.
__ Move(code_object_pointer(), masm_->CodeObject());
__ pop(backtrack_stackpointer());
-#ifndef __MSVC__
+#ifndef _WIN64
__ pop(rdi);
__ pop(rsi);
#endif
@@ -604,7 +604,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movq(rbp, rsp);
// Save parameters and callee-save registers. Order here should correspond
// to order of kBackup_ebx etc.
-#ifdef __MSVC__
+#ifdef _WIN64
// MSVC passes arguments in rcx, rdx, r8, r9, with backing stack slots.
// Store register parameters in pre-allocated stack slots,
__ movq(Operand(rbp, kInputString), rcx);
@@ -740,7 +740,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Exit and return rax
__ bind(&exit_label_);
-#ifdef __MSVC__
+#ifdef _WIN64
// Restore callee save registers.
__ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
__ pop(rbx);
@@ -794,7 +794,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
Label grow_failed;
// Save registers before calling C function
-#ifndef __MSVC__
+#ifndef _WIN64
// Callee-save in Microsoft 64-bit ABI, but not in AMD64 ABI.
__ push(rsi);
__ push(rdi);
@@ -803,7 +803,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Call GrowStack(backtrack_stackpointer())
int num_arguments = 2;
FrameAlign(num_arguments);
-#ifdef __MSVC__
+#ifdef _WIN64
// Microsoft passes parameters in rcx, rdx.
// First argument, backtrack stackpointer, is already in rcx.
__ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument
@@ -821,7 +821,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movq(backtrack_stackpointer(), rax);
// Restore saved registers and continue.
__ Move(code_object_pointer(), masm_->CodeObject());
-#ifndef __MSVC__
+#ifndef _WIN64
__ pop(rdi);
__ pop(rsi);
#endif
@@ -980,7 +980,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
// store anything volatile in a C call or overwritten by this function.
int num_arguments = 3;
FrameAlign(num_arguments);
-#ifdef __MSVC__
+#ifdef _WIN64
// Second argument: Code* of self. (Do this before overwriting r8).
__ movq(rdx, code_object_pointer());
// Third argument: RegExp code frame pointer.
@@ -1242,10 +1242,10 @@ void RegExpMacroAssemblerX64::FrameAlign(int num_arguments) {
// (on Win64 only) and the original value of rsp.
__ movq(kScratchRegister, rsp);
ASSERT(IsPowerOf2(frameAlignment));
-#ifdef __MSVC__
+#ifdef _WIN64
// Allocate space for parameters and old rsp.
__ subq(rsp, Immediate((num_arguments + 1) * kPointerSize));
- __ and_(rsp, -frameAlignment);
+ __ and_(rsp, Immediate(-frameAlignment));
__ movq(Operand(rsp, num_arguments * kPointerSize), kScratchRegister);
#else
// Allocate space for old rsp.
@@ -1264,7 +1264,7 @@ void RegExpMacroAssemblerX64::CallCFunction(Address function_address,
__ movq(rax, reinterpret_cast<intptr_t>(function_address), RelocInfo::NONE);
__ call(rax);
ASSERT(OS::ActivationFrameAlignment() != 0);
-#ifdef __MSVC__
+#ifdef _WIN64
__ movq(rsp, Operand(rsp, num_arguments * kPointerSize));
#else
__ pop(rsp);
diff --git a/V8Binding/v8/src/x64/stub-cache-x64.cc b/V8Binding/v8/src/x64/stub-cache-x64.cc
index 091c826..98975fb 100644
--- a/V8Binding/v8/src/x64/stub-cache-x64.cc
+++ b/V8Binding/v8/src/x64/stub-cache-x64.cc
@@ -1738,6 +1738,18 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
}
+Object* ConstructStubCompiler::CompileConstructStub(
+ SharedFunctionInfo* shared) {
+ // Not implemented yet - just jump to generic stub.
+ Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Handle<Code> generic_construct_stub(code);
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode();
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/V8Binding/v8/test/cctest/test-api.cc b/V8Binding/v8/test/cctest/test-api.cc
index 48ee6e5..d192ebb 100644
--- a/V8Binding/v8/test/cctest/test-api.cc
+++ b/V8Binding/v8/test/cctest/test-api.cc
@@ -462,11 +462,11 @@ THREADED_TEST(ScriptUsingStringResource) {
CHECK(source->IsExternal());
CHECK_EQ(resource,
static_cast<TestResource*>(source->GetExternalStringResource()));
- v8::internal::Heap::CollectAllGarbage();
+ v8::internal::Heap::CollectAllGarbage(false);
CHECK_EQ(0, TestResource::dispose_count);
}
v8::internal::CompilationCache::Clear();
- v8::internal::Heap::CollectAllGarbage();
+ v8::internal::Heap::CollectAllGarbage(false);
CHECK_EQ(1, TestResource::dispose_count);
}
@@ -483,11 +483,11 @@ THREADED_TEST(ScriptUsingAsciiStringResource) {
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- v8::internal::Heap::CollectAllGarbage();
+ v8::internal::Heap::CollectAllGarbage(false);
CHECK_EQ(0, TestAsciiResource::dispose_count);
}
v8::internal::CompilationCache::Clear();
- v8::internal::Heap::CollectAllGarbage();
+ v8::internal::Heap::CollectAllGarbage(false);
CHECK_EQ(1, TestAsciiResource::dispose_count);
}
@@ -505,11 +505,11 @@ THREADED_TEST(ScriptMakingExternalString) {
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- v8::internal::Heap::CollectAllGarbage();
+ v8::internal::Heap::CollectAllGarbage(false);
CHECK_EQ(0, TestResource::dispose_count);
}
v8::internal::CompilationCache::Clear();
- v8::internal::Heap::CollectAllGarbage();
+ v8::internal::Heap::CollectAllGarbage(false);
CHECK_EQ(1, TestResource::dispose_count);
}
@@ -528,11 +528,11 @@ THREADED_TEST(ScriptMakingExternalAsciiString) {
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- v8::internal::Heap::CollectAllGarbage();
+ v8::internal::Heap::CollectAllGarbage(false);
CHECK_EQ(0, TestAsciiResource::dispose_count);
}
v8::internal::CompilationCache::Clear();
- v8::internal::Heap::CollectAllGarbage();
+ v8::internal::Heap::CollectAllGarbage(false);
CHECK_EQ(1, TestAsciiResource::dispose_count);
}
@@ -550,8 +550,8 @@ THREADED_TEST(UsingExternalString) {
i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
CHECK(isymbol->IsSymbol());
}
- i::Heap::CollectAllGarbage();
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
+ i::Heap::CollectAllGarbage(false);
}
@@ -568,8 +568,8 @@ THREADED_TEST(UsingExternalAsciiString) {
i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
CHECK(isymbol->IsSymbol());
}
- i::Heap::CollectAllGarbage();
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
+ i::Heap::CollectAllGarbage(false);
}
@@ -1333,12 +1333,12 @@ THREADED_TEST(InternalFieldsNativePointers) {
// Check reading and writing aligned pointers.
obj->SetPointerInInternalField(0, aligned);
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
// Check reading and writing unaligned pointers.
obj->SetPointerInInternalField(0, unaligned);
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
delete[] data;
@@ -1351,7 +1351,7 @@ THREADED_TEST(IdentityHash) {
// Ensure that the test starts with a fresh heap to test whether the hash
// code is based on the address.
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
Local<v8::Object> obj = v8::Object::New();
int hash = obj->GetIdentityHash();
int hash1 = obj->GetIdentityHash();
@@ -1361,7 +1361,7 @@ THREADED_TEST(IdentityHash) {
// objects should not be assigned the same hash code. If the test below fails
// the random number generator should be evaluated.
CHECK_NE(hash, hash2);
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
int hash3 = v8::Object::New()->GetIdentityHash();
// Make sure that the identity hash is not based on the initial address of
// the object alone. If the test below fails the random number generator
@@ -1381,7 +1381,7 @@ THREADED_TEST(HiddenProperties) {
v8::Local<v8::String> empty = v8_str("");
v8::Local<v8::String> prop_name = v8_str("prop_name");
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
// Make sure delete of a non-existent hidden value works
CHECK(obj->DeleteHiddenValue(key));
@@ -1391,7 +1391,7 @@ THREADED_TEST(HiddenProperties) {
CHECK(obj->SetHiddenValue(key, v8::Integer::New(2002)));
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
// Make sure we do not find the hidden property.
CHECK(!obj->Has(empty));
@@ -1402,7 +1402,7 @@ THREADED_TEST(HiddenProperties) {
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
CHECK_EQ(2003, obj->Get(empty)->Int32Value());
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
// Add another property and delete it afterwards to force the object in
// slow case.
@@ -1413,7 +1413,7 @@ THREADED_TEST(HiddenProperties) {
CHECK(obj->Delete(prop_name));
CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
CHECK(obj->DeleteHiddenValue(key));
CHECK(obj->GetHiddenValue(key).IsEmpty());
@@ -1429,7 +1429,7 @@ static v8::Handle<Value> InterceptorForHiddenProperties(
}
// The whole goal of this interceptor is to cause a GC during local property
// lookup.
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
i::FLAG_always_compact = saved_always_compact;
return v8::Handle<Value>();
}
@@ -2843,7 +2843,7 @@ TEST(ErrorReporting) {
static const char* js_code_causing_huge_string_flattening =
"var str = 'X';"
- "for (var i = 0; i < 29; i++) {"
+ "for (var i = 0; i < 30; i++) {"
" str = str + str;"
"}"
"str.match(/X/);";
@@ -2982,7 +2982,7 @@ static v8::Handle<Value> ArgumentsTestCallback(const v8::Arguments& args) {
CHECK_EQ(v8::Integer::New(3), args[2]);
CHECK_EQ(v8::Undefined(), args[3]);
v8::HandleScope scope;
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
return v8::Undefined();
}
@@ -4960,7 +4960,7 @@ static v8::Handle<Value> InterceptorHasOwnPropertyGetterGC(
Local<String> name,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
return v8::Handle<Value>();
}
@@ -6165,8 +6165,8 @@ static int GetSurvivingGlobalObjectsCount() {
// the first garbage collection but some of the maps have already
// been marked at that point. Therefore some of the maps are not
// collected until the second garbage collection.
- v8::internal::Heap::CollectAllGarbage();
- v8::internal::Heap::CollectAllGarbage();
+ v8::internal::Heap::CollectAllGarbage(false);
+ v8::internal::Heap::CollectAllGarbage(false);
v8::internal::HeapIterator it;
while (it.has_next()) {
v8::internal::HeapObject* object = it.next();
@@ -6242,7 +6242,30 @@ THREADED_TEST(NewPersistentHandleFromWeakCallback) {
// weak callback of the first handle would be able to 'reallocate' it.
handle1.MakeWeak(NULL, NewPersistentHandleCallback);
handle2.Dispose();
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
+}
+
+
+v8::Persistent<v8::Object> to_be_disposed;
+
+void DisposeAndForceGcCallback(v8::Persistent<v8::Value> handle, void*) {
+ to_be_disposed.Dispose();
+ i::Heap::CollectAllGarbage(false);
+}
+
+
+THREADED_TEST(DoNotUseDeletedNodesInSecondLevelGc) {
+ LocalContext context;
+
+ v8::Persistent<v8::Object> handle1, handle2;
+ {
+ v8::HandleScope scope;
+ handle1 = v8::Persistent<v8::Object>::New(v8::Object::New());
+ handle2 = v8::Persistent<v8::Object>::New(v8::Object::New());
+ }
+ handle1.MakeWeak(NULL, DisposeAndForceGcCallback);
+ to_be_disposed = handle2;
+ i::Heap::CollectAllGarbage(false);
}
@@ -6819,7 +6842,7 @@ class RegExpInterruptTest {
{
v8::Locker lock;
// TODO(lrn): Perhaps create some garbage before collecting.
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
gc_count_++;
}
i::OS::Sleep(1);
@@ -6940,7 +6963,7 @@ class ApplyInterruptTest {
while (gc_during_apply_ < kRequiredGCs) {
{
v8::Locker lock;
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
gc_count_++;
}
i::OS::Sleep(1);
@@ -7150,6 +7173,30 @@ THREADED_TEST(MorphCompositeStringTest) {
}
+TEST(CompileExternalTwoByteSource) {
+ v8::HandleScope scope;
+ LocalContext context;
+
+ // This is a very short list of sources; currently it only checks for a
+ // regression caused by r2703.
+ const char* ascii_sources[] = {
+ "0.5",
+ "-0.5", // This mainly testes PushBack in the Scanner.
+ "--0.5", // This mainly testes PushBack in the Scanner.
+ NULL
+ };
+
+ // Compile the sources as external two-byte strings.
+ for (int i = 0; ascii_sources[i] != NULL; i++) {
+ uint16_t* two_byte_string = AsciiToTwoByteString(ascii_sources[i]);
+ UC16VectorResource uc16_resource(
+ i::Vector<const uint16_t>(two_byte_string, strlen(ascii_sources[i])));
+ v8::Local<v8::String> source = v8::String::NewExternal(&uc16_resource);
+ v8::Script::Compile(source);
+ }
+}
+
+
class RegExpStringModificationTest {
public:
RegExpStringModificationTest()
@@ -7633,11 +7680,11 @@ THREADED_TEST(PixelArray) {
uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount));
i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(kElementCount,
pixel_data);
- i::Heap::CollectAllGarbage(); // Force GC to trigger verification.
+ i::Heap::CollectAllGarbage(false); // Force GC to trigger verification.
for (int i = 0; i < kElementCount; i++) {
pixels->set(i, i);
}
- i::Heap::CollectAllGarbage(); // Force GC to trigger verification.
+ i::Heap::CollectAllGarbage(false); // Force GC to trigger verification.
for (int i = 0; i < kElementCount; i++) {
CHECK_EQ(i, pixels->get(i));
CHECK_EQ(i, pixel_data[i]);
@@ -7768,6 +7815,7 @@ THREADED_TEST(PixelArray) {
free(pixel_data);
}
+
THREADED_TEST(ScriptContextDependence) {
v8::HandleScope scope;
LocalContext c1;
@@ -7783,6 +7831,7 @@ THREADED_TEST(ScriptContextDependence) {
CHECK_EQ(indep->Run()->Int32Value(), 101);
}
+
THREADED_TEST(StackTrace) {
v8::HandleScope scope;
LocalContext context;
@@ -7795,3 +7844,11 @@ THREADED_TEST(StackTrace) {
v8::String::Utf8Value stack(try_catch.StackTrace());
CHECK(strstr(*stack, "at foo (stack-trace-test") != NULL);
}
+
+
+// Test that idle notification can be handled when V8 has not yet been
+// set up.
+THREADED_TEST(IdleNotification) {
+ for (int i = 0; i < 100; i++) v8::V8::IdleNotification(true);
+ for (int i = 0; i < 100; i++) v8::V8::IdleNotification(false);
+}
diff --git a/V8Binding/v8/test/cctest/test-debug.cc b/V8Binding/v8/test/cctest/test-debug.cc
index f5e4f3a..bd09d0d 100644
--- a/V8Binding/v8/test/cctest/test-debug.cc
+++ b/V8Binding/v8/test/cctest/test-debug.cc
@@ -414,8 +414,8 @@ void CheckDebuggerUnloaded(bool check_functions) {
CHECK_EQ(NULL, Debug::debug_info_list_);
// Collect garbage to ensure weak handles are cleared.
- Heap::CollectAllGarbage();
- Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage(false);
+ Heap::CollectAllGarbage(false);
// Iterate the heap and check that there are no debugger-related objects left.
HeapIterator iterator;
@@ -843,7 +843,7 @@ static void DebugEventBreakPointCollectGarbage(
Heap::CollectGarbage(0, v8::internal::NEW_SPACE);
} else {
// Mark sweep (and perhaps compact).
- Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage(false);
}
}
}
@@ -1206,7 +1206,7 @@ static void CallAndGC(v8::Local<v8::Object> recv, v8::Local<v8::Function> f) {
CHECK_EQ(2 + i * 3, break_point_hit_count);
// Mark sweep (and perhaps compact) and call function.
- Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage(false);
f->Call(recv, 0, NULL);
CHECK_EQ(3 + i * 3, break_point_hit_count);
}
@@ -5094,7 +5094,7 @@ TEST(ScriptCollectedEvent) {
// Do garbage collection to ensure that only the script in this test will be
// collected afterwards.
- Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage(false);
script_collected_count = 0;
v8::Debug::SetDebugEventListener(DebugEventScriptCollectedEvent,
@@ -5106,7 +5106,7 @@ TEST(ScriptCollectedEvent) {
// Do garbage collection to collect the script above which is no longer
// referenced.
- Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage(false);
CHECK_EQ(2, script_collected_count);
@@ -5141,7 +5141,7 @@ TEST(ScriptCollectedEventContext) {
// Do garbage collection to ensure that only the script in this test will be
// collected afterwards.
- Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage(false);
v8::Debug::SetMessageHandler2(ScriptCollectedMessageHandler);
{
@@ -5152,7 +5152,7 @@ TEST(ScriptCollectedEventContext) {
// Do garbage collection to collect the script above which is no longer
// referenced.
- Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage(false);
CHECK_EQ(2, script_collected_message_count);
diff --git a/V8Binding/v8/test/cctest/test-disasm-arm.cc b/V8Binding/v8/test/cctest/test-disasm-arm.cc
index 1cca17d..69efdc5 100644
--- a/V8Binding/v8/test/cctest/test-disasm-arm.cc
+++ b/V8Binding/v8/test/cctest/test-disasm-arm.cc
@@ -123,13 +123,13 @@ TEST(Type0) {
"20354189 eorcss r4, r5, r9, lsl #3");
COMPARE(sub(r5, r6, Operand(r10, LSL, 31), LeaveCC, hs),
- "20465f8a subcs r5, r6, sl, lsl #31");
+ "20465f8a subcs r5, r6, r10, lsl #31");
COMPARE(sub(r5, r6, Operand(r10, LSL, 30), SetCC, cc),
- "30565f0a subccs r5, r6, sl, lsl #30");
+ "30565f0a subccs r5, r6, r10, lsl #30");
COMPARE(sub(r5, r6, Operand(r10, LSL, 24), LeaveCC, lo),
- "30465c0a subcc r5, r6, sl, lsl #24");
+ "30465c0a subcc r5, r6, r10, lsl #24");
COMPARE(sub(r5, r6, Operand(r10, LSL, 16), SetCC, mi),
- "4056580a submis r5, r6, sl, lsl #16");
+ "4056580a submis r5, r6, r10, lsl #16");
COMPARE(rsb(r6, r7, Operand(fp)),
"e067600b rsb r6, r7, fp");
@@ -163,7 +163,7 @@ TEST(Type0) {
COMPARE(sbc(r7, r9, Operand(ip, ROR, 4)),
"e0c9726c sbc r7, r9, ip, ror #4");
COMPARE(sbc(r7, r10, Operand(ip), SetCC),
- "e0da700c sbcs r7, sl, ip");
+ "e0da700c sbcs r7, r10, ip");
COMPARE(sbc(r7, ip, Operand(ip, ROR, 31), SetCC, hi),
"80dc7fec sbchis r7, ip, ip, ror #31");
@@ -240,7 +240,7 @@ TEST(Type0) {
"51d10004 bicpls r0, r1, r4");
COMPARE(mvn(r10, Operand(r1)),
- "e1e0a001 mvn sl, r1");
+ "e1e0a001 mvn r10, r1");
COMPARE(mvn(r9, Operand(r2)),
"e1e09002 mvn r9, r2");
COMPARE(mvn(r0, Operand(r3), SetCC),
diff --git a/V8Binding/v8/test/cctest/test-log-stack-tracer.cc b/V8Binding/v8/test/cctest/test-log-stack-tracer.cc
index 1ef0a93..43df6ba 100644
--- a/V8Binding/v8/test/cctest/test-log-stack-tracer.cc
+++ b/V8Binding/v8/test/cctest/test-log-stack-tracer.cc
@@ -336,8 +336,10 @@ static void CFuncDoTrace() {
#elif defined _MSC_VER && defined V8_TARGET_ARCH_IA32
__asm mov [fp], ebp // NOLINT
#elif defined _MSC_VER && defined V8_TARGET_ARCH_X64
- // FIXME: I haven't really tried to compile it.
- __asm movq [fp], rbp // NOLINT
+ // TODO(X64): __asm extension is not supported by the Microsoft Visual C++
+ // 64-bit compiler.
+ fp = 0;
+ UNIMPLEMENTED();
#endif
DoTrace(fp);
}
diff --git a/V8Binding/v8/test/cctest/test-log.cc b/V8Binding/v8/test/cctest/test-log.cc
index df58234..5884a41 100644
--- a/V8Binding/v8/test/cctest/test-log.cc
+++ b/V8Binding/v8/test/cctest/test-log.cc
@@ -685,7 +685,7 @@ TEST(EquivalenceOfLoggingAndTraversal) {
" obj.test =\n"
" (function a(j) { return function b() { return j; } })(100);\n"
"})(this);");
- i::Heap::CollectAllGarbage();
+ i::Heap::CollectAllGarbage(false);
EmbeddedVector<char, 204800> buffer;
int log_size;
diff --git a/V8Binding/v8/test/cctest/test-serialize.cc b/V8Binding/v8/test/cctest/test-serialize.cc
index 36f051f..6939a80 100644
--- a/V8Binding/v8/test/cctest/test-serialize.cc
+++ b/V8Binding/v8/test/cctest/test-serialize.cc
@@ -125,12 +125,14 @@ TEST(ExternalReferenceEncoder) {
encoder.Encode(the_hole_value_location.address()));
ExternalReference stack_guard_limit_address =
ExternalReference::address_of_stack_guard_limit();
- CHECK_EQ(make_code(UNCLASSIFIED, 3),
+ CHECK_EQ(make_code(UNCLASSIFIED, 4),
encoder.Encode(stack_guard_limit_address.address()));
- CHECK_EQ(make_code(UNCLASSIFIED, 5),
+ CHECK_EQ(make_code(UNCLASSIFIED, 10),
encoder.Encode(ExternalReference::debug_break().address()));
CHECK_EQ(make_code(UNCLASSIFIED, 6),
encoder.Encode(ExternalReference::new_space_start().address()));
+ CHECK_EQ(make_code(UNCLASSIFIED, 3),
+ encoder.Encode(ExternalReference::roots_address().address()));
}
@@ -157,9 +159,9 @@ TEST(ExternalReferenceDecoder) {
CHECK_EQ(ExternalReference::the_hole_value_location().address(),
decoder.Decode(make_code(UNCLASSIFIED, 2)));
CHECK_EQ(ExternalReference::address_of_stack_guard_limit().address(),
- decoder.Decode(make_code(UNCLASSIFIED, 3)));
+ decoder.Decode(make_code(UNCLASSIFIED, 4)));
CHECK_EQ(ExternalReference::debug_break().address(),
- decoder.Decode(make_code(UNCLASSIFIED, 5)));
+ decoder.Decode(make_code(UNCLASSIFIED, 10)));
CHECK_EQ(ExternalReference::new_space_start().address(),
decoder.Decode(make_code(UNCLASSIFIED, 6)));
}
diff --git a/V8Binding/v8/test/cctest/test-strings.cc b/V8Binding/v8/test/cctest/test-strings.cc
index 3065ba1..127b7a2 100644
--- a/V8Binding/v8/test/cctest/test-strings.cc
+++ b/V8Binding/v8/test/cctest/test-strings.cc
@@ -480,7 +480,7 @@ TEST(Regress9746) {
// symbol entry in the symbol table because it is used by the script
// kept alive by the weak wrapper. Make sure we don't destruct the
// external string.
- Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage(false);
CHECK(!resource_destructed);
{
@@ -499,7 +499,7 @@ TEST(Regress9746) {
// Forcing another garbage collection should let us get rid of the
// slice from the symbol table. The external string remains in the
// heap until the next GC.
- Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage(false);
CHECK(!resource_destructed);
v8::HandleScope scope;
Handle<String> key_string = Factory::NewStringFromAscii(key_vector);
@@ -508,7 +508,7 @@ TEST(Regress9746) {
// Forcing yet another garbage collection must allow us to finally
// get rid of the external string.
- Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage(false);
CHECK(resource_destructed);
delete[] source;
diff --git a/V8Binding/v8/test/mjsunit/date-parse.js b/V8Binding/v8/test/mjsunit/date-parse.js
index bb7ecd2..4bbb2c6 100644
--- a/V8Binding/v8/test/mjsunit/date-parse.js
+++ b/V8Binding/v8/test/mjsunit/date-parse.js
@@ -250,8 +250,8 @@ testCasesMisc.forEach(testDateParseMisc);
// Test that we can parse our own date format.
-// (Dates from 1970 to ~2070 with 95h steps.)
-for (var i = 0; i < 24 * 365 * 100; i += 95) {
+// (Dates from 1970 to ~2070 with 150h steps.)
+for (var i = 0; i < 24 * 365 * 100; i += 150) {
var ms = i * (3600 * 1000);
var s = (new Date(ms)).toString();
assertEquals(ms, Date.parse(s), "parse own: " + s);
diff --git a/V8Binding/v8/test/mjsunit/debug-stepin-constructor.js b/V8Binding/v8/test/mjsunit/debug-stepin-constructor.js
index 6dbe5d1..6ee3347 100644
--- a/V8Binding/v8/test/mjsunit/debug-stepin-constructor.js
+++ b/V8Binding/v8/test/mjsunit/debug-stepin-constructor.js
@@ -59,6 +59,10 @@ function f() {
break_break_point_hit_count = 0;
f();
assertEquals(5, break_break_point_hit_count);
+f();
+assertEquals(10, break_break_point_hit_count);
+f();
+assertEquals(15, break_break_point_hit_count);
// Test step into constructor with builtin constructor.
function g() {
diff --git a/V8Binding/v8/test/mjsunit/mjsunit.status b/V8Binding/v8/test/mjsunit/mjsunit.status
index 4bf67e8..6ac4938 100644
--- a/V8Binding/v8/test/mjsunit/mjsunit.status
+++ b/V8Binding/v8/test/mjsunit/mjsunit.status
@@ -52,7 +52,7 @@ debug-evaluate-recursive: CRASH || FAIL
debug-changebreakpoint: CRASH || FAIL
debug-clearbreakpoint: CRASH || FAIL
debug-clearbreakpointgroup: PASS, FAIL if $mode == debug
-debug-conditional-breakpoints: FAIL
+debug-conditional-breakpoints: CRASH || FAIL
debug-evaluate: CRASH || FAIL
debug-ignore-breakpoints: CRASH || FAIL
debug-multiple-breakpoints: CRASH || FAIL
diff --git a/V8Binding/v8/test/mjsunit/simple-constructor.js b/V8Binding/v8/test/mjsunit/simple-constructor.js
index b26d651..e9ae921 100755
--- a/V8Binding/v8/test/mjsunit/simple-constructor.js
+++ b/V8Binding/v8/test/mjsunit/simple-constructor.js
@@ -53,9 +53,11 @@ function f4(x) {
}
o1_1 = new f1();
+assertEquals(1, o1_1.x, "1");
o1_2 = new f1();
-assertArrayEquals(["x"], props(o1_1));
-assertArrayEquals(["x"], props(o1_2));
+assertEquals(1, o1_1.x, "2");
+assertArrayEquals(["x"], props(o1_1), "3");
+assertArrayEquals(["x"], props(o1_2), "4");
o2_1 = new f2(0);
o2_2 = new f2(0);
@@ -76,3 +78,63 @@ o4_1_1 = new f4(1);
o4_1_2 = new f4(1);
assertArrayEquals(["x", "y"], props(o4_1_1));
assertArrayEquals(["x", "y"], props(o4_1_2));
+
+function f5(x, y) {
+ this.x = x;
+ this.y = y;
+}
+
+function f6(x, y) {
+ this.y = y;
+ this.x = x;
+}
+
+function f7(x, y, z) {
+ this.x = x;
+ this.y = y;
+}
+
+function testArgs(fun) {
+ obj = new fun();
+ assertArrayEquals(["x", "y"], props(obj));
+ assertEquals(void 0, obj.x);
+ assertEquals(void 0, obj.y);
+
+ obj = new fun("x");
+ assertArrayEquals(["x", "y"], props(obj));
+ assertEquals("x", obj.x);
+ assertEquals(void 0, obj.y);
+
+ obj = new fun("x", "y");
+ assertArrayEquals(["x", "y"], props(obj));
+ assertEquals("x", obj.x);
+ assertEquals("y", obj.y);
+
+ obj = new fun("x", "y", "z");
+ assertArrayEquals(["x", "y"], props(obj));
+ assertEquals("x", obj.x);
+ assertEquals("y", obj.y);
+}
+
+for (var i = 0; i < 10; i++) {
+ testArgs(f5);
+ testArgs(f6);
+ testArgs(f7);
+}
+
+function g() {
+  this.x = 1;
+}
+
+o = new g();
+assertEquals(1, o.x);
+o = new g();
+assertEquals(1, o.x);
+g.prototype = {y: 2};
+o = new g();
+assertEquals(1, o.x);
+assertEquals(2, o.y);
+o = new g();
+assertEquals(1, o.x);
+assertEquals(2, o.y);
+
diff --git a/V8Binding/v8/test/mozilla/mozilla.status b/V8Binding/v8/test/mozilla/mozilla.status
index a1551dc..41395b3 100644
--- a/V8Binding/v8/test/mozilla/mozilla.status
+++ b/V8Binding/v8/test/mozilla/mozilla.status
@@ -171,7 +171,7 @@ js1_5/Regress/regress-98901: PASS || FAIL
# Tests that sorting arrays of ints is less than 3 times as fast
# as sorting arrays of strings.
-js1_5/extensions/regress-371636: PASS || FAIL
+js1_5/extensions/regress-371636: PASS || FAIL || TIMEOUT if $mode == debug
# Tests depend on GC timings. Inherently flaky.
@@ -624,7 +624,6 @@ js1_5/extensions/regress-333541: FAIL_OK
js1_5/extensions/regress-335700: FAIL_OK
js1_5/extensions/regress-336409-1: FAIL_OK
js1_5/extensions/regress-336409-2: FAIL_OK
-js1_5/extensions/regress-336410-1: FAIL_OK
js1_5/extensions/regress-336410-2: FAIL_OK
js1_5/extensions/regress-341956-01: FAIL_OK
js1_5/extensions/regress-341956-02: FAIL_OK
@@ -706,6 +705,11 @@ js1_5/extensions/toLocaleFormat-02: FAIL_OK
js1_5/extensions/regress-330569: TIMEOUT
js1_5/extensions/regress-351448: TIMEOUT
js1_5/extensions/regress-342960: FAIL_OK || TIMEOUT if $mode == debug
+# In the 64-bit version, this test takes longer to run out of memory
+# than it does in the 32-bit version when attempting to generate a huge
+# error message in debug mode.
+js1_5/extensions/regress-336410-1: FAIL_OK || TIMEOUT if ($mode == debug && $arch == x64)
+
##################### DECOMPILATION TESTS #####################
diff --git a/V8Binding/v8/tools/gyp/v8.gyp b/V8Binding/v8/tools/gyp/v8.gyp
index b0c3331..037efa7 100644
--- a/V8Binding/v8/tools/gyp/v8.gyp
+++ b/V8Binding/v8/tools/gyp/v8.gyp
@@ -66,6 +66,7 @@
'DEBUG',
'_DEBUG',
'ENABLE_DISASSEMBLER',
+ 'V8_ENABLE_CHECKS'
],
'msvs_settings': {
'VCCLCompilerTool': {
@@ -97,9 +98,15 @@
],
}],
],
- 'cflags_cc': [
- '-fno-rtti',
- ],
+ }],
+ ['OS=="mac"', {
+ 'xcode_settings': {
+ 'GCC_OPTIMIZATION_LEVEL': '3', # -O3
+ 'GCC_STRICT_ALIASING': 'YES', # -fstrict-aliasing. Mainline gcc
+ # enables this at -O2 and above,
+ # but Apple gcc does not unless it
+ # is specified explicitly.
+ },
}],
['OS=="win"', {
'msvs_configuration_attributes': {
@@ -128,10 +135,6 @@
],
},
},
- 'xcode_settings': {
- 'GCC_ENABLE_CPP_EXCEPTIONS': 'NO',
- 'GCC_ENABLE_CPP_RTTI': 'NO',
- },
},
'targets': [
{
@@ -387,7 +390,7 @@
'../../src/arm/assembler-arm.cc',
'../../src/arm/assembler-arm.h',
'../../src/arm/builtins-arm.cc',
- '../../src/arm/cfg-arm.cc',
+ '../../src/arm/cfg-arm.cc',
'../../src/arm/codegen-arm.cc',
'../../src/arm/codegen-arm.h',
'../../src/arm/constants-arm.h',
@@ -418,7 +421,7 @@
'../../src/ia32/assembler-ia32.cc',
'../../src/ia32/assembler-ia32.h',
'../../src/ia32/builtins-ia32.cc',
- '../../src/ia32/cfg-ia32.cc',
+ '../../src/ia32/cfg-ia32.cc',
'../../src/ia32/codegen-ia32.cc',
'../../src/ia32/codegen-ia32.h',
'../../src/ia32/cpu-ia32.cc',
@@ -451,7 +454,7 @@
'../../src/x64/assembler-x64.cc',
'../../src/x64/assembler-x64.h',
'../../src/x64/builtins-x64.cc',
- '../../src/x64/cfg-x64.cc',
+ '../../src/x64/cfg-x64.cc',
'../../src/x64/codegen-x64.cc',
'../../src/x64/codegen-x64.h',
'../../src/x64/cpu-x64.cc',
diff --git a/V8Binding/v8/tools/v8.xcodeproj/project.pbxproj b/V8Binding/v8/tools/v8.xcodeproj/project.pbxproj
index 45e6361..f9241f9 100644
--- a/V8Binding/v8/tools/v8.xcodeproj/project.pbxproj
+++ b/V8Binding/v8/tools/v8.xcodeproj/project.pbxproj
@@ -1394,6 +1394,7 @@
GCC_PREPROCESSOR_DEFINITIONS = (
"$(GCC_PREPROCESSOR_DEFINITIONS)",
DEBUG,
+ V8_ENABLE_CHECKS,
);
GCC_SYMBOLS_PRIVATE_EXTERN = YES;
GCC_TREAT_WARNINGS_AS_ERRORS = YES;
@@ -1457,6 +1458,7 @@
V8_TARGET_ARCH_IA32,
V8_NATIVE_REGEXP,
DEBUG,
+ V8_ENABLE_CHECKS,
);
HEADER_SEARCH_PATHS = ../src;
PRODUCT_NAME = v8_shell;
diff --git a/V8Binding/v8/tools/visual_studio/d8_x64.vcproj b/V8Binding/v8/tools/visual_studio/d8_x64.vcproj
index dd2b83d..5c47a8a 100644
--- a/V8Binding/v8/tools/visual_studio/d8_x64.vcproj
+++ b/V8Binding/v8/tools/visual_studio/d8_x64.vcproj
@@ -50,6 +50,7 @@
<Tool
Name="VCLinkerTool"
AdditionalDependencies="winmm.lib Ws2_32.lib"
+ TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
@@ -111,6 +112,7 @@
<Tool
Name="VCLinkerTool"
AdditionalDependencies="winmm.lib Ws2_32.lib"
+ TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
diff --git a/V8Binding/v8/tools/visual_studio/debug.vsprops b/V8Binding/v8/tools/visual_studio/debug.vsprops
index 0abf924..5e3555a 100644
--- a/V8Binding/v8/tools/visual_studio/debug.vsprops
+++ b/V8Binding/v8/tools/visual_studio/debug.vsprops
@@ -7,7 +7,7 @@
<Tool
Name="VCCLCompilerTool"
Optimization="0"
- PreprocessorDefinitions="DEBUG;_DEBUG;ENABLE_DISASSEMBLER"
+ PreprocessorDefinitions="DEBUG;_DEBUG;ENABLE_DISASSEMBLER;V8_ENABLE_CHECKS"
RuntimeLibrary="1"
/>
<Tool
diff --git a/V8Binding/v8/tools/visual_studio/v8_cctest_x64.vcproj b/V8Binding/v8/tools/visual_studio/v8_cctest_x64.vcproj
index fc7ac4b..d0fbac6 100644
--- a/V8Binding/v8/tools/visual_studio/v8_cctest_x64.vcproj
+++ b/V8Binding/v8/tools/visual_studio/v8_cctest_x64.vcproj
@@ -50,6 +50,7 @@
<Tool
Name="VCLinkerTool"
AdditionalDependencies="winmm.lib Ws2_32.lib"
+ TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
@@ -111,6 +112,7 @@
<Tool
Name="VCLinkerTool"
AdditionalDependencies="winmm.lib Ws2_32.lib"
+ TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
diff --git a/V8Binding/v8/tools/visual_studio/v8_shell_sample_x64.vcproj b/V8Binding/v8/tools/visual_studio/v8_shell_sample_x64.vcproj
index ab276f4..e1d5164 100644
--- a/V8Binding/v8/tools/visual_studio/v8_shell_sample_x64.vcproj
+++ b/V8Binding/v8/tools/visual_studio/v8_shell_sample_x64.vcproj
@@ -50,6 +50,7 @@
<Tool
Name="VCLinkerTool"
AdditionalDependencies="winmm.lib Ws2_32.lib"
+ TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
@@ -111,6 +112,7 @@
<Tool
Name="VCLinkerTool"
AdditionalDependencies="winmm.lib Ws2_32.lib"
+ TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
diff --git a/WEBKIT_MERGE_REVISION b/WEBKIT_MERGE_REVISION
index 27dc5b3..92b1f9c 100644
--- a/WEBKIT_MERGE_REVISION
+++ b/WEBKIT_MERGE_REVISION
@@ -2,4 +2,4 @@ We sync with Chromium release revision, which has both webkit revision and V8 re
http://src.chromium.org/svn/branches/187/src@18043
http://svn.webkit.org/repository/webkit/trunk@47029
- http://v8.googlecode.com/svn/branches/bleeding_edge@2727
+ http://v8.googlecode.com/svn/branches/bleeding_edge@2780