-rw-r--r--  V8Binding/v8/ChangeLog | 28
-rw-r--r--  V8Binding/v8/LICENSE | 6
-rw-r--r--  V8Binding/v8/SConstruct | 11
-rw-r--r--  V8Binding/v8/include/v8-debug.h | 7
-rw-r--r--  V8Binding/v8/include/v8.h | 61
-rwxr-xr-x  V8Binding/v8/src/SConscript | 44
-rw-r--r--  V8Binding/v8/src/api.cc | 105
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm.cc | 145
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm.h | 9
-rw-r--r--  V8Binding/v8/src/arm/jump-target-arm.cc | 336
-rw-r--r--  V8Binding/v8/src/arm/virtual-frame-arm.cc | 126
-rw-r--r--  V8Binding/v8/src/arm/virtual-frame-arm.h | 58
-rw-r--r--  V8Binding/v8/src/cfg.cc | 142
-rw-r--r--  V8Binding/v8/src/cfg.h | 140
-rw-r--r--  V8Binding/v8/src/codegen.cc | 29
-rw-r--r--  V8Binding/v8/src/compilation-cache.cc | 1
-rw-r--r--  V8Binding/v8/src/compiler.cc | 40
-rw-r--r--  V8Binding/v8/src/compiler.h | 4
-rw-r--r--  V8Binding/v8/src/d8.cc | 36
-rw-r--r--  V8Binding/v8/src/d8.h | 2
-rw-r--r--  V8Binding/v8/src/execution.cc | 7
-rw-r--r--  V8Binding/v8/src/factory.cc | 6
-rw-r--r--  V8Binding/v8/src/factory.h | 3
-rw-r--r--  V8Binding/v8/src/flag-definitions.h | 3
-rw-r--r--  V8Binding/v8/src/handles-inl.h | 2
-rw-r--r--  V8Binding/v8/src/handles.h | 19
-rw-r--r--  V8Binding/v8/src/heap.cc | 28
-rw-r--r--  V8Binding/v8/src/heap.h | 6
-rw-r--r--  V8Binding/v8/src/ia32/codegen-ia32.h | 9
-rw-r--r--  V8Binding/v8/src/ia32/cpu-ia32.cc | 13
-rw-r--r--  V8Binding/v8/src/ia32/jump-target-ia32.cc | 64
-rw-r--r--  V8Binding/v8/src/ia32/macro-assembler-ia32.cc | 4
-rw-r--r--  V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc | 250
-rw-r--r--  V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h | 59
-rw-r--r--  V8Binding/v8/src/ia32/virtual-frame-ia32.cc | 1
-rw-r--r--  V8Binding/v8/src/jsregexp.cc | 79
-rw-r--r--  V8Binding/v8/src/jump-target.cc | 61
-rw-r--r--  V8Binding/v8/src/math.js | 2
-rw-r--r--  V8Binding/v8/src/messages.js | 143
-rw-r--r--  V8Binding/v8/src/parser.cc | 12
-rw-r--r--  V8Binding/v8/src/parser.h | 3
-rw-r--r--  V8Binding/v8/src/regexp-macro-assembler-irregexp.cc | 31
-rw-r--r--  V8Binding/v8/src/regexp-macro-assembler-irregexp.h | 12
-rw-r--r--  V8Binding/v8/src/regexp-macro-assembler-tracer.cc | 58
-rw-r--r--  V8Binding/v8/src/regexp-macro-assembler-tracer.h | 14
-rw-r--r--  V8Binding/v8/src/regexp-macro-assembler.cc | 190
-rw-r--r--  V8Binding/v8/src/regexp-macro-assembler.h | 102
-rw-r--r--  V8Binding/v8/src/regexp-stack.h | 4
-rw-r--r--  V8Binding/v8/src/runtime.cc | 41
-rw-r--r--  V8Binding/v8/src/runtime.h | 2
-rw-r--r--  V8Binding/v8/src/scanner.cc | 97
-rw-r--r--  V8Binding/v8/src/scanner.h | 57
-rw-r--r--  V8Binding/v8/src/spaces.cc | 107
-rw-r--r--  V8Binding/v8/src/spaces.h | 37
-rw-r--r--  V8Binding/v8/src/third_party/valgrind/valgrind.h | 3924
-rw-r--r--  V8Binding/v8/src/uri.js | 16
-rw-r--r--  V8Binding/v8/src/v8.cc | 9
-rw-r--r--  V8Binding/v8/src/v8.h | 3
-rw-r--r--  V8Binding/v8/src/v8natives.js | 7
-rw-r--r--  V8Binding/v8/src/version.cc | 2
-rw-r--r--  V8Binding/v8/src/x64/assembler-x64.cc | 119
-rw-r--r--  V8Binding/v8/src/x64/assembler-x64.h | 87
-rw-r--r--  V8Binding/v8/src/x64/codegen-x64.cc | 242
-rw-r--r--  V8Binding/v8/src/x64/codegen-x64.h | 9
-rw-r--r--  V8Binding/v8/src/x64/disasm-x64.cc | 3
-rw-r--r--  V8Binding/v8/src/x64/jump-target-x64.cc | 64
-rw-r--r--  V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc | 1274
-rw-r--r--  V8Binding/v8/src/x64/regexp-macro-assembler-x64.h | 268
-rw-r--r--  V8Binding/v8/src/x64/virtual-frame-x64.cc | 1
-rw-r--r--  V8Binding/v8/test/cctest/cctest.status | 1
-rw-r--r--  V8Binding/v8/test/cctest/test-api.cc | 28
-rw-r--r--  V8Binding/v8/test/cctest/test-debug.cc | 17
-rw-r--r--  V8Binding/v8/test/cctest/test-regexp.cc | 443
-rw-r--r--  V8Binding/v8/test/mjsunit/json.js | 10
-rw-r--r--  V8Binding/v8/test/mjsunit/mjsunit.js | 8
-rw-r--r--  V8Binding/v8/test/mjsunit/stack-traces.js | 63
-rw-r--r--  V8Binding/v8/test/mjsunit/tools/logreader.js | 16
-rw-r--r--  V8Binding/v8/test/mjsunit/tools/tickprocessor.js | 72
-rw-r--r--  V8Binding/v8/test/mozilla/mozilla.status | 18
-rw-r--r--  V8Binding/v8/tools/gyp/v8.gyp | 18
-rw-r--r--  V8Binding/v8/tools/logreader.js | 7
-rwxr-xr-x  V8Binding/v8/tools/mac-nm | 4
-rw-r--r--  V8Binding/v8/tools/tickprocessor.js | 33
-rw-r--r--  WEBKIT_MERGE_REVISION | 2
84 files changed, 7873 insertions(+), 1751 deletions(-)
diff --git a/V8Binding/v8/ChangeLog b/V8Binding/v8/ChangeLog
index 03b96f1..4bfd8d5 100644
--- a/V8Binding/v8/ChangeLog
+++ b/V8Binding/v8/ChangeLog
@@ -1,3 +1,31 @@
+2009-08-13: Version 1.3.4
+
+ Added a readline() command to the d8 shell.
+
+ Fixed a bug in JSON parsing.
+
+ Added idle notification to the API and reduced memory on idle
+ notifications.
+
+
+2009-08-12: Version 1.3.3
+
+ Fix issue 417: incorrect %t placeholder expansion.
+
+ Add a .gitignore file similar to Chromium's.
+
+ Fix SConstruct file to build with new logging code for Android.
+
+ API: added function to find instance of template in prototype
+ chain. Inlined Object::IsInstanceOf.
+
+ Land change to notify valgrind when we modify code on x86.
+
+ Add api call to determine whether a string can be externalized.
+
+ Add a write() command to d8.
+
+
2009-08-05: Version 1.3.2
Started new compiler infrastructure for two-pass compilation using a
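The idle notification mentioned in the 1.3.4 entry is the embedder-facing half of the memory reduction (readline() and write() are d8 shell additions on the JavaScript side). A minimal sketch of how an embedder might drive the new API, assuming V8 is already initialized; OnEmbedderIdle is an illustrative hook name, not part of this change:

    #include <v8.h>

    // Called by the embedder whenever its event loop runs out of work.
    // V8 uses the notification to reduce its memory footprint; the flag
    // tells V8 whether the embedder is currently high priority.
    void OnEmbedderIdle(bool is_high_priority) {
      v8::V8::IdleNotification(is_high_priority);
    }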
diff --git a/V8Binding/v8/LICENSE b/V8Binding/v8/LICENSE
index 7301556..553cf47 100644
--- a/V8Binding/v8/LICENSE
+++ b/V8Binding/v8/LICENSE
@@ -20,6 +20,9 @@ are:
copyrighted by Douglas Crockford and Baruch Even and released under
an MIT license.
+ - Valgrind client API header, located at third_party/valgrind/valgrind.h
+ This is released under the BSD license.
+
These libraries have their own licenses; we recommend you read them,
as their terms may differ from the terms below.
diff --git a/V8Binding/v8/SConstruct b/V8Binding/v8/SConstruct
index c981ef9..efd34db 100644
--- a/V8Binding/v8/SConstruct
+++ b/V8Binding/v8/SConstruct
@@ -101,6 +101,9 @@ LIBRARY_FLAGS = {
'regexp:native': {
'arch:ia32' : {
'CPPDEFINES': ['V8_NATIVE_REGEXP']
+ },
+ 'arch:x64' : {
+ 'CPPDEFINES': ['V8_NATIVE_REGEXP']
}
}
},
@@ -166,7 +169,7 @@ LIBRARY_FLAGS = {
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
- 'CCFLAGS': ['-fno-strict-aliasing', '-m64'],
+ 'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64'],
},
'prof:oprofile': {
@@ -716,7 +719,11 @@ class BuildContext(object):
result = []
result += source.get('all', [])
for (name, value) in self.options.iteritems():
- result += source.get(name + ':' + value, [])
+ source_value = source.get(name + ':' + value, [])
+ if type(source_value) == dict:
+ result += self.GetRelevantSources(source_value)
+ else:
+ result += source_value
return sorted(result)
def AppendFlags(self, options, added):
diff --git a/V8Binding/v8/include/v8-debug.h b/V8Binding/v8/include/v8-debug.h
index 345d331..3c5c923 100644
--- a/V8Binding/v8/include/v8-debug.h
+++ b/V8Binding/v8/include/v8-debug.h
@@ -228,9 +228,14 @@ class EXPORT Debug {
* }
* \endcode
*/
- static Handle<Value> Call(v8::Handle<v8::Function> fun,
+ static Local<Value> Call(v8::Handle<v8::Function> fun,
Handle<Value> data = Handle<Value>());
+ /**
+ * Returns a mirror object for the given object.
+ */
+ static Local<Value> GetMirror(v8::Handle<v8::Value> obj);
+
/**
* Enable the V8 builtin debug agent. The debugger agent will listen on the
* supplied TCP/IP port for remote debugger connection.
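Debug::Call now returns a Local<Value>, and GetMirror is new. A sketch of fetching a debugger mirror for a value, assuming the debugger support is compiled in and a context is entered; DescribeValue is an illustrative helper, not part of this diff:

    #include <v8.h>
    #include <v8-debug.h>

    // Returns the debug-context mirror object for |value|, or an empty
    // handle on failure (e.g. V8 is not running).
    v8::Local<v8::Value> DescribeValue(v8::Handle<v8::Value> value) {
      v8::HandleScope scope;
      v8::Local<v8::Value> mirror = v8::Debug::GetMirror(value);
      if (mirror.IsEmpty()) return v8::Local<v8::Value>();
      return scope.Close(mirror);
    }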
diff --git a/V8Binding/v8/include/v8.h b/V8Binding/v8/include/v8.h
index 6815ec9..b931976 100644
--- a/V8Binding/v8/include/v8.h
+++ b/V8Binding/v8/include/v8.h
@@ -513,10 +513,36 @@ class V8EXPORT ScriptOrigin {
class V8EXPORT Script {
public:
+ /**
+ * Compiles the specified script. The ScriptOrigin* and ScriptData*
+ * parameters are owned by the caller of Script::Compile. No
+ * references to these objects are kept after compilation finishes.
+ *
+ * The script object returned is context independent; when run it
+ * will use the currently entered context.
+ */
+ static Local<Script> New(Handle<String> source,
+ ScriptOrigin* origin = NULL,
+ ScriptData* pre_data = NULL);
+
+ /**
+ * Compiles the specified script using the specified file name
+ * object (typically a string) as the script's origin.
+ *
+ * The script object returned is context independent; when run it
+ * will use the currently entered context.
+ */
+ static Local<Script> New(Handle<String> source,
+ Handle<Value> file_name);
+
/**
* Compiles the specified script. The ScriptOrigin* and ScriptData*
* parameters are owned by the caller of Script::Compile. No
* references to these objects are kept after compilation finishes.
+ *
+ * The script object returned is bound to the context that was active
+ * when this function was called. When run it will always use this
+ * context.
*/
static Local<Script> Compile(Handle<String> source,
ScriptOrigin* origin = NULL,
@@ -525,12 +551,20 @@ class V8EXPORT Script {
/**
* Compiles the specified script using the specified file name
* object (typically a string) as the script's origin.
+ *
+ * The script object returned is bound to the context that was active
+ * when this function was called. When run it will always use this
+ * context.
*/
static Local<Script> Compile(Handle<String> source,
Handle<Value> file_name);
/**
- * Runs the script returning the resulting value.
+ * Runs the script returning the resulting value. If the script is
+ * context independent (created using ::New) it will be run in the
+ * currently entered context. If it is context specific (created
+ * using ::Compile) it will be run in the context in which it was
+ * compiled.
*/
Local<Value> Run();
@@ -901,6 +935,11 @@ class V8EXPORT String : public Primitive {
*/
bool MakeExternal(ExternalAsciiStringResource* resource);
+ /**
+ * Returns true if this string can be made external.
+ */
+ bool CanMakeExternal();
+
/** Creates an undetectable string from the supplied ascii or utf-8 data.*/
static Local<String> NewUndetectable(const char* data, int length = -1);
@@ -2185,11 +2224,7 @@ class V8EXPORT V8 {
static int GetLogLines(int from_pos, char* dest_buf, int max_size);
#if defined(ANDROID)
- /**
- * Android system sends the browser low memory signal, and it is good to
- * collect JS heap, which can free some DOM nodes.
- * This function should ONLY be used by the browser in low memory condition.
- */
+ // TODO: upstream to V8.
static void CollectAllGarbage();
#endif
@@ -2204,6 +2239,14 @@ class V8EXPORT V8 {
*/
static bool Dispose();
+
+ /**
+ * Optional notification that the embedder is idle.
+ * V8 uses the notification to reduce memory footprint.
+ * \param is_high_priority tells whether the embedder is high priority.
+ */
+ static void IdleNotification(bool is_high_priority);
+
private:
V8();
@@ -2251,6 +2294,12 @@ class V8EXPORT TryCatch {
Local<Value> Exception() const;
/**
+ * Returns the .stack property of the thrown object. If no .stack
+ * property is present an empty handle is returned.
+ */
+ Local<Value> StackTrace() const;
+
+ /**
* Returns the message associated with this exception. If there is
* no message associated an empty handle is returned.
*
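The comments above draw the new distinction: Script::New() produces a context-independent script, while Script::Compile() binds to the context entered at compile time, and TryCatch::StackTrace() exposes a thrown object's .stack property (populated by the stack-trace support landed in this change). A sketch under those assumptions; context_a and context_b are assumed to be previously created v8::Persistent<v8::Context> handles:

    #include <cstdio>
    #include <v8.h>

    void DemoContextIndependence(v8::Persistent<v8::Context> context_a,
                                 v8::Persistent<v8::Context> context_b) {
      v8::HandleScope scope;

      context_a->Enter();
      v8::Handle<v8::String> source = v8::String::New("this.x = 42");
      // Context independent: runs in whichever context is entered at Run().
      v8::Local<v8::Script> generic = v8::Script::New(source);
      generic->Run();   // defines x on context A's global object
      context_a->Exit();

      context_b->Enter();
      generic->Run();   // defines x on context B's global object
      context_b->Exit();

      // TryCatch::StackTrace() gives the thrown object's .stack property.
      context_a->Enter();
      v8::TryCatch try_catch;
      v8::Script::Compile(v8::String::New("throw new Error('boom')"))->Run();
      if (try_catch.HasCaught() && !try_catch.StackTrace().IsEmpty()) {
        v8::String::Utf8Value stack(try_catch.StackTrace());
        printf("%s\n", *stack);
      }
      context_a->Exit();
    }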
diff --git a/V8Binding/v8/src/SConscript b/V8Binding/v8/src/SConscript
index a9669a1..6a38c1a 100755
--- a/V8Binding/v8/src/SConscript
+++ b/V8Binding/v8/src/SConscript
@@ -63,24 +63,32 @@ SOURCES = {
'arm/register-allocator-arm.cc', 'arm/stub-cache-arm.cc',
'arm/virtual-frame-arm.cc'
],
- 'arch:ia32': [
- 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/cfg-ia32.cc',
- 'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
- 'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
- 'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
- 'ia32/regexp-macro-assembler-ia32.cc',
- 'ia32/register-allocator-ia32.cc', 'ia32/stub-cache-ia32.cc',
- 'ia32/virtual-frame-ia32.cc'
- ],
- 'arch:x64': [
- 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/cfg-x64.cc',
- 'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc',
- 'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
- 'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
- # 'x64/regexp-macro-assembler-x64.cc',
- 'x64/register-allocator-x64.cc',
- 'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc'
- ],
+ 'arch:ia32': {
+ 'all': [
+ 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/cfg-ia32.cc',
+ 'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
+ 'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
+ 'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
+ 'ia32/register-allocator-ia32.cc', 'ia32/stub-cache-ia32.cc',
+ 'ia32/virtual-frame-ia32.cc'
+ ],
+ 'regexp:native': [
+ 'ia32/regexp-macro-assembler-ia32.cc',
+ ]
+ },
+ 'arch:x64': {
+ 'all': [
+ 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/cfg-x64.cc',
+ 'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc',
+ 'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
+ 'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
+ 'x64/register-allocator-x64.cc',
+ 'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc'
+ ],
+ 'regexp:native': [
+ 'x64/regexp-macro-assembler-x64.cc'
+ ]
+ },
'simulator:arm': ['arm/simulator-arm.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
diff --git a/V8Binding/v8/src/api.cc b/V8Binding/v8/src/api.cc
index 3eb6d60..19d1173 100644
--- a/V8Binding/v8/src/api.cc
+++ b/V8Binding/v8/src/api.cc
@@ -1046,7 +1046,7 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
ScriptData* ScriptData::PreCompile(const char* input, int length) {
unibrow::Utf8InputBuffer<> buf(input, length);
- return i::PreParse(&buf, NULL);
+ return i::PreParse(i::Handle<i::String>(), &buf, NULL);
}
@@ -1058,11 +1058,11 @@ ScriptData* ScriptData::New(unsigned* data, int length) {
// --- S c r i p t ---
-Local<Script> Script::Compile(v8::Handle<String> source,
- v8::ScriptOrigin* origin,
- v8::ScriptData* script_data) {
- ON_BAILOUT("v8::Script::Compile()", return Local<Script>());
- LOG_API("Script::Compile");
+Local<Script> Script::New(v8::Handle<String> source,
+ v8::ScriptOrigin* origin,
+ v8::ScriptData* script_data) {
+ ON_BAILOUT("v8::Script::New()", return Local<Script>());
+ LOG_API("Script::New");
ENTER_V8;
i::Handle<i::String> str = Utils::OpenHandle(*source);
i::Handle<i::Object> name_obj;
@@ -1096,6 +1096,27 @@ Local<Script> Script::Compile(v8::Handle<String> source,
pre_data);
has_pending_exception = boilerplate.is_null();
EXCEPTION_BAILOUT_CHECK(Local<Script>());
+ return Local<Script>(ToApi<Script>(boilerplate));
+}
+
+
+Local<Script> Script::New(v8::Handle<String> source,
+ v8::Handle<Value> file_name) {
+ ScriptOrigin origin(file_name);
+ return New(source, &origin);
+}
+
+
+Local<Script> Script::Compile(v8::Handle<String> source,
+ v8::ScriptOrigin* origin,
+ v8::ScriptData* script_data) {
+ ON_BAILOUT("v8::Script::Compile()", return Local<Script>());
+ LOG_API("Script::Compile");
+ ENTER_V8;
+ Local<Script> generic = New(source, origin, script_data);
+ if (generic.IsEmpty())
+ return generic;
+ i::Handle<i::JSFunction> boilerplate = Utils::OpenHandle(*generic);
i::Handle<i::JSFunction> result =
i::Factory::NewFunctionFromBoilerplate(boilerplate,
i::Top::global_context());
@@ -1118,6 +1139,10 @@ Local<Value> Script::Run() {
{
HandleScope scope;
i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
+ if (fun->IsBoilerplate()) {
+ fun = i::Factory::NewFunctionFromBoilerplate(fun,
+ i::Top::global_context());
+ }
EXCEPTION_PREAMBLE();
i::Handle<i::Object> receiver(i::Top::context()->global_proxy());
i::Handle<i::Object> result =
@@ -1194,6 +1219,22 @@ v8::Local<Value> v8::TryCatch::Exception() const {
}
+v8::Local<Value> v8::TryCatch::StackTrace() const {
+ if (HasCaught()) {
+ i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_);
+ if (!raw_obj->IsJSObject()) return v8::Local<Value>();
+ v8::HandleScope scope;
+ i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj));
+ i::Handle<i::String> name = i::Factory::LookupAsciiSymbol("stack");
+ if (!obj->HasProperty(*name))
+ return v8::Local<Value>();
+ return scope.Close(v8::Utils::ToLocal(i::GetProperty(obj, name)));
+ } else {
+ return v8::Local<Value>();
+ }
+}
+
+
v8::Local<v8::Message> v8::TryCatch::Message() const {
if (HasCaught() && message_ != i::Smi::FromInt(0)) {
i::Object* message = reinterpret_cast<i::Object*>(message_);
@@ -2558,6 +2599,10 @@ bool v8::V8::Dispose() {
}
+void v8::V8::IdleNotification(bool is_high_priority) {
+ i::V8::IdleNotification(is_high_priority);
+}
+
const char* v8::V8::GetVersion() {
static v8::internal::EmbeddedVector<char, 128> buffer;
v8::internal::Version::GetString(buffer);
@@ -2987,7 +3032,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
if (IsDeadCheck("v8::String::MakeExternal()")) return false;
if (this->IsExternal()) return false; // Already an external string.
ENTER_V8;
- i::Handle <i::String> obj = Utils::OpenHandle(this);
+ i::Handle<i::String> obj = Utils::OpenHandle(this);
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
// Operation was successful and the string is not a symbol. In this case
@@ -3023,7 +3068,7 @@ bool v8::String::MakeExternal(
if (IsDeadCheck("v8::String::MakeExternal()")) return false;
if (this->IsExternal()) return false; // Already an external string.
ENTER_V8;
- i::Handle <i::String> obj = Utils::OpenHandle(this);
+ i::Handle<i::String> obj = Utils::OpenHandle(this);
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
// Operation was successful and the string is not a symbol. In this case
@@ -3038,6 +3083,17 @@ bool v8::String::MakeExternal(
}
+bool v8::String::CanMakeExternal() {
+ if (IsDeadCheck("v8::String::CanMakeExternal()")) return false;
+ i::Handle<i::String> obj = Utils::OpenHandle(this);
+ int size = obj->Size(); // Byte size of the original string.
+ if (size < i::ExternalString::kSize)
+ return false;
+ i::StringShape shape(*obj);
+ return !shape.IsExternal();
+}
+
+
Local<v8::Object> v8::Object::New() {
EnsureInitialized("v8::Object::New()");
LOG_API("Object::New");
@@ -3299,9 +3355,12 @@ int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
}
+#if defined(ANDROID)
void V8::CollectAllGarbage() {
+ // TODO: call MarkCompact GC
i::Heap::CollectAllGarbage();
}
+#endif
String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj) {
@@ -3545,10 +3604,10 @@ void Debug::SetHostDispatchHandler(HostDispatchHandler handler,
}
-Handle<Value> Debug::Call(v8::Handle<v8::Function> fun,
- v8::Handle<v8::Value> data) {
- if (!i::V8::IsRunning()) return Handle<Value>();
- ON_BAILOUT("v8::Debug::Call()", return Handle<Value>());
+Local<Value> Debug::Call(v8::Handle<v8::Function> fun,
+ v8::Handle<v8::Value> data) {
+ if (!i::V8::IsRunning()) return Local<Value>();
+ ON_BAILOUT("v8::Debug::Call()", return Local<Value>());
ENTER_V8;
i::Handle<i::Object> result;
EXCEPTION_PREAMBLE();
@@ -3566,6 +3625,28 @@ Handle<Value> Debug::Call(v8::Handle<v8::Function> fun,
}
+Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
+ if (!i::V8::IsRunning()) return Local<Value>();
+ ON_BAILOUT("v8::Debug::GetMirror()", return Local<Value>());
+ ENTER_V8;
+ v8::HandleScope scope;
+ i::Debug::Load();
+ i::Handle<i::JSObject> debug(i::Debug::debug_context()->global());
+ i::Handle<i::String> name = i::Factory::LookupAsciiSymbol("MakeMirror");
+ i::Handle<i::Object> fun_obj = i::GetProperty(debug, name);
+ i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj);
+ v8::Handle<v8::Function> v8_fun = Utils::ToLocal(fun);
+ const int kArgc = 1;
+ v8::Handle<v8::Value> argv[kArgc] = { obj };
+ EXCEPTION_PREAMBLE();
+ v8::Handle<v8::Value> result = v8_fun->Call(Utils::ToLocal(debug),
+ kArgc,
+ argv);
+ EXCEPTION_BAILOUT_CHECK(Local<Value>());
+ return scope.Close(result);
+}
+
+
bool Debug::EnableAgent(const char* name, int port) {
return i::Debugger::StartAgent(name, port);
}
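api.cc above implements String::CanMakeExternal() as a size-and-shape check. A sketch of the intended usage pattern; MyStringResource is an assumed embedder class, not part of this diff:

    #include <v8.h>

    // An assumed embedder-side resource backing a two-byte external string.
    class MyStringResource : public v8::String::ExternalStringResource {
     public:
      MyStringResource(const uint16_t* data, size_t length)
          : data_(data), length_(length) {}
      virtual const uint16_t* data() const { return data_; }
      virtual size_t length() const { return length_; }
     private:
      const uint16_t* data_;
      size_t length_;
    };

    // Externalize only when V8 reports the string is eligible (large
    // enough and not already external).
    bool MaybeExternalize(v8::Handle<v8::String> str,
                          MyStringResource* resource) {
      return str->CanMakeExternal() && str->MakeExternal(resource);
    }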
diff --git a/V8Binding/v8/src/arm/codegen-arm.cc b/V8Binding/v8/src/arm/codegen-arm.cc
index 67d4611..d3507ec 100644
--- a/V8Binding/v8/src/arm/codegen-arm.cc
+++ b/V8Binding/v8/src/arm/codegen-arm.cc
@@ -1036,11 +1036,7 @@ void CodeGenerator::Comparison(Condition cc,
// We call with 0 args because there are 0 on the stack.
CompareStub stub(cc, strict);
frame_->CallStub(&stub, 0);
-
- Result result = allocator_->Allocate(r0);
- ASSERT(result.is_valid());
- __ cmp(result.reg(), Operand(0));
- result.Unuse();
+ __ cmp(r0, Operand(0));
exit.Jump();
// Do smi comparisons by pointer comparison.
@@ -1749,9 +1745,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
primitive.Bind();
frame_->EmitPush(r0);
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(0));
+ Result arg_count(r0);
+ __ mov(r0, Operand(0));
frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1);
jsobject.Bind();
@@ -1832,15 +1827,10 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
__ ldr(r0, frame_->ElementAt(4)); // push enumerable
frame_->EmitPush(r0);
frame_->EmitPush(r3); // push entry
- Result arg_count_register = allocator_->Allocate(r0);
- ASSERT(arg_count_register.is_valid());
- __ mov(arg_count_register.reg(), Operand(1));
- Result result = frame_->InvokeBuiltin(Builtins::FILTER_KEY,
- CALL_JS,
- &arg_count_register,
- 2);
- __ mov(r3, Operand(result.reg()));
- result.Unuse();
+ Result arg_count_reg(r0);
+ __ mov(r0, Operand(1));
+ frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, &arg_count_reg, 2);
+ __ mov(r3, Operand(r0));
// If the property has been removed while iterating, we just skip it.
__ cmp(r3, Operand(Factory::null_value()));
@@ -2433,9 +2423,8 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
// Load the global object.
LoadGlobal();
// Setup the name register.
- Result name = allocator_->Allocate(r2);
- ASSERT(name.is_valid()); // We are in spilled code.
- __ mov(name.reg(), Operand(slot->var()->name()));
+ Result name(r2);
+ __ mov(r2, Operand(slot->var()->name()));
// Call IC stub.
if (typeof_state == INSIDE_TYPEOF) {
frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, &name, 0);
@@ -2775,9 +2764,8 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
Comment cmnt(masm_, "[ CatchExtensionObject");
LoadAndSpill(node->key());
LoadAndSpill(node->value());
- Result result =
- frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- frame_->EmitPush(result.reg());
+ frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+ frame_->EmitPush(r0);
ASSERT(frame_->height() == original_height + 1);
}
@@ -3117,24 +3105,22 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
}
// r0: the number of arguments.
- Result num_args = allocator_->Allocate(r0);
- ASSERT(num_args.is_valid());
- __ mov(num_args.reg(), Operand(arg_count));
+ Result num_args(r0);
+ __ mov(r0, Operand(arg_count));
// Load the function into r1 as per calling convention.
- Result function = allocator_->Allocate(r1);
- ASSERT(function.is_valid());
- __ ldr(function.reg(), frame_->ElementAt(arg_count + 1));
+ Result function(r1);
+ __ ldr(r1, frame_->ElementAt(arg_count + 1));
// Call the construct call builtin that handles allocation and
// constructor invocation.
CodeForSourcePosition(node->position());
Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
- Result result = frame_->CallCodeObject(ic,
- RelocInfo::CONSTRUCT_CALL,
- &num_args,
- &function,
- arg_count + 1);
+ frame_->CallCodeObject(ic,
+ RelocInfo::CONSTRUCT_CALL,
+ &num_args,
+ &function,
+ arg_count + 1);
// Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
__ str(r0, frame_->Top());
@@ -3477,9 +3463,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
if (property != NULL) {
LoadAndSpill(property->obj());
LoadAndSpill(property->key());
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(1)); // not counting receiver
+ Result arg_count(r0);
+ __ mov(r0, Operand(1)); // not counting receiver
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
} else if (variable != NULL) {
@@ -3488,9 +3473,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
LoadGlobal();
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(1)); // not counting receiver
+ Result arg_count(r0);
+ __ mov(r0, Operand(1)); // not counting receiver
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -3503,9 +3487,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0);
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(1)); // not counting receiver
+ Result arg_count(r0);
+ __ mov(r0, Operand(1)); // not counting receiver
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
} else {
@@ -3556,9 +3539,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
smi_label.Branch(eq);
frame_->EmitPush(r0);
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(0)); // not counting receiver
+ Result arg_count(r0);
+ __ mov(r0, Operand(0)); // not counting receiver
frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1);
continue_label.Jump();
@@ -3581,9 +3563,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
__ tst(r0, Operand(kSmiTagMask));
continue_label.Branch(eq);
frame_->EmitPush(r0);
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(0)); // not counting receiver
+ Result arg_count(r0);
+ __ mov(r0, Operand(0)); // not counting receiver
frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
continue_label.Bind();
break;
@@ -3669,9 +3650,8 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
{
// Convert the operand to a number.
frame_->EmitPush(r0);
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(0));
+ Result arg_count(r0);
+ __ mov(r0, Operand(0));
frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
}
if (is_postfix) {
@@ -4048,14 +4028,10 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
case Token::IN: {
LoadAndSpill(left);
LoadAndSpill(right);
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(1)); // not counting receiver
- Result result = frame_->InvokeBuiltin(Builtins::IN,
- CALL_JS,
- &arg_count,
- 2);
- frame_->EmitPush(result.reg());
+ Result arg_count(r0);
+ __ mov(r0, Operand(1)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::IN, CALL_JS, &arg_count, 2);
+ frame_->EmitPush(r0);
break;
}
@@ -4063,9 +4039,9 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
LoadAndSpill(left);
LoadAndSpill(right);
InstanceofStub stub;
- Result result = frame_->CallStub(&stub, 2);
+ frame_->CallStub(&stub, 2);
// At this point if instanceof succeeded then r0 == 0.
- __ tst(result.reg(), Operand(result.reg()));
+ __ tst(r0, Operand(r0));
cc_reg_ = eq;
break;
}
@@ -4135,15 +4111,14 @@ void Reference::GetValue(TypeofState typeof_state) {
Variable* var = expression_->AsVariableProxy()->AsVariable();
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Setup the name register.
- Result name_reg = cgen_->allocator()->Allocate(r2);
- ASSERT(name_reg.is_valid());
- __ mov(name_reg.reg(), Operand(name));
+ Result name_reg(r2);
+ __ mov(r2, Operand(name));
ASSERT(var == NULL || var->is_global());
RelocInfo::Mode rmode = (var == NULL)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- Result answer = frame->CallCodeObject(ic, rmode, &name_reg, 0);
- frame->EmitPush(answer.reg());
+ frame->CallCodeObject(ic, rmode, &name_reg, 0);
+ frame->EmitPush(r0);
break;
}
@@ -4162,8 +4137,8 @@ void Reference::GetValue(TypeofState typeof_state) {
RelocInfo::Mode rmode = (var == NULL)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- Result answer = frame->CallCodeObject(ic, rmode, 0);
- frame->EmitPush(answer.reg());
+ frame->CallCodeObject(ic, rmode, 0);
+ frame->EmitPush(r0);
break;
}
@@ -4272,20 +4247,18 @@ void Reference::SetValue(InitState init_state) {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<String> name(GetName());
- Result value = cgen_->allocator()->Allocate(r0);
- ASSERT(value.is_valid());
- frame->EmitPop(value.reg());
+ Result value(r0);
+ frame->EmitPop(r0);
// Setup the name register.
- Result property_name = cgen_->allocator()->Allocate(r2);
- ASSERT(property_name.is_valid());
- __ mov(property_name.reg(), Operand(name));
- Result answer = frame->CallCodeObject(ic,
- RelocInfo::CODE_TARGET,
- &value,
- &property_name,
- 0);
- frame->EmitPush(answer.reg());
+ Result property_name(r2);
+ __ mov(r2, Operand(name));
+ frame->CallCodeObject(ic,
+ RelocInfo::CODE_TARGET,
+ &value,
+ &property_name,
+ 0);
+ frame->EmitPush(r0);
break;
}
@@ -4298,12 +4271,10 @@ void Reference::SetValue(InitState init_state) {
// Call IC code.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
// TODO(1222589): Make the IC grab the values from the stack.
- Result value = cgen_->allocator()->Allocate(r0);
- ASSERT(value.is_valid());
- frame->EmitPop(value.reg()); // value
- Result result =
- frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0);
- frame->EmitPush(result.reg());
+ Result value(r0);
+ frame->EmitPop(r0); // value
+ frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0);
+ frame->EmitPush(r0);
break;
}
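A recurring edit in this file: because ARM code generation keeps the virtual frame fully spilled, the register-allocator round-trip is unnecessary and a Result can be constructed directly on a known-free register. The pattern, isolated from the hunks above (shown as a commented excerpt, since it only compiles inside the codegen):

    // Before: go through the allocator and assert it handed back r0.
    //   Result arg_count = allocator_->Allocate(r0);
    //   ASSERT(arg_count.is_valid());
    //   __ mov(arg_count.reg(), Operand(0));
    //
    // After: r0 is known to be free in spilled code, so just name it.
    //   Result arg_count(r0);
    //   __ mov(r0, Operand(0));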
diff --git a/V8Binding/v8/src/arm/codegen-arm.h b/V8Binding/v8/src/arm/codegen-arm.h
index 80d1d56..70a7b27 100644
--- a/V8Binding/v8/src/arm/codegen-arm.h
+++ b/V8Binding/v8/src/arm/codegen-arm.h
@@ -152,14 +152,9 @@ class CodeGenerator: public AstVisitor {
#endif
static void SetFunctionInfo(Handle<JSFunction> fun,
- int length,
- int function_token_position,
- int start_position,
- int end_position,
- bool is_expression,
+ FunctionLiteral* lit,
bool is_toplevel,
- Handle<Script> script,
- Handle<String> inferred_name);
+ Handle<Script> script);
// Accessors
MacroAssembler* masm() { return masm_; }
diff --git a/V8Binding/v8/src/arm/jump-target-arm.cc b/V8Binding/v8/src/arm/jump-target-arm.cc
index a925c51..3315f83 100644
--- a/V8Binding/v8/src/arm/jump-target-arm.cc
+++ b/V8Binding/v8/src/arm/jump-target-arm.cc
@@ -47,23 +47,29 @@ void JumpTarget::DoJump() {
ASSERT(cgen()->HasValidEntryRegisters());
if (is_bound()) {
- // Backward jump. There is an expected frame to merge to.
+ // Backward jump. There is already a frame expectation at the target.
ASSERT(direction_ == BIDIRECTIONAL);
- cgen()->frame()->PrepareMergeTo(entry_frame_);
cgen()->frame()->MergeTo(entry_frame_);
cgen()->DeleteFrame();
- __ jmp(&entry_label_);
} else {
- // Preconfigured entry frame is not used on ARM.
- ASSERT(entry_frame_ == NULL);
- // Forward jump. The current frame is added to the end of the list
- // of frames reaching the target block and a jump to the merge code
- // is emitted.
- AddReachingFrame(cgen()->frame());
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- __ jmp(&merge_labels_.last());
+ // Use the current frame as the expected one at the target if necessary.
+ if (entry_frame_ == NULL) {
+ entry_frame_ = cgen()->frame();
+ RegisterFile empty;
+ cgen()->SetFrame(NULL, &empty);
+ } else {
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
+ }
+
+ // The predicate is_linked() should be made true. Its implementation
+ // detects the presence of a frame pointer in the reaching_frames_ list.
+ if (!is_linked()) {
+ reaching_frames_.Add(NULL);
+ ASSERT(is_linked());
+ }
}
+ __ jmp(&entry_label_);
}
@@ -74,56 +80,21 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
ASSERT(direction_ == BIDIRECTIONAL);
// Backward branch. We have an expected frame to merge to on the
// backward edge.
-
- // Swap the current frame for a copy (we do the swapping to get
- // the off-frame registers off the fall through) to use for the
- // branch.
- VirtualFrame* fall_through_frame = cgen()->frame();
- VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
- RegisterFile non_frame_registers;
- cgen()->SetFrame(branch_frame, &non_frame_registers);
-
- // Check if we can avoid merge code.
- cgen()->frame()->PrepareMergeTo(entry_frame_);
- if (cgen()->frame()->Equals(entry_frame_)) {
- // Branch right in to the block.
- cgen()->DeleteFrame();
- __ b(cc, &entry_label_);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- return;
- }
-
- // Check if we can reuse existing merge code.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (reaching_frames_[i] != NULL &&
- cgen()->frame()->Equals(reaching_frames_[i])) {
- // Branch to the merge code.
- cgen()->DeleteFrame();
- __ b(cc, &merge_labels_[i]);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- return;
- }
- }
-
- // To emit the merge code here, we negate the condition and branch
- // around the merge code on the fall through path.
- Label original_fall_through;
- __ b(NegateCondition(cc), &original_fall_through);
cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- __ b(&entry_label_);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- __ bind(&original_fall_through);
-
} else {
- // Preconfigured entry frame is not used on ARM.
- ASSERT(entry_frame_ == NULL);
- // Forward branch. A copy of the current frame is added to the end
- // of the list of frames reaching the target block and a branch to
- // the merge code is emitted.
- AddReachingFrame(new VirtualFrame(cgen()->frame()));
- __ b(cc, &merge_labels_.last());
+ // Clone the current frame to use as the expected one at the target if
+ // necessary.
+ if (entry_frame_ == NULL) {
+ entry_frame_ = new VirtualFrame(cgen()->frame());
+ }
+ // The predicate is_linked() should be made true. Its implementation
+ // detects the presence of a frame pointer in the reaching_frames_ list.
+ if (!is_linked()) {
+ reaching_frames_.Add(NULL);
+ ASSERT(is_linked());
+ }
}
+ __ b(cc, &entry_label_);
}
@@ -139,13 +110,19 @@ void JumpTarget::Call() {
ASSERT(cgen()->HasValidEntryRegisters());
ASSERT(!is_linked());
- cgen()->frame()->SpillAll();
+ // Calls are always 'forward' so we use a copy of the current frame (plus
+ // one for a return address) as the expected frame.
+ ASSERT(entry_frame_ == NULL);
VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
target_frame->Adjust(1);
- // We do not expect a call with a preconfigured entry frame.
- ASSERT(entry_frame_ == NULL);
- AddReachingFrame(target_frame);
- __ bl(&merge_labels_.last());
+ entry_frame_ = target_frame;
+
+ // The predicate is_linked() should now be made true. Its implementation
+ // detects the presence of a frame pointer in the reaching_frames_ list.
+ reaching_frames_.Add(NULL);
+ ASSERT(is_linked());
+
+ __ bl(&entry_label_);
}
@@ -156,168 +133,105 @@ void JumpTarget::DoBind() {
// block.
ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
- if (direction_ == FORWARD_ONLY) {
- // A simple case: no forward jumps and no possible backward jumps.
- if (!is_linked()) {
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- ASSERT(cgen()->has_valid_frame());
- VirtualFrame* frame = cgen()->frame();
- int difference = frame->stack_pointer_ - (frame->element_count() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ add(sp, sp, Operand(difference * kPointerSize));
- }
- __ bind(&entry_label_);
- return;
- }
-
- // Another simple case: no fall through, a single forward jump,
- // and no possible backward jumps.
- if (!cgen()->has_valid_frame() && reaching_frames_.length() == 1) {
- // Pick up the only reaching frame, take ownership of it, and
- // use it for the block about to be emitted.
- VirtualFrame* frame = reaching_frames_[0];
- RegisterFile empty;
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[0] = NULL;
- __ bind(&merge_labels_[0]);
-
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- int difference = frame->stack_pointer_ - (frame->element_count() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ add(sp, sp, Operand(difference * kPointerSize));
- }
- __ bind(&entry_label_);
- return;
- }
- }
-
- // If there is a current frame, record it as the fall-through. It
- // is owned by the reaching frames for now.
- bool had_fall_through = false;
if (cgen()->has_valid_frame()) {
- had_fall_through = true;
- AddReachingFrame(cgen()->frame()); // Return value ignored.
+ // If there is a current frame we can use it on the fall through.
+ if (entry_frame_ == NULL) {
+ entry_frame_ = new VirtualFrame(cgen()->frame());
+ } else {
+ ASSERT(cgen()->frame()->Equals(entry_frame_));
+ }
+ } else {
+ // If there is no current frame we must have an entry frame which we can
+ // copy.
+ ASSERT(entry_frame_ != NULL);
RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
+ cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
}
- // Compute the frame to use for entry to the block.
- if (entry_frame_ == NULL) {
- ComputeEntryFrame();
+ // The predicate is_linked() should be made false. Its implementation
+ // detects the presence (or absence) of frame pointers in the
+ // reaching_frames_ list. If we inserted a bogus frame to make
+ // is_linked() true, remove it now.
+ if (is_linked()) {
+ reaching_frames_.Clear();
}
- // Some moves required to merge to an expected frame require purely
- // frame state changes, and do not require any code generation.
- // Perform those first to increase the possibility of finding equal
- // frames below.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (reaching_frames_[i] != NULL) {
- reaching_frames_[i]->PrepareMergeTo(entry_frame_);
- }
- }
+ __ bind(&entry_label_);
+}
- if (is_linked()) {
- // There were forward jumps. Handle merging the reaching frames
- // and possible fall through to the entry frame.
-
- // Loop over the (non-null) reaching frames and process any that
- // need merge code. Iterate backwards through the list to handle
- // the fall-through frame first. Set frames that will be
- // processed after 'i' to NULL if we want to avoid processing
- // them.
- for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
- VirtualFrame* frame = reaching_frames_[i];
-
- if (frame != NULL) {
- // Does the frame (probably) need merge code?
- if (!frame->Equals(entry_frame_)) {
- // We could have a valid frame as the fall through to the
- // binding site or as the fall through from a previous merge
- // code block. Jump around the code we are about to
- // generate.
- if (cgen()->has_valid_frame()) {
- cgen()->DeleteFrame();
- __ b(&entry_label_);
- }
- // Pick up the frame for this block. Assume ownership if
- // there cannot be backward jumps.
- RegisterFile empty;
- if (direction_ == BIDIRECTIONAL) {
- cgen()->SetFrame(new VirtualFrame(frame), &empty);
- } else {
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[i] = NULL;
- }
- __ bind(&merge_labels_[i]);
-
- // Loop over the remaining (non-null) reaching frames,
- // looking for any that can share merge code with this one.
- for (int j = 0; j < i; j++) {
- VirtualFrame* other = reaching_frames_[j];
- if (other != NULL && other->Equals(cgen()->frame())) {
- // Set the reaching frame element to null to avoid
- // processing it later, and then bind its entry label.
- reaching_frames_[j] = NULL;
- __ bind(&merge_labels_[j]);
- }
- }
-
- // Emit the merge code.
- cgen()->frame()->MergeTo(entry_frame_);
- } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
- // If this is the fall through, and it didn't need merge
- // code, we need to pick up the frame so we can jump around
- // subsequent merge blocks if necessary.
- RegisterFile empty;
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[i] = NULL;
- }
- }
- }
- // The code generator may not have a current frame if there was no
- // fall through and none of the reaching frames needed merging.
- // In that case, clone the entry frame as the current frame.
- if (!cgen()->has_valid_frame()) {
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- }
+void BreakTarget::Jump() {
+ // On ARM we do not currently emit merge code for jumps, so we need to do
+ // it explicitly here. The only merging necessary is to drop extra
+ // statement state from the stack.
+ ASSERT(cgen()->has_valid_frame());
+ int count = cgen()->frame()->height() - expected_height_;
+ cgen()->frame()->Drop(count);
+ DoJump();
+}
- // There may be unprocessed reaching frames that did not need
- // merge code. They will have unbound merge labels. Bind their
- // merge labels to be the same as the entry label and deallocate
- // them.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (!merge_labels_[i].is_bound()) {
- reaching_frames_[i] = NULL;
- __ bind(&merge_labels_[i]);
- }
- }
- // There are non-NULL reaching frames with bound labels for each
- // merge block, but only on backward targets.
- } else {
- // There were no forward jumps. There must be a current frame and
- // this must be a bidirectional target.
- ASSERT(reaching_frames_.length() == 1);
- ASSERT(reaching_frames_[0] != NULL);
- ASSERT(direction_ == BIDIRECTIONAL);
+void BreakTarget::Jump(Result* arg) {
+ // On ARM we do not currently emit merge code for jumps, so we need to do
+ // it explicitly here. The only merging necessary is to drop extra
+ // statement state from the stack.
+ ASSERT(cgen()->has_valid_frame());
+ int count = cgen()->frame()->height() - expected_height_;
+ cgen()->frame()->Drop(count);
+ cgen()->frame()->Push(arg);
+ DoJump();
+}
- // Use a copy of the reaching frame so the original can be saved
- // for possible reuse as a backward merge block.
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
- __ bind(&merge_labels_[0]);
- cgen()->frame()->MergeTo(entry_frame_);
+
+void BreakTarget::Bind() {
+#ifdef DEBUG
+ // All the forward-reaching frames should have been adjusted at the
+ // jumps to this target.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ ASSERT(reaching_frames_[i] == NULL ||
+ reaching_frames_[i]->height() == expected_height_);
+ }
+#endif
+ // Drop leftover statement state from the frame before merging, even
+ // on the fall through. This is so we can bind the return target
+ // with state on the frame.
+ if (cgen()->has_valid_frame()) {
+ int count = cgen()->frame()->height() - expected_height_;
+ // On ARM we do not currently emit merge code at binding sites, so we need
+ // to do it explicitly here. The only merging necessary is to drop extra
+ // statement state from the stack.
+ cgen()->frame()->Drop(count);
}
- __ bind(&entry_label_);
+ DoBind();
}
+
+void BreakTarget::Bind(Result* arg) {
+#ifdef DEBUG
+ // All the forward-reaching frames should have been adjusted at the
+ // jumps to this target.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ ASSERT(reaching_frames_[i] == NULL ||
+ reaching_frames_[i]->height() == expected_height_ + 1);
+ }
+#endif
+ // Drop leftover statement state from the frame before merging, even
+ // on the fall through. This is so we can bind the return target
+ // with state on the frame.
+ if (cgen()->has_valid_frame()) {
+ int count = cgen()->frame()->height() - expected_height_;
+ // On ARM we do not currently emit merge code at binding sites, so we need
+ // to do it explicitly here. The only merging necessary is to drop extra
+ // statement state from the stack.
+ cgen()->frame()->ForgetElements(count);
+ cgen()->frame()->Push(arg);
+ }
+ DoBind();
+ *arg = cgen()->frame()->Pop();
+}
+
+
#undef __
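The rewritten ARM jump targets no longer keep one merge label per reaching frame: every jump, branch, and call goes straight to entry_label_, the first arrival donates (or clones) the expected frame, and a NULL sentinel is pushed into reaching_frames_ purely so that is_linked() reports true. A condensed sketch of that shared logic; LinkCurrentFrame is a hypothetical helper summarizing DoJump/DoBranch above, not a function in this diff:

    void JumpTarget::LinkCurrentFrame() {
      if (entry_frame_ == NULL) {
        // First arrival: the current frame becomes the expectation.
        entry_frame_ = new VirtualFrame(cgen()->frame());
      } else {
        // Later arrivals must conform to the existing expectation.
        cgen()->frame()->MergeTo(entry_frame_);
      }
      if (!is_linked()) {
        reaching_frames_.Add(NULL);  // sentinel: flips is_linked() to true
      }
      __ jmp(&entry_label_);         // single label per target
    }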
diff --git a/V8Binding/v8/src/arm/virtual-frame-arm.cc b/V8Binding/v8/src/arm/virtual-frame-arm.cc
index 3d0ada7..d3dabf8 100644
--- a/V8Binding/v8/src/arm/virtual-frame-arm.cc
+++ b/V8Binding/v8/src/arm/virtual-frame-arm.cc
@@ -76,72 +76,23 @@ void VirtualFrame::SyncRange(int begin, int end) {
void VirtualFrame::MergeTo(VirtualFrame* expected) {
- Comment cmnt(masm(), "[ Merge frame");
- // We should always be merging the code generator's current frame to an
- // expected frame.
- ASSERT(cgen()->frame() == this);
-
- // Adjust the stack pointer upward (toward the top of the virtual
- // frame) if necessary.
- if (stack_pointer_ < expected->stack_pointer_) {
- int difference = expected->stack_pointer_ - stack_pointer_;
- stack_pointer_ = expected->stack_pointer_;
- __ sub(sp, sp, Operand(difference * kPointerSize));
- }
-
- MergeMoveRegistersToMemory(expected);
- MergeMoveRegistersToRegisters(expected);
- MergeMoveMemoryToRegisters(expected);
-
- // Fix any sync bit problems from the bottom-up, stopping when we
- // hit the stack pointer or the top of the frame if the stack
- // pointer is floating above the frame.
- int limit = Min(static_cast<int>(stack_pointer_), element_count() - 1);
- for (int i = 0; i <= limit; i++) {
- FrameElement source = elements_[i];
- FrameElement target = expected->elements_[i];
- if (source.is_synced() && !target.is_synced()) {
- elements_[i].clear_sync();
- } else if (!source.is_synced() && target.is_synced()) {
- SyncElementAt(i);
- }
- }
-
- // Adjust the stack point downard if necessary.
- if (stack_pointer_ > expected->stack_pointer_) {
- int difference = stack_pointer_ - expected->stack_pointer_;
- stack_pointer_ = expected->stack_pointer_;
- __ add(sp, sp, Operand(difference * kPointerSize));
- }
-
- // At this point, the frames should be identical.
+ // ARM frames are currently always in memory.
ASSERT(Equals(expected));
}
void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
- ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
- // Move registers, constants, and copies to memory. Perform moves
- // from the top downward in the frame in order to leave the backing
- // stores of copies in registers.
- // On ARM, all elements are in memory.
-
-#ifdef DEBUG
- int start = Min(static_cast<int>(stack_pointer_), element_count() - 1);
- for (int i = start; i >= 0; i--) {
- ASSERT(elements_[i].is_memory());
- ASSERT(expected->elements_[i].is_memory());
- }
-#endif
+ UNREACHABLE();
}
void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
+ UNREACHABLE();
}
void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
+ UNREACHABLE();
}
@@ -235,76 +186,62 @@ void VirtualFrame::PushTryHandler(HandlerType type) {
}
-Result VirtualFrame::RawCallStub(CodeStub* stub) {
+void VirtualFrame::RawCallStub(CodeStub* stub) {
ASSERT(cgen()->HasValidEntryRegisters());
__ CallStub(stub);
- Result result = cgen()->allocator()->Allocate(r0);
- ASSERT(result.is_valid());
- return result;
}
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
+void VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
PrepareForCall(0, 0);
arg->Unuse();
- return RawCallStub(stub);
+ RawCallStub(stub);
}
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
+void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
PrepareForCall(0, 0);
arg0->Unuse();
arg1->Unuse();
- return RawCallStub(stub);
+ RawCallStub(stub);
}
-Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
PrepareForCall(arg_count, arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
- Result result = cgen()->allocator()->Allocate(r0);
- ASSERT(result.is_valid());
- return result;
}
-Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
+void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
PrepareForCall(arg_count, arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count);
- Result result = cgen()->allocator()->Allocate(r0);
- ASSERT(result.is_valid());
- return result;
}
-Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags,
- Result* arg_count_register,
- int arg_count) {
+void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeJSFlags flags,
+ Result* arg_count_register,
+ int arg_count) {
ASSERT(arg_count_register->reg().is(r0));
PrepareForCall(arg_count, arg_count);
arg_count_register->Unuse();
__ InvokeBuiltin(id, flags);
- Result result = cgen()->allocator()->Allocate(r0);
- return result;
}
-Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
+void VirtualFrame::RawCallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode) {
ASSERT(cgen()->HasValidEntryRegisters());
__ Call(code, rmode);
- Result result = cgen()->allocator()->Allocate(r0);
- ASSERT(result.is_valid());
- return result;
}
-Result VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- int dropped_args) {
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ int dropped_args) {
int spilled_args = 0;
switch (code->kind()) {
case Code::CALL_IC:
@@ -325,14 +262,14 @@ Result VirtualFrame::CallCodeObject(Handle<Code> code,
break;
}
PrepareForCall(spilled_args, dropped_args);
- return RawCallCodeObject(code, rmode);
+ RawCallCodeObject(code, rmode);
}
-Result VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args) {
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ Result* arg,
+ int dropped_args) {
int spilled_args = 0;
switch (code->kind()) {
case Code::LOAD_IC:
@@ -353,15 +290,15 @@ Result VirtualFrame::CallCodeObject(Handle<Code> code,
}
PrepareForCall(spilled_args, dropped_args);
arg->Unuse();
- return RawCallCodeObject(code, rmode);
+ RawCallCodeObject(code, rmode);
}
-Result VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
- int dropped_args) {
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ Result* arg0,
+ Result* arg1,
+ int dropped_args) {
int spilled_args = 1;
switch (code->kind()) {
case Code::STORE_IC:
@@ -385,11 +322,12 @@ Result VirtualFrame::CallCodeObject(Handle<Code> code,
PrepareForCall(spilled_args, dropped_args);
arg0->Unuse();
arg1->Unuse();
- return RawCallCodeObject(code, rmode);
+ RawCallCodeObject(code, rmode);
}
void VirtualFrame::Drop(int count) {
+ ASSERT(count >= 0);
ASSERT(height() >= count);
int num_virtual_elements = (element_count() - 1) - stack_pointer_;
diff --git a/V8Binding/v8/src/arm/virtual-frame-arm.h b/V8Binding/v8/src/arm/virtual-frame-arm.h
index d575df6..457478d 100644
--- a/V8Binding/v8/src/arm/virtual-frame-arm.h
+++ b/V8Binding/v8/src/arm/virtual-frame-arm.h
@@ -112,12 +112,14 @@ class VirtualFrame : public ZoneObject {
ASSERT(count >= 0);
ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_ -= count;
- ForgetElements(count);
+ // On ARM, all elements are in memory, so there is no extra bookkeeping
+ // (registers, copies, etc.) beyond dropping the elements.
+ elements_.Rewind(stack_pointer_ + 1);
}
- // Forget count elements from the top of the frame without adjusting
- // the stack pointer downward. This is used, for example, before
- // merging frames at break, continue, and return targets.
+ // Forget count elements from the top of the frame and adjust the stack
+ // pointer downward. This is used, for example, before merging frames at
+ // break, continue, and return targets.
void ForgetElements(int count);
// Spill all values from the frame to memory.
@@ -281,46 +283,46 @@ class VirtualFrame : public ZoneObject {
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
- Result CallStub(CodeStub* stub, int arg_count) {
+ void CallStub(CodeStub* stub, int arg_count) {
PrepareForCall(arg_count, arg_count);
- return RawCallStub(stub);
+ RawCallStub(stub);
}
// Call stub that expects its argument in r0. The argument is given
// as a result which must be the register r0.
- Result CallStub(CodeStub* stub, Result* arg);
+ void CallStub(CodeStub* stub, Result* arg);
// Call stub that expects its arguments in r1 and r0. The arguments
// are given as results which must be the appropriate registers.
- Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
+ void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
// Call runtime given the number of arguments expected on (and
// removed from) the stack.
- Result CallRuntime(Runtime::Function* f, int arg_count);
- Result CallRuntime(Runtime::FunctionId id, int arg_count);
+ void CallRuntime(Runtime::Function* f, int arg_count);
+ void CallRuntime(Runtime::FunctionId id, int arg_count);
// Invoke builtin given the number of arguments it expects on (and
// removes from) the stack.
- Result InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flag,
- Result* arg_count_register,
- int arg_count);
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeJSFlags flag,
+ Result* arg_count_register,
+ int arg_count);
// Call into an IC stub given the number of arguments it removes
// from the stack. Register arguments are passed as results and
// consumed by the call.
- Result CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- int dropped_args);
- Result CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args);
- Result CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
- int dropped_args);
+ void CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ int dropped_args);
+ void CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ Result* arg,
+ int dropped_args);
+ void CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ Result* arg0,
+ Result* arg1,
+ int dropped_args);
// Drop a number of elements from the top of the expression stack. May
// emit code to affect the physical frame. Does not clobber any registers
@@ -504,11 +506,11 @@ class VirtualFrame : public ZoneObject {
// Call a code stub that has already been prepared for calling (via
// PrepareForCall).
- Result RawCallStub(CodeStub* stub);
+ void RawCallStub(CodeStub* stub);
// Calls a code object which has already been prepared for calling
// (via PrepareForCall).
- Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
+ void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
bool Equals(VirtualFrame* other);
diff --git a/V8Binding/v8/src/cfg.cc b/V8Binding/v8/src/cfg.cc
index 32f614b..d2dff52 100644
--- a/V8Binding/v8/src/cfg.cc
+++ b/V8Binding/v8/src/cfg.cc
@@ -60,10 +60,10 @@ Cfg* Cfg::Build() {
if (fun->scope()->num_heap_slots() > 0) {
BAILOUT("function has context slots");
}
- if (fun->scope()->num_stack_slots() > kPointerSize) {
+ if (fun->scope()->num_stack_slots() > kBitsPerPointer) {
BAILOUT("function has too many locals");
}
- if (fun->scope()->num_parameters() > kPointerSize - 1) {
+ if (fun->scope()->num_parameters() > kBitsPerPointer - 1) {
BAILOUT("function has too many parameters");
}
if (fun->scope()->arguments() != NULL) {
@@ -200,43 +200,22 @@ Handle<Code> Cfg::Compile(Handle<Script> script) {
}
-void MoveInstr::FastAllocate(TempLocation* temp) {
- ASSERT(temp->where() == TempLocation::NOT_ALLOCATED);
- if (temp == value()) {
- temp->set_where(TempLocation::ACCUMULATOR);
- } else {
- temp->set_where(TempLocation::STACK);
- }
+void ZeroOperandInstruction::FastAllocate(TempLocation* temp) {
+ temp->set_where(TempLocation::STACK);
}
-void PropLoadInstr::FastAllocate(TempLocation* temp) {
- ASSERT(temp->where() == TempLocation::NOT_ALLOCATED);
- if (temp == object() || temp == key()) {
- temp->set_where(TempLocation::ACCUMULATOR);
- } else {
- temp->set_where(TempLocation::STACK);
- }
+void OneOperandInstruction::FastAllocate(TempLocation* temp) {
+ temp->set_where((temp == value_)
+ ? TempLocation::ACCUMULATOR
+ : TempLocation::STACK);
}
-void BinaryOpInstr::FastAllocate(TempLocation* temp) {
- ASSERT(temp->where() == TempLocation::NOT_ALLOCATED);
- if (temp == left() || temp == right()) {
- temp->set_where(TempLocation::ACCUMULATOR);
- } else {
- temp->set_where(TempLocation::STACK);
- }
-}
-
-
-void ReturnInstr::FastAllocate(TempLocation* temp) {
- ASSERT(temp->where() == TempLocation::NOT_ALLOCATED);
- if (temp == value()) {
- temp->set_where(TempLocation::ACCUMULATOR);
- } else {
- temp->set_where(TempLocation::STACK);
- }
+void TwoOperandInstruction::FastAllocate(TempLocation* temp) {
+ temp->set_where((temp == value0_ || temp == value1_)
+ ? TempLocation::ACCUMULATOR
+ : TempLocation::STACK);
}
@@ -341,6 +320,7 @@ void ExpressionCfgBuilder::VisitAssignment(Assignment* expr) {
if (lhs->AsProperty() != NULL) {
BAILOUT("unsupported property assignment");
}
+
Variable* var = lhs->AsVariableProxy()->AsVariable();
if (var == NULL) {
BAILOUT("unsupported invalid left-hand side");
@@ -354,6 +334,7 @@ void ExpressionCfgBuilder::VisitAssignment(Assignment* expr) {
BAILOUT("unsupported slot lhs (not a parameter or local)");
}
+ // Parameter and local slot assignments.
ExpressionCfgBuilder builder;
SlotLocation* loc = new SlotLocation(slot->type(), slot->index());
builder.Build(expr->value(), loc);
@@ -382,11 +363,11 @@ void ExpressionCfgBuilder::VisitProperty(Property* expr) {
ExpressionCfgBuilder object, key;
object.Build(expr->obj(), NULL);
if (object.graph() == NULL) {
- BAILOUT("unsupported object subexpression in propref");
+ BAILOUT("unsupported object subexpression in propload");
}
key.Build(expr->key(), NULL);
if (key.graph() == NULL) {
- BAILOUT("unsupported key subexpression in propref");
+ BAILOUT("unsupported key subexpression in propload");
}
if (destination_ == NULL) destination_ = new TempLocation();
@@ -639,9 +620,8 @@ void Cfg::Print() {
void Constant::Print() {
- PrintF("Constant(");
+ PrintF("Constant ");
handle_->Print();
- PrintF(")");
}
@@ -651,13 +631,13 @@ void Nowhere::Print() {
void SlotLocation::Print() {
- PrintF("Slot(");
+ PrintF("Slot ");
switch (type_) {
case Slot::PARAMETER:
- PrintF("PARAMETER, %d)", index_);
+ PrintF("(PARAMETER, %d)", index_);
break;
case Slot::LOCAL:
- PrintF("LOCAL, %d)", index_);
+ PrintF("(LOCAL, %d)", index_);
break;
default:
UNREACHABLE();
@@ -666,45 +646,87 @@ void SlotLocation::Print() {
void TempLocation::Print() {
- PrintF("Temp(%d)", number());
+ PrintF("Temp %d", number());
}
-void MoveInstr::Print() {
- PrintF("Move(");
+void OneOperandInstruction::Print() {
+ PrintF("(");
location()->Print();
PrintF(", ");
value_->Print();
- PrintF(")\n");
+ PrintF(")");
}
-void PropLoadInstr::Print() {
- PrintF("PropLoad(");
+void TwoOperandInstruction::Print() {
+ PrintF("(");
location()->Print();
PrintF(", ");
- object()->Print();
+ value0_->Print();
PrintF(", ");
- key()->Print();
- PrintF(")\n");
+ value1_->Print();
+ PrintF(")");
+}
+
+
+void MoveInstr::Print() {
+ PrintF("Move ");
+ OneOperandInstruction::Print();
+ PrintF("\n");
+}
+
+
+void PropLoadInstr::Print() {
+ PrintF("PropLoad ");
+ TwoOperandInstruction::Print();
+ PrintF("\n");
}
void BinaryOpInstr::Print() {
- PrintF("BinaryOp(");
- location()->Print();
- PrintF(", %s, ", Token::Name(op()));
- left()->Print();
- PrintF(", ");
- right()->Print();
- PrintF(")\n");
+ switch (op()) {
+ case Token::OR:
+ // Two character operand.
+ PrintF("BinaryOp[OR] ");
+ break;
+ case Token::AND:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ // Three character operands.
+ PrintF("BinaryOp[%s] ", Token::Name(op()));
+ break;
+ case Token::COMMA:
+ // Five character operand.
+ PrintF("BinaryOp[COMMA] ");
+ break;
+ case Token::BIT_OR:
+ // Six character operand.
+ PrintF("BinaryOp[BIT_OR] ");
+ break;
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ // Seven character operands.
+ PrintF("BinaryOp[%s] ", Token::Name(op()));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ TwoOperandInstruction::Print();
+ PrintF("\n");
}
void ReturnInstr::Print() {
- PrintF("Return(");
- value_->Print();
- PrintF(")\n");
+ PrintF("Return ");
+ OneOperandInstruction::Print();
+ PrintF("\n");
}
@@ -715,7 +737,7 @@ void InstructionBlock::Print() {
for (int i = 0, len = instructions_.length(); i < len; i++) {
instructions_[i]->Print();
}
- PrintF("Goto L%d\n\n", successor_->number());
+ PrintF("Goto L%d\n\n", successor_->number());
successor_->Print();
}
}
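
The four nearly identical FastAllocate bodies above collapse into one per operand arity: a temporary is given the accumulator exactly when the instruction consuming it takes it as an operand, and the stack otherwise. A minimal standalone sketch of that rule, using hypothetical types rather than V8's actual classes:

    #include <cassert>
    #include <cstdio>

    enum Where { NOT_ALLOCATED, ACCUMULATOR, STACK };

    struct Temp {
      Where where = NOT_ALLOCATED;
      void set_where(Where w) {
        assert(where == NOT_ALLOCATED);  // the invariant the new setter asserts
        where = w;
      }
    };

    // Two-operand rule: a temp goes to the accumulator only if the
    // instruction consuming it takes it as one of its operands.
    void FastAllocate(Temp* temp, Temp* value0, Temp* value1) {
      temp->set_where((temp == value0 || temp == value1) ? ACCUMULATOR : STACK);
    }

    int main() {
      Temp t;
      FastAllocate(&t, &t, NULL);  // t is operand value0: accumulator
      std::printf("%s\n", t.where == ACCUMULATOR ? "ACCUMULATOR" : "STACK");
      return 0;
    }
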
diff --git a/V8Binding/v8/src/cfg.h b/V8Binding/v8/src/cfg.h
index 2031839..0eb0f92 100644
--- a/V8Binding/v8/src/cfg.h
+++ b/V8Binding/v8/src/cfg.h
@@ -307,7 +307,10 @@ class TempLocation : public Location {
// Accessors.
Where where() { return where_; }
- void set_where(Where where) { where_ = where; }
+ void set_where(Where where) {
+ ASSERT(where_ == TempLocation::NOT_ALLOCATED);
+ where_ = where;
+ }
// Predicates.
bool is_on_stack() { return where_ == STACK; }
@@ -343,14 +346,6 @@ class TempLocation : public Location {
// be generated.
class Instruction : public ZoneObject {
public:
- // Every instruction has a location where its result is stored (which may
- // be Nowhere, the default).
- Instruction() : location_(CfgGlobals::current()->nowhere()) {}
-
- explicit Instruction(Location* location) : location_(location) {}
-
- virtual ~Instruction() {}
-
// Accessors.
Location* location() { return location_; }
void set_location(Location* location) { location_ = location; }
@@ -370,16 +365,84 @@ class Instruction : public ZoneObject {
#endif
protected:
+ // Every instruction has a location where its result is stored (which may
+ // be Nowhere).
+ explicit Instruction(Location* location) : location_(location) {}
+
+ virtual ~Instruction() {}
+
Location* location_;
};
+// Base class of instructions that have no input operands.
+class ZeroOperandInstruction : public Instruction {
+ public:
+ // Support for fast-compilation mode:
+ virtual void Compile(MacroAssembler* masm) = 0;
+ void FastAllocate(TempLocation* temp);
+
+#ifdef DEBUG
+ // Printing support: print the operands (nothing).
+ virtual void Print() {}
+#endif
+
+ protected:
+ explicit ZeroOperandInstruction(Location* loc) : Instruction(loc) {}
+};
+
+
+// Base class of instructions that have a single input operand.
+class OneOperandInstruction : public Instruction {
+ public:
+ // Support for fast-compilation mode:
+ virtual void Compile(MacroAssembler* masm) = 0;
+ void FastAllocate(TempLocation* temp);
+
+#ifdef DEBUG
+ // Printing support: print the operands.
+ virtual void Print();
+#endif
+
+ protected:
+ OneOperandInstruction(Location* loc, Value* value)
+ : Instruction(loc), value_(value) {
+ }
+
+ Value* value_;
+};
+
+
+// Base class of instructions that have two input operands.
+class TwoOperandInstruction : public Instruction {
+ public:
+ // Support for fast-compilation mode:
+ virtual void Compile(MacroAssembler* masm) = 0;
+ void FastAllocate(TempLocation* temp);
+
+#ifdef DEBUG
+ // Printing support: print the operands.
+ virtual void Print();
+#endif
+
+ protected:
+ TwoOperandInstruction(Location* loc, Value* value0, Value* value1)
+ : Instruction(loc), value0_(value0), value1_(value1) {
+ }
+
+ Value* value0_;
+ Value* value1_;
+};
+
+
// A phantom instruction that indicates the start of a statement. It
// causes the statement position to be recorded in the relocation
// information but generates no code.
-class PositionInstr : public Instruction {
+class PositionInstr : public ZeroOperandInstruction {
public:
- explicit PositionInstr(int pos) : pos_(pos) {}
+ explicit PositionInstr(int pos)
+ : ZeroOperandInstruction(CfgGlobals::current()->nowhere()), pos_(pos) {
+ }
// Support for fast-compilation mode.
void Compile(MacroAssembler* masm);
@@ -399,67 +462,60 @@ class PositionInstr : public Instruction {
// Move a value to a location.
-class MoveInstr : public Instruction {
+class MoveInstr : public OneOperandInstruction {
public:
- MoveInstr(Location* loc, Value* value) : Instruction(loc), value_(value) {}
+ MoveInstr(Location* loc, Value* value)
+ : OneOperandInstruction(loc, value) {
+ }
// Accessors.
Value* value() { return value_; }
// Support for fast-compilation mode.
void Compile(MacroAssembler* masm);
- void FastAllocate(TempLocation* temp);
#ifdef DEBUG
+ // Printing support.
void Print();
#endif
-
- private:
- Value* value_;
};
// Load a property from a receiver, leaving the result in a location.
-class PropLoadInstr : public Instruction {
+class PropLoadInstr : public TwoOperandInstruction {
public:
PropLoadInstr(Location* loc, Value* object, Value* key)
- : Instruction(loc), object_(object), key_(key) {
+ : TwoOperandInstruction(loc, object, key) {
}
// Accessors.
- Value* object() { return object_; }
- Value* key() { return key_; }
+ Value* object() { return value0_; }
+ Value* key() { return value1_; }
// Support for fast-compilation mode.
void Compile(MacroAssembler* masm);
- void FastAllocate(TempLocation* temp);
#ifdef DEBUG
void Print();
#endif
-
- private:
- Value* object_;
- Value* key_;
};
// Perform a (non-short-circuited) binary operation on a pair of values,
// leaving the result in a location.
-class BinaryOpInstr : public Instruction {
+class BinaryOpInstr : public TwoOperandInstruction {
public:
BinaryOpInstr(Location* loc, Token::Value op, Value* left, Value* right)
- : Instruction(loc), op_(op), left_(left), right_(right) {
+ : TwoOperandInstruction(loc, left, right), op_(op) {
}
// Accessors.
+ Value* left() { return value0_; }
+ Value* right() { return value1_; }
Token::Value op() { return op_; }
- Value* left() { return left_; }
- Value* right() { return right_; }
// Support for fast-compilation mode.
void Compile(MacroAssembler* masm);
- void FastAllocate(TempLocation* temp);
#ifdef DEBUG
void Print();
@@ -467,8 +523,6 @@ class BinaryOpInstr : public Instruction {
private:
Token::Value op_;
- Value* left_;
- Value* right_;
};
@@ -477,9 +531,11 @@ class BinaryOpInstr : public Instruction {
// block, and implies that the block is closed (cannot have instructions
// appended or graph fragments concatenated to the end) and that the block's
// successor is the global exit node for the current function.
-class ReturnInstr : public Instruction {
+class ReturnInstr : public OneOperandInstruction {
public:
- explicit ReturnInstr(Value* value) : value_(value) {}
+ explicit ReturnInstr(Value* value)
+ : OneOperandInstruction(CfgGlobals::current()->nowhere(), value) {
+ }
virtual ~ReturnInstr() {}
@@ -488,14 +544,10 @@ class ReturnInstr : public Instruction {
// Support for fast-compilation mode.
void Compile(MacroAssembler* masm);
- void FastAllocate(TempLocation* temp);
#ifdef DEBUG
void Print();
#endif
-
- private:
- Value* value_;
};
@@ -716,11 +768,11 @@ class LocationSet BASE_EMBEDDED {
void AddElement(SlotLocation* location) {
if (location->type() == Slot::PARAMETER) {
// Parameter indexes begin with -1 ('this').
- ASSERT(location->index() < kPointerSize - 1);
+ ASSERT(location->index() < kBitsPerPointer - 1);
parameters_ |= (1 << (location->index() + 1));
} else {
ASSERT(location->type() == Slot::LOCAL);
- ASSERT(location->index() < kPointerSize);
+ ASSERT(location->index() < kBitsPerPointer);
locals_ |= (1 << location->index());
}
}
@@ -733,11 +785,11 @@ class LocationSet BASE_EMBEDDED {
bool Contains(SlotLocation* location) {
if (location->type() == Slot::PARAMETER) {
- ASSERT(location->index() < kPointerSize - 1);
+ ASSERT(location->index() < kBitsPerPointer - 1);
return (parameters_ & (1 << (location->index() + 1)));
} else {
ASSERT(location->type() == Slot::LOCAL);
- ASSERT(location->index() < kPointerSize);
+ ASSERT(location->index() < kBitsPerPointer);
return (locals_ & (1 << location->index()));
}
}
@@ -782,7 +834,7 @@ class ExpressionCfgBuilder : public AstVisitor {
#undef DECLARE_VISIT
private:
- // State for the visitor. Input parameters:
+ // State for the visitor. Input parameter:
Location* destination_;
// Output parameters:
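
The kPointerSize -> kBitsPerPointer fix in this file and in cfg.cc matters because LocationSet packs slot indexes into the bits of a single word: the bound on an index is the word's bit width (32 or 64), not its byte size (4 or 8), which is what kPointerSize measures. A hedged sketch of the same bookkeeping, with hypothetical names:

    #include <cassert>
    #include <cstdint>

    const int kBitsPerWord = sizeof(uintptr_t) * 8;  // 32 or 64, not 4 or 8

    class SlotSet {
     public:
      void Add(int index) {
        assert(index >= 0 && index < kBitsPerWord);  // one bit per slot
        bits_ |= uintptr_t{1} << index;
      }
      bool Contains(int index) const {
        assert(index >= 0 && index < kBitsPerWord);
        return (bits_ >> index) & 1;
      }
     private:
      uintptr_t bits_ = 0;
    };

    int main() {
      SlotSet locals;
      locals.Add(3);
      assert(locals.Contains(3) && !locals.Contains(4));
      return 0;
    }
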
diff --git a/V8Binding/v8/src/codegen.cc b/V8Binding/v8/src/codegen.cc
index 7a4bb12..c80d906 100644
--- a/V8Binding/v8/src/codegen.cc
+++ b/V8Binding/v8/src/codegen.cc
@@ -243,23 +243,18 @@ bool CodeGenerator::ShouldGenerateLog(Expression* type) {
// in the full script source. When counting characters in the script source
// the first character is number 0 (not 1).
void CodeGenerator::SetFunctionInfo(Handle<JSFunction> fun,
- int length,
- int function_token_position,
- int start_position,
- int end_position,
- bool is_expression,
+ FunctionLiteral* lit,
bool is_toplevel,
- Handle<Script> script,
- Handle<String> inferred_name) {
- fun->shared()->set_length(length);
- fun->shared()->set_formal_parameter_count(length);
+ Handle<Script> script) {
+ fun->shared()->set_length(lit->num_parameters());
+ fun->shared()->set_formal_parameter_count(lit->num_parameters());
fun->shared()->set_script(*script);
- fun->shared()->set_function_token_position(function_token_position);
- fun->shared()->set_start_position(start_position);
- fun->shared()->set_end_position(end_position);
- fun->shared()->set_is_expression(is_expression);
+ fun->shared()->set_function_token_position(lit->function_token_position());
+ fun->shared()->set_start_position(lit->start_position());
+ fun->shared()->set_end_position(lit->end_position());
+ fun->shared()->set_is_expression(lit->is_expression());
fun->shared()->set_is_toplevel(is_toplevel);
- fun->shared()->set_inferred_name(*inferred_name);
+ fun->shared()->set_inferred_name(*lit->inferred_name());
}
@@ -317,11 +312,7 @@ Handle<JSFunction> CodeGenerator::BuildBoilerplate(FunctionLiteral* node) {
node->materialized_literal_count(),
node->contains_array_literal(),
code);
- CodeGenerator::SetFunctionInfo(function, node->num_parameters(),
- node->function_token_position(),
- node->start_position(), node->end_position(),
- node->is_expression(), false, script_,
- node->inferred_name());
+ CodeGenerator::SetFunctionInfo(function, node, false, script_);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger that a new function has been added.
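
SetFunctionInfo now receives the FunctionLiteral itself instead of seven values unpacked from it, a classic introduce-parameter-object refactoring. A small illustrative sketch (hypothetical types, not V8's):

    #include <cassert>
    #include <string>

    struct FunctionLit {            // invented stand-in for FunctionLiteral
      int num_parameters;
      int start_position;
      int end_position;
      std::string inferred_name;
    };

    struct SharedInfo {             // invented stand-in for SharedFunctionInfo
      int length;
      int start_position;
      int end_position;
      std::string inferred_name;
    };

    // Passing the source object means each field is read on its own line,
    // so two int arguments can no longer be transposed at a call site.
    void SetFunctionInfo(SharedInfo* shared, const FunctionLit& lit) {
      shared->length = lit.num_parameters;
      shared->start_position = lit.start_position;
      shared->end_position = lit.end_position;
      shared->inferred_name = lit.inferred_name;
    }

    int main() {
      FunctionLit lit = {2, 10, 90, "f"};
      SharedInfo shared;
      SetFunctionInfo(&shared, lit);
      assert(shared.length == 2 && shared.end_position == 90);
      return 0;
    }
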
diff --git a/V8Binding/v8/src/compilation-cache.cc b/V8Binding/v8/src/compilation-cache.cc
index ec5b39c..8dd9ec1 100644
--- a/V8Binding/v8/src/compilation-cache.cc
+++ b/V8Binding/v8/src/compilation-cache.cc
@@ -290,7 +290,6 @@ void CompilationCacheScript::Put(Handle<String> source,
HandleScope scope;
ASSERT(boilerplate->IsBoilerplate());
Handle<CompilationCacheTable> table = GetTable(0);
- // TODO(X64): -fstrict-aliasing causes a problem with table. Fix it.
CALL_HEAP_FUNCTION_VOID(table->Put(*source, *boilerplate));
}
diff --git a/V8Binding/v8/src/compiler.cc b/V8Binding/v8/src/compiler.cc
index f0d97fe..feff492 100644
--- a/V8Binding/v8/src/compiler.cc
+++ b/V8Binding/v8/src/compiler.cc
@@ -102,7 +102,7 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
static bool IsValidJSON(FunctionLiteral* lit) {
- if (!lit->body()->length() == 1)
+ if (lit->body()->length() != 1)
return false;
Statement* stmt = lit->body()->at(0);
if (stmt->AsExpressionStatement() == NULL)
@@ -114,7 +114,7 @@ static bool IsValidJSON(FunctionLiteral* lit) {
static Handle<JSFunction> MakeFunction(bool is_global,
bool is_eval,
- bool is_json,
+ Compiler::ValidationState validate,
Handle<Script> script,
Handle<Context> context,
v8::Extension* extension,
@@ -129,6 +129,7 @@ static Handle<JSFunction> MakeFunction(bool is_global,
script->set_context_data((*i::Top::global_context())->data());
#ifdef ENABLE_DEBUGGER_SUPPORT
+ bool is_json = (validate == Compiler::VALIDATE_JSON);
if (is_eval || is_json) {
script->set_compilation_type(
is_json ? Smi::FromInt(Script::COMPILATION_TYPE_JSON) :
@@ -162,7 +163,7 @@ static Handle<JSFunction> MakeFunction(bool is_global,
// When parsing JSON we do an ordinary parse and then afterwards
// check the AST to ensure it was well-formed. If not we give a
// syntax error.
- if (is_json && !IsValidJSON(lit)) {
+ if (validate == Compiler::VALIDATE_JSON && !IsValidJSON(lit)) {
HandleScope scope;
Handle<JSArray> args = Factory::NewJSArray(1);
Handle<Object> source(script->source());
@@ -218,11 +219,8 @@ static Handle<JSFunction> MakeFunction(bool is_global,
lit->contains_array_literal(),
code);
- CodeGenerator::SetFunctionInfo(fun, lit->scope()->num_parameters(),
- RelocInfo::kNoPosition,
- lit->start_position(), lit->end_position(),
- lit->is_expression(), true, script,
- lit->inferred_name());
+ ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
+ CodeGenerator::SetFunctionInfo(fun, lit, true, script);
// Hint to the runtime system used when allocating space for initial
// property space by setting the expected number of properties for
@@ -268,7 +266,7 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
if (pre_data == NULL && source_length >= FLAG_min_preparse_length) {
Access<SafeStringInputBuffer> buf(&safe_string_input_buffer);
buf->Reset(source.location());
- pre_data = PreParse(buf.value(), extension);
+ pre_data = PreParse(source, buf.value(), extension);
}
// Create a script object describing the script to be compiled.
@@ -282,7 +280,7 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
// Compile the function and add it to the cache.
result = MakeFunction(true,
false,
- false,
+ DONT_VALIDATE_JSON,
script,
Handle<Context>::null(),
extension,
@@ -305,7 +303,11 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
Handle<Context> context,
bool is_global,
- bool is_json) {
+ ValidationState validate) {
+ // Note that if validation is required then no path through this
+ // function is allowed to return a value without validating that
+ // the input is legal json.
+
int source_length = source->length();
Counters::total_eval_size.Increment(source_length);
Counters::total_compile_size.Increment(source_length);
@@ -314,20 +316,26 @@ Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
VMState state(COMPILER);
// Do a lookup in the compilation cache; if the entry is not there,
- // invoke the compiler and add the result to the cache.
- Handle<JSFunction> result =
- CompilationCache::LookupEval(source, context, is_global);
+ // invoke the compiler and add the result to the cache. If we're
+ // evaluating json we bypass the cache since we can't be sure a
+ // potential value in the cache has been validated.
+ Handle<JSFunction> result;
+ if (validate == DONT_VALIDATE_JSON)
+ result = CompilationCache::LookupEval(source, context, is_global);
+
if (result.is_null()) {
// Create a script object describing the script to be compiled.
Handle<Script> script = Factory::NewScript(source);
result = MakeFunction(is_global,
true,
- is_json,
+ validate,
script,
context,
NULL,
NULL);
- if (!result.is_null()) {
+ if (!result.is_null() && validate != VALIDATE_JSON) {
+ // For json it's unlikely that we'll ever see exactly the same
+ // string again so we don't use the compilation cache.
CompilationCache::PutEval(source, context, is_global, result);
}
}
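
Replacing the is_json bool with a two-value enum makes call sites self-documenting and makes the cache policy explicit: a cached eval result may not have been validated, so JSON compilations skip the cache. A compilable sketch of the idea (ShouldUseCompilationCache is an invented helper, not a V8 API):

    #include <cassert>

    enum ValidationState { VALIDATE_JSON, DONT_VALIDATE_JSON };

    // With a bool, CompileEval(source, context, true, false) reads as two
    // anonymous flags; with the enum, the JSON case documents itself at
    // the call site, and the cache rule becomes a one-liner.
    bool ShouldUseCompilationCache(ValidationState validate) {
      return validate == DONT_VALIDATE_JSON;
    }

    int main() {
      assert(!ShouldUseCompilationCache(VALIDATE_JSON));
      assert(ShouldUseCompilationCache(DONT_VALIDATE_JSON));
      return 0;
    }
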
diff --git a/V8Binding/v8/src/compiler.h b/V8Binding/v8/src/compiler.h
index 9f02a8d..579970b 100644
--- a/V8Binding/v8/src/compiler.h
+++ b/V8Binding/v8/src/compiler.h
@@ -48,6 +48,8 @@ namespace internal {
class Compiler : public AllStatic {
public:
+ enum ValidationState { VALIDATE_JSON, DONT_VALIDATE_JSON };
+
// All routines return a JSFunction.
// If an error occurs an exception is raised and
// the return handle contains NULL.
@@ -63,7 +65,7 @@ class Compiler : public AllStatic {
static Handle<JSFunction> CompileEval(Handle<String> source,
Handle<Context> context,
bool is_global,
- bool is_json);
+ ValidationState validation);
// Compile from function info (used for lazy compilation). Returns
// true on success and false if the compilation resulted in a stack
diff --git a/V8Binding/v8/src/d8.cc b/V8Binding/v8/src/d8.cc
index e02c80a..7082280 100644
--- a/V8Binding/v8/src/d8.cc
+++ b/V8Binding/v8/src/d8.cc
@@ -146,27 +146,27 @@ bool Shell::ExecuteString(Handle<String> source,
Handle<Value> Shell::Print(const Arguments& args) {
- bool first = true;
+ Handle<Value> val = Write(args);
+ printf("\n");
+ return val;
+}
+
+
+Handle<Value> Shell::Write(const Arguments& args) {
for (int i = 0; i < args.Length(); i++) {
HandleScope handle_scope;
- if (first) {
- first = false;
- } else {
+ if (i != 0) {
printf(" ");
}
v8::String::Utf8Value str(args[i]);
const char* cstr = ToCString(str);
printf("%s", cstr);
}
- printf("\n");
return Undefined();
}
Handle<Value> Shell::Read(const Arguments& args) {
- if (args.Length() != 1) {
- return ThrowException(String::New("Bad parameters"));
- }
String::Utf8Value file(args[0]);
if (*file == NULL) {
return ThrowException(String::New("Error loading file"));
@@ -179,6 +179,19 @@ Handle<Value> Shell::Read(const Arguments& args) {
}
+Handle<Value> Shell::ReadLine(const Arguments& args) {
+ char line_buf[256];
+ if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) {
+ return ThrowException(String::New("Error reading line"));
+ }
+ int len = strlen(line_buf);
+  if (len > 0 && line_buf[len - 1] == '\n') {
+ --len;
+ }
+ return String::New(line_buf, len);
+}
+
+
Handle<Value> Shell::Load(const Arguments& args) {
for (int i = 0; i < args.Length(); i++) {
HandleScope handle_scope;
@@ -399,7 +412,10 @@ void Shell::Initialize() {
HandleScope scope;
Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
+ global_template->Set(String::New("write"), FunctionTemplate::New(Write));
global_template->Set(String::New("read"), FunctionTemplate::New(Read));
+ global_template->Set(String::New("readline"),
+ FunctionTemplate::New(ReadLine));
global_template->Set(String::New("load"), FunctionTemplate::New(Load));
global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
global_template->Set(String::New("version"), FunctionTemplate::New(Version));
@@ -588,8 +604,12 @@ void ShellThread::Run() {
Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"),
FunctionTemplate::New(Shell::Print));
+ global_template->Set(String::New("write"),
+ FunctionTemplate::New(Shell::Write));
global_template->Set(String::New("read"),
FunctionTemplate::New(Shell::Read));
+ global_template->Set(String::New("readline"),
+ FunctionTemplate::New(Shell::ReadLine));
global_template->Set(String::New("load"),
FunctionTemplate::New(Shell::Load));
global_template->Set(String::New("yield"),
diff --git a/V8Binding/v8/src/d8.h b/V8Binding/v8/src/d8.h
index 092e3a3..c93ea46 100644
--- a/V8Binding/v8/src/d8.h
+++ b/V8Binding/v8/src/d8.h
@@ -138,10 +138,12 @@ class Shell: public i::AllStatic {
#endif
static Handle<Value> Print(const Arguments& args);
+ static Handle<Value> Write(const Arguments& args);
static Handle<Value> Yield(const Arguments& args);
static Handle<Value> Quit(const Arguments& args);
static Handle<Value> Version(const Arguments& args);
static Handle<Value> Read(const Arguments& args);
+ static Handle<Value> ReadLine(const Arguments& args);
static Handle<Value> Load(const Arguments& args);
// The OS object on the global object contains methods for performing
// operating system calls:
diff --git a/V8Binding/v8/src/execution.cc b/V8Binding/v8/src/execution.cc
index 4ab6b61..9080f5e 100644
--- a/V8Binding/v8/src/execution.cc
+++ b/V8Binding/v8/src/execution.cc
@@ -174,12 +174,7 @@ Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
// Regular expressions can be called as functions in both Firefox
// and Safari so we allow it too.
- bool is_regexp =
- object->IsHeapObject() &&
- (HeapObject::cast(*object)->map()->constructor() ==
- *Top::regexp_function());
-
- if (is_regexp) {
+ if (object->IsJSRegExp()) {
Handle<String> exec = Factory::exec_symbol();
return Handle<Object>(object->GetProperty(*exec));
}
diff --git a/V8Binding/v8/src/factory.cc b/V8Binding/v8/src/factory.cc
index 36554df..bb6987b 100644
--- a/V8Binding/v8/src/factory.cc
+++ b/V8Binding/v8/src/factory.cc
@@ -87,8 +87,10 @@ Handle<String> Factory::NewStringFromUtf8(Vector<const char> string,
}
-Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string) {
- CALL_HEAP_FUNCTION(Heap::AllocateStringFromTwoByte(string), String);
+Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(Heap::AllocateStringFromTwoByte(string, pretenure),
+ String);
}
diff --git a/V8Binding/v8/src/factory.h b/V8Binding/v8/src/factory.h
index 4db5d4e..ddf71de 100644
--- a/V8Binding/v8/src/factory.h
+++ b/V8Binding/v8/src/factory.h
@@ -92,7 +92,8 @@ class Factory : public AllStatic {
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
- static Handle<String> NewStringFromTwoByte(Vector<const uc16> str);
+ static Handle<String> NewStringFromTwoByte(Vector<const uc16> str,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocates and partially initializes a TwoByte String. The characters of
// the string are uninitialized. Currently used in regexp code only, where
diff --git a/V8Binding/v8/src/flag-definitions.h b/V8Binding/v8/src/flag-definitions.h
index 3df11f7..c05feb4 100644
--- a/V8Binding/v8/src/flag-definitions.h
+++ b/V8Binding/v8/src/flag-definitions.h
@@ -161,6 +161,9 @@ DEFINE_bool(trace_gc_verbose, false,
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
+// v8.cc
+DEFINE_bool(use_idle_notification, true,
+ "Use idle notification to reduce memory footprint.")
// ic.cc
DEFINE_bool(use_ic, true, "use inline caching")
diff --git a/V8Binding/v8/src/handles-inl.h b/V8Binding/v8/src/handles-inl.h
index 6013c5b..8478bb5 100644
--- a/V8Binding/v8/src/handles-inl.h
+++ b/V8Binding/v8/src/handles-inl.h
@@ -39,7 +39,7 @@ namespace internal {
template<class T>
Handle<T>::Handle(T* obj) {
ASSERT(!obj->IsFailure());
- location_ = reinterpret_cast<T**>(HandleScope::CreateHandle(obj));
+ location_ = HandleScope::CreateHandle(obj);
}
diff --git a/V8Binding/v8/src/handles.h b/V8Binding/v8/src/handles.h
index ba2694f..8c9cbeb 100644
--- a/V8Binding/v8/src/handles.h
+++ b/V8Binding/v8/src/handles.h
@@ -82,7 +82,7 @@ class Handle {
}
static Handle<T> null() { return Handle<T>(); }
- bool is_null() {return location_ == NULL; }
+ bool is_null() { return location_ == NULL; }
// Closes the given scope, but lets this handle escape. See
// implementation in api.h.
@@ -119,15 +119,18 @@ class HandleScope {
static int NumberOfHandles();
// Creates a new handle with the given value.
- static inline Object** CreateHandle(Object* value) {
- void** result = current_.next;
- if (result == current_.limit) result = Extend();
+ template <typename T>
+ static inline T** CreateHandle(T* value) {
+ void** cur = current_.next;
+ if (cur == current_.limit) cur = Extend();
// Update the current next field, set the value in the created
// handle, and return the result.
- ASSERT(result < current_.limit);
- current_.next = result + 1;
- *reinterpret_cast<Object**>(result) = value;
- return reinterpret_cast<Object**>(result);
+ ASSERT(cur < current_.limit);
+ current_.next = cur + 1;
+
+ T** result = reinterpret_cast<T**>(cur);
+ *result = value;
+ return result;
}
private:
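
Templating CreateHandle moves the single reinterpret_cast from every Handle<T> constructor into one audited place. A toy version of the bump allocator showing why call sites no longer need a cast (cells and next_cell are invented stand-ins for the handle scope's storage):

    #include <cassert>

    static void* cells[1024];
    static void** next_cell = cells;

    // The one cast from the untyped bump-allocated cell to T** lives here,
    // instead of being repeated at every handle construction site.
    template <typename T>
    T** CreateHandle(T* value) {
      void** cur = next_cell;
      assert(cur < cells + 1024);
      next_cell = cur + 1;
      T** result = reinterpret_cast<T**>(cur);
      *result = value;
      return result;
    }

    int main() {
      int x = 42;
      int** handle = CreateHandle(&x);  // no cast needed at the call site
      assert(**handle == 42);
      return 0;
    }
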
diff --git a/V8Binding/v8/src/heap.cc b/V8Binding/v8/src/heap.cc
index 7c91778..aa7a5c7 100644
--- a/V8Binding/v8/src/heap.cc
+++ b/V8Binding/v8/src/heap.cc
@@ -74,7 +74,7 @@ int Heap::semispace_size_ = 512*KB;
int Heap::old_generation_size_ = 128*MB;
int Heap::initial_semispace_size_ = 128*KB;
#else
-int Heap::semispace_size_ = 8*MB;
+int Heap::semispace_size_ = 4*MB;
int Heap::old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 512*KB;
#endif
@@ -277,7 +277,9 @@ void Heap::GarbageCollectionPrologue() {
int Heap::SizeOfObjects() {
int total = 0;
AllSpaces spaces;
- while (Space* space = spaces.next()) total += space->Size();
+ while (Space* space = spaces.next()) {
+ total += space->Size();
+ }
return total;
}
@@ -425,6 +427,20 @@ static void VerifySymbolTable() {
}
+void Heap::EnsureFromSpaceIsCommitted() {
+ if (new_space_.CommitFromSpaceIfNeeded()) return;
+
+ // Committing memory to from space failed.
+  // Shrink the heap and try again.
+ Shrink();
+ if (new_space_.CommitFromSpaceIfNeeded()) return;
+
+ // Committing memory to from space failed again.
+ // Memory is exhausted and we will die.
+ V8::FatalProcessOutOfMemory("Committing semi space failed.");
+}
+
+
void Heap::PerformGarbageCollection(AllocationSpace space,
GarbageCollector collector,
GCTracer* tracer) {
@@ -433,7 +449,7 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
ASSERT(!allocation_allowed_);
global_gc_prologue_callback_();
}
-
+ EnsureFromSpaceIsCommitted();
if (collector == MARK_COMPACTOR) {
MarkCompact(tracer);
@@ -641,11 +657,11 @@ void Heap::Scavenge() {
if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
survived_since_last_expansion_ > new_space_.Capacity()) {
- // Double the size of new space if there is room to grow and enough
+ // Grow the size of new space if there is room to grow and enough
// data has survived scavenge since the last expansion.
- // TODO(1240712): NewSpace::Double has a return value which is
+ // TODO(1240712): NewSpace::Grow has a return value which is
// ignored here.
- new_space_.Double();
+ new_space_.Grow();
survived_since_last_expansion_ = 0;
}
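
EnsureFromSpaceIsCommitted follows a commit, shrink-and-retry, give-up shape. A runnable sketch with stubbed-out heap operations (the stubs are assumptions for illustration, not V8 behavior):

    #include <cstdio>
    #include <cstdlib>

    // Toy stand-ins; the real functions commit OS memory for the semispace
    // and release committed-but-unused pages elsewhere in the heap.
    static int commit_attempts = 0;
    bool CommitFromSpaceIfNeeded() { return ++commit_attempts > 1; }
    void Shrink() { /* pretend to release memory */ }

    void EnsureFromSpaceIsCommitted() {
      if (CommitFromSpaceIfNeeded()) return;   // common case
      Shrink();                                // free memory, then retry once
      if (CommitFromSpaceIfNeeded()) return;
      std::fprintf(stderr, "Committing semi space failed.\n");
      std::abort();                            // genuinely out of memory
    }

    int main() {
      EnsureFromSpaceIsCommitted();            // succeeds on the retry here
      std::printf("committed after %d attempt(s)\n", commit_attempts);
      return 0;
    }
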
diff --git a/V8Binding/v8/src/heap.h b/V8Binding/v8/src/heap.h
index 30522dc..179f9af 100644
--- a/V8Binding/v8/src/heap.h
+++ b/V8Binding/v8/src/heap.h
@@ -280,6 +280,9 @@ class Heap : public AllStatic {
return new_space_.allocation_limit_address();
}
+ // Uncommit unused semi space.
+ static bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the heap by marking all spaces read-only/writable.
static void Protect();
@@ -794,6 +797,9 @@ class Heap : public AllStatic {
// Rebuild remembered set in old and map spaces.
static void RebuildRSets();
+ // Commits from space if it is uncommitted.
+ static void EnsureFromSpaceIsCommitted();
+
//
// Support for the API.
//
diff --git a/V8Binding/v8/src/ia32/codegen-ia32.h b/V8Binding/v8/src/ia32/codegen-ia32.h
index 1d0cc8b..3b0971d 100644
--- a/V8Binding/v8/src/ia32/codegen-ia32.h
+++ b/V8Binding/v8/src/ia32/codegen-ia32.h
@@ -299,14 +299,9 @@ class CodeGenerator: public AstVisitor {
#endif
static void SetFunctionInfo(Handle<JSFunction> fun,
- int length,
- int function_token_position,
- int start_position,
- int end_position,
- bool is_expression,
+ FunctionLiteral* lit,
bool is_toplevel,
- Handle<Script> script,
- Handle<String> inferred_name);
+ Handle<Script> script);
// Accessors
MacroAssembler* masm() { return masm_; }
diff --git a/V8Binding/v8/src/ia32/cpu-ia32.cc b/V8Binding/v8/src/ia32/cpu-ia32.cc
index 82a5565..2107ad9 100644
--- a/V8Binding/v8/src/ia32/cpu-ia32.cc
+++ b/V8Binding/v8/src/ia32/cpu-ia32.cc
@@ -27,6 +27,10 @@
// CPU specific code for ia32 independent of OS goes here.
+#ifdef __GNUC__
+#include "third_party/valgrind/valgrind.h"
+#endif
+
#include "v8.h"
#include "cpu.h"
@@ -49,6 +53,15 @@ void CPU::FlushICache(void* start, size_t size) {
// If flushing of the instruction cache becomes necessary Windows has the
// API function FlushInstructionCache.
+
+ // By default, valgrind only checks the stack for writes that might need to
+ // invalidate already cached translated code. This leads to random
+  // instability because code patches or moves sometimes go unnoticed. One
+ // solution is to run valgrind with --smc-check=all, but this comes at a big
+ // performance cost. We can notify valgrind to invalidate its cache.
+#ifdef VALGRIND_DISCARD_TRANSLATIONS
+ VALGRIND_DISCARD_TRANSLATIONS(start, size);
+#endif
}
diff --git a/V8Binding/v8/src/ia32/jump-target-ia32.cc b/V8Binding/v8/src/ia32/jump-target-ia32.cc
index 587fb2d..c3f2bc1 100644
--- a/V8Binding/v8/src/ia32/jump-target-ia32.cc
+++ b/V8Binding/v8/src/ia32/jump-target-ia32.cc
@@ -362,6 +362,70 @@ void JumpTarget::DoBind() {
__ bind(&entry_label_);
}
+
+void BreakTarget::Jump() {
+ // Drop leftover statement state from the frame before merging, without
+ // emitting code.
+ ASSERT(cgen()->has_valid_frame());
+ int count = cgen()->frame()->height() - expected_height_;
+ cgen()->frame()->ForgetElements(count);
+ DoJump();
+}
+
+
+void BreakTarget::Jump(Result* arg) {
+ // Drop leftover statement state from the frame before merging, without
+ // emitting code.
+ ASSERT(cgen()->has_valid_frame());
+ int count = cgen()->frame()->height() - expected_height_;
+ cgen()->frame()->ForgetElements(count);
+ cgen()->frame()->Push(arg);
+ DoJump();
+}
+
+
+void BreakTarget::Bind() {
+#ifdef DEBUG
+ // All the forward-reaching frames should have been adjusted at the
+ // jumps to this target.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ ASSERT(reaching_frames_[i] == NULL ||
+ reaching_frames_[i]->height() == expected_height_);
+ }
+#endif
+ // Drop leftover statement state from the frame before merging, even on
+ // the fall through. This is so we can bind the return target with state
+ // on the frame.
+ if (cgen()->has_valid_frame()) {
+ int count = cgen()->frame()->height() - expected_height_;
+ cgen()->frame()->ForgetElements(count);
+ }
+ DoBind();
+}
+
+
+void BreakTarget::Bind(Result* arg) {
+#ifdef DEBUG
+ // All the forward-reaching frames should have been adjusted at the
+ // jumps to this target.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ ASSERT(reaching_frames_[i] == NULL ||
+ reaching_frames_[i]->height() == expected_height_ + 1);
+ }
+#endif
+ // Drop leftover statement state from the frame before merging, even on
+ // the fall through. This is so we can bind the return target with state
+ // on the frame.
+ if (cgen()->has_valid_frame()) {
+ int count = cgen()->frame()->height() - expected_height_;
+ cgen()->frame()->ForgetElements(count);
+ cgen()->frame()->Push(arg);
+ }
+ DoBind();
+ *arg = cgen()->frame()->Pop();
+}
+
+
#undef __
diff --git a/V8Binding/v8/src/ia32/macro-assembler-ia32.cc b/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
index 7782aa9..e362cd3 100644
--- a/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
+++ b/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
@@ -82,8 +82,8 @@ static void RecordWriteHelper(MacroAssembler* masm,
// page_start + kObjectStartOffset + objectSize
// where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
// Add the delta between the end of the normal RSet and the start of the
- // extra RSet to 'object', so that addressing the bit using 'pointer_offset'
- // hits the extra RSet words.
+ // extra RSet to 'page_start', so that addressing the bit using
+ // 'pointer_offset' hits the extra RSet words.
masm->lea(page_start,
Operand(page_start, array_length, times_pointer_size,
Page::kObjectStartOffset + FixedArray::kHeaderSize
diff --git a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc
index a49c1f5..27a66bc 100644
--- a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -54,7 +54,7 @@ namespace internal {
*
* Each call to a public method should retain this convention.
* The stack will have the following structure:
- * - stack_area_top (High end of the memory area to use as
+ * - stack_area_base (High end of the memory area to use as
* backtracking stack)
* - at_start (if 1, start at start of string, if 0, don't)
* - int* capture_array (int[num_saved_registers_], for output).
@@ -78,13 +78,13 @@ namespace internal {
* character of the string). The remaining registers starts out as garbage.
*
* The data up to the return address must be placed there by the calling
- * code, e.g., by calling the code entry as cast to:
+ * code, by calling the code entry as cast to a function with the signature:
* int (*match)(String* input_string,
* Address start,
* Address end,
* int* capture_output_array,
* bool at_start,
- * byte* stack_area_top)
+ * byte* stack_area_base)
*/
#define __ ACCESS_MASM(masm_)
@@ -93,7 +93,6 @@ RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
Mode mode,
int registers_to_save)
: masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
- constants_(kRegExpConstantsSize),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -156,13 +155,6 @@ void RegExpMacroAssemblerIA32::Bind(Label* label) {
}
-void RegExpMacroAssemblerIA32::CheckBitmap(uc16 start,
- Label* bitmap,
- Label* on_zero) {
- UNIMPLEMENTED();
-}
-
-
void RegExpMacroAssemblerIA32::CheckCharacter(uint32_t c, Label* on_equal) {
__ cmp(current_character(), c);
BranchOrBacktrack(equal, on_equal);
@@ -217,15 +209,9 @@ void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
BranchOrBacktrack(greater, on_failure);
}
- Label backtrack;
if (on_failure == NULL) {
- // Avoid inlining the Backtrack macro for each test.
- Label skip_backtrack;
- __ jmp(&skip_backtrack);
- __ bind(&backtrack);
- Backtrack();
- __ bind(&skip_backtrack);
- on_failure = &backtrack;
+ // Instead of inlining a backtrack, (re)use the global backtrack target.
+ on_failure = &backtrack_label_;
}
for (int i = 0; i < str.length(); i++) {
@@ -581,34 +567,6 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
}
}
-void RegExpMacroAssemblerIA32::DispatchHalfNibbleMap(
- uc16 start,
- Label* half_nibble_map,
- const Vector<Label*>& destinations) {
- UNIMPLEMENTED();
-}
-
-
-void RegExpMacroAssemblerIA32::DispatchByteMap(
- uc16 start,
- Label* byte_map,
- const Vector<Label*>& destinations) {
- UNIMPLEMENTED();
-}
-
-
-void RegExpMacroAssemblerIA32::DispatchHighByteMap(
- byte start,
- Label* byte_map,
- const Vector<Label*>& destinations) {
- UNIMPLEMENTED();
-}
-
-
-void RegExpMacroAssemblerIA32::EmitOrLink(Label* label) {
- UNIMPLEMENTED(); // Has no use.
-}
-
void RegExpMacroAssemblerIA32::Fail() {
ASSERT(FAILURE == 0); // Return value for failure is zero.
@@ -668,17 +626,17 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ mov(edi, Operand(ebp, kInputStart));
// Set up edi to be negative offset from string end.
__ sub(edi, Operand(esi));
- if (num_saved_registers_ > 0) {
+ // Set eax to address of char before start of input
+ // (effectively string position -1).
+ __ lea(eax, Operand(edi, -char_size()));
+ // Store this value in a local variable, for use when clearing
+ // position registers.
+ __ mov(Operand(ebp, kInputStartMinusOne), eax);
+  if (num_saved_registers_ > 0) {  // Always true when generated from a regexp.
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
// page (a problem on Windows).
__ mov(ecx, kRegisterZero);
- // Set eax to address of char before start of input
- // (effectively string position -1).
- __ lea(eax, Operand(edi, -char_size()));
- // Store this value in a local variable, for use when clearing
- // position registers.
- __ mov(Operand(ebp, kInputStartMinusOne), eax);
Label init_loop;
__ bind(&init_loop);
__ mov(Operand(ebp, ecx, times_1, +0), eax);
@@ -942,139 +900,8 @@ void RegExpMacroAssemblerIA32::WriteStackPointerToRegister(int reg) {
}
-RegExpMacroAssemblerIA32::Result RegExpMacroAssemblerIA32::Match(
- Handle<Code> regexp_code,
- Handle<String> subject,
- int* offsets_vector,
- int offsets_vector_length,
- int previous_index) {
-
- ASSERT(subject->IsFlat());
- ASSERT(previous_index >= 0);
- ASSERT(previous_index <= subject->length());
-
- // No allocations before calling the regexp, but we can't use
- // AssertNoAllocation, since regexps might be preempted, and another thread
- // might do allocation anyway.
-
- String* subject_ptr = *subject;
- // Character offsets into string.
- int start_offset = previous_index;
- int end_offset = subject_ptr->length();
-
- bool is_ascii = subject->IsAsciiRepresentation();
-
- if (StringShape(subject_ptr).IsCons()) {
- subject_ptr = ConsString::cast(subject_ptr)->first();
- } else if (StringShape(subject_ptr).IsSliced()) {
- SlicedString* slice = SlicedString::cast(subject_ptr);
- start_offset += slice->start();
- end_offset += slice->start();
- subject_ptr = slice->buffer();
- }
- // Ensure that an underlying string has the same ascii-ness.
- ASSERT(subject_ptr->IsAsciiRepresentation() == is_ascii);
- ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
- // String is now either Sequential or External
- int char_size_shift = is_ascii ? 0 : 1;
- int char_length = end_offset - start_offset;
-
- const byte* input_start =
- StringCharacterPosition(subject_ptr, start_offset);
- int byte_length = char_length << char_size_shift;
- const byte* input_end = input_start + byte_length;
- RegExpMacroAssemblerIA32::Result res = Execute(*regexp_code,
- subject_ptr,
- start_offset,
- input_start,
- input_end,
- offsets_vector,
- previous_index == 0);
-
- if (res == SUCCESS) {
- // Capture values are relative to start_offset only.
- // Convert them to be relative to start of string.
- for (int i = 0; i < offsets_vector_length; i++) {
- if (offsets_vector[i] >= 0) {
- offsets_vector[i] += previous_index;
- }
- }
- }
-
- return res;
-}
-
// Private methods:
-static unibrow::Mapping<unibrow::Ecma262Canonicalize> canonicalize;
-
-RegExpMacroAssemblerIA32::Result RegExpMacroAssemblerIA32::Execute(
- Code* code,
- String* input,
- int start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- bool at_start) {
- typedef int (*matcher)(String*, int, const byte*,
- const byte*, int*, int, Address);
- matcher matcher_func = FUNCTION_CAST<matcher>(code->entry());
-
- int at_start_val = at_start ? 1 : 0;
-
- // Ensure that the minimum stack has been allocated.
- RegExpStack stack;
- Address stack_top = RegExpStack::stack_top();
-
- int result = matcher_func(input,
- start_offset,
- input_start,
- input_end,
- output,
- at_start_val,
- stack_top);
- ASSERT(result <= SUCCESS);
- ASSERT(result >= RETRY);
-
- if (result == EXCEPTION && !Top::has_pending_exception()) {
- // We detected a stack overflow (on the backtrack stack) in RegExp code,
- // but haven't created the exception yet.
- Top::StackOverflow();
- }
- return static_cast<Result>(result);
-}
-
-
-int RegExpMacroAssemblerIA32::CaseInsensitiveCompareUC16(Address byte_offset1,
- Address byte_offset2,
- size_t byte_length) {
- // This function is not allowed to cause a garbage collection.
- // A GC might move the calling generated code and invalidate the
- // return address on the stack.
- ASSERT(byte_length % 2 == 0);
- uc16* substring1 = reinterpret_cast<uc16*>(byte_offset1);
- uc16* substring2 = reinterpret_cast<uc16*>(byte_offset2);
- size_t length = byte_length >> 1;
-
- for (size_t i = 0; i < length; i++) {
- unibrow::uchar c1 = substring1[i];
- unibrow::uchar c2 = substring2[i];
- if (c1 != c2) {
- unibrow::uchar s1[1] = { c1 };
- canonicalize.get(c1, '\0', s1);
- if (s1[0] != c2) {
- unibrow::uchar s2[1] = { c2 };
- canonicalize.get(c2, '\0', s2);
- if (s1[0] != s2[0]) {
- return 0;
- }
- }
- }
- }
- return 1;
-}
-
-
void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
int num_arguments = 3;
FrameAlign(num_arguments, scratch);
@@ -1096,35 +923,6 @@ static T& frame_entry(Address re_frame, int frame_offset) {
}
-const byte* RegExpMacroAssemblerIA32::StringCharacterPosition(String* subject,
- int start_index) {
- // Not just flat, but ultra flat.
- ASSERT(subject->IsExternalString() || subject->IsSeqString());
- ASSERT(start_index >= 0);
- ASSERT(start_index <= subject->length());
- if (subject->IsAsciiRepresentation()) {
- const byte* address;
- if (StringShape(subject).IsExternal()) {
- const char* data = ExternalAsciiString::cast(subject)->resource()->data();
- address = reinterpret_cast<const byte*>(data);
- } else {
- ASSERT(subject->IsSeqAsciiString());
- char* data = SeqAsciiString::cast(subject)->GetChars();
- address = reinterpret_cast<const byte*>(data);
- }
- return address + start_index;
- }
- const uc16* data;
- if (StringShape(subject).IsExternal()) {
- data = ExternalTwoByteString::cast(subject)->resource()->data();
- } else {
- ASSERT(subject->IsSeqTwoByteString());
- data = SeqTwoByteString::cast(subject)->GetChars();
- }
- return reinterpret_cast<const byte*>(data + start_index);
-}
-
-
int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
@@ -1198,18 +996,18 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
Address RegExpMacroAssemblerIA32::GrowStack(Address stack_pointer,
- Address* stack_top) {
+ Address* stack_base) {
size_t size = RegExpStack::stack_capacity();
- Address old_stack_top = RegExpStack::stack_top();
- ASSERT(old_stack_top == *stack_top);
- ASSERT(stack_pointer <= old_stack_top);
- ASSERT(static_cast<size_t>(old_stack_top - stack_pointer) <= size);
- Address new_stack_top = RegExpStack::EnsureCapacity(size * 2);
- if (new_stack_top == NULL) {
+ Address old_stack_base = RegExpStack::stack_base();
+ ASSERT(old_stack_base == *stack_base);
+ ASSERT(stack_pointer <= old_stack_base);
+ ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
+ Address new_stack_base = RegExpStack::EnsureCapacity(size * 2);
+ if (new_stack_base == NULL) {
return NULL;
}
- *stack_top = new_stack_top;
- return new_stack_top - (old_stack_top - stack_pointer);
+ *stack_base = new_stack_base;
+ return new_stack_base - (old_stack_base - stack_pointer);
}
@@ -1373,11 +1171,5 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
}
-void RegExpMacroAssemblerIA32::LoadConstantBufferAddress(Register reg,
- ArraySlice* buffer) {
- __ mov(reg, buffer->array());
- __ add(Operand(reg), Immediate(buffer->base_offset()));
-}
-
#undef __
}} // namespace v8::internal
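
GrowStack preserves the stack pointer's offset from the base across reallocation; since the backtrack stack grows down from its base (the high end of the area), the returned pointer is new_base minus the bytes in use. A self-contained sketch with malloc standing in for RegExpStack::EnsureCapacity:

    #include <cassert>
    #include <cstdlib>
    #include <cstring>

    typedef unsigned char byte;

    // Only the *offset* of the stack pointer from the base survives the move.
    byte* GrowStack(byte* stack_pointer, byte** stack_base, size_t* capacity) {
      size_t used = *stack_base - stack_pointer;     // bytes currently in use
      byte* old_block = *stack_base - *capacity;     // low end of old area
      size_t new_capacity = *capacity * 2;
      byte* new_block = static_cast<byte*>(std::malloc(new_capacity));
      if (new_block == NULL) return NULL;
      byte* new_base = new_block + new_capacity;     // stack grows down from here
      std::memcpy(new_base - used, stack_pointer, used);
      std::free(old_block);
      *stack_base = new_base;
      *capacity = new_capacity;
      return new_base - used;                        // same offset, new base
    }

    int main() {
      size_t capacity = 16;
      byte* base = static_cast<byte*>(std::malloc(capacity)) + capacity;
      byte* sp = base - 8;                           // 8 bytes pushed
      std::memset(sp, 0xAB, 8);
      sp = GrowStack(sp, &base, &capacity);
      assert(sp != NULL && sp == base - 8 && sp[0] == 0xAB && capacity == 32);
      return 0;
    }
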
diff --git a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h
index c3d9155..e8b5b2e 100644
--- a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -31,21 +31,8 @@
namespace v8 {
namespace internal {
-class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
+class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
public:
- // Type of input string to generate code for.
- enum Mode { ASCII = 1, UC16 = 2 };
- // Result of calling the generated RegExp code:
- // RETRY: Something significant changed during execution, and the matching
- // should be retried from scratch.
- // EXCEPTION: Something failed during execution. If no exception has been
- // thrown, it's an internal out-of-memory, and the caller should
- // throw the exception.
- // FAILURE: Matching failed.
- // SUCCESS: Matching succeeded, and the output array has been filled with
- // capture positions.
- enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };
-
RegExpMacroAssemblerIA32(Mode mode, int registers_to_save);
virtual ~RegExpMacroAssemblerIA32();
virtual int stack_limit_slack();
@@ -54,7 +41,6 @@ class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
virtual void Backtrack();
virtual void Bind(Label* label);
virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckBitmap(uc16 start, Label* bitmap, Label* on_zero);
virtual void CheckCharacter(uint32_t c, Label* on_equal);
virtual void CheckCharacterAfterAnd(uint32_t c,
uint32_t mask,
@@ -88,16 +74,6 @@ class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
int cp_offset,
bool check_offset,
Label* on_no_match);
- virtual void DispatchByteMap(uc16 start,
- Label* byte_map,
- const Vector<Label*>& destinations);
- virtual void DispatchHalfNibbleMap(uc16 start,
- Label* half_nibble_map,
- const Vector<Label*>& destinations);
- virtual void DispatchHighByteMap(byte start,
- Label* byte_map,
- const Vector<Label*>& destinations);
- virtual void EmitOrLink(Label* label);
virtual void Fail();
virtual Handle<Object> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
@@ -123,20 +99,6 @@ class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
- static Result Match(Handle<Code> regexp,
- Handle<String> subject,
- int* offsets_vector,
- int offsets_vector_length,
- int previous_index);
-
- static Result Execute(Code* code,
- String* input,
- int start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- bool at_start);
-
private:
// Offsets from ebp of function parameters and stored registers.
static const int kFramePointer = 0;
@@ -163,16 +125,6 @@ class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
- // Initial size of constant buffers allocated during compilation.
- static const int kRegExpConstantsSize = 256;
-
- static const byte* StringCharacterPosition(String* subject, int start_index);
-
- // Compares two-byte strings case insensitively.
- // Called from generated RegExp code.
- static int CaseInsensitiveCompareUC16(Address byte_offset1,
- Address byte_offset2,
- size_t byte_length);
// Load a number of characters at the given offset from the
// current position, into the current-character register.
@@ -218,11 +170,6 @@ class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
// is NULL, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Condition condition, Label* to, Hint hint = no_hint);
- // Load the address of a "constant buffer" (a slice of a byte array)
- // into a register. The address is computed from the ByteArray* address
- // and an offset. Uses no extra registers.
- void LoadConstantBufferAddress(Register reg, ArraySlice* buffer);
-
// Call and return internally in the generated code in a way that
// is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
inline void SafeCall(Label* to);
@@ -258,10 +205,6 @@ class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
MacroAssembler* masm_;
- // Constant buffer provider. Allocates external storage for storing
- // constants.
- ByteArrayProvider constants_;
-
// Which mode to generate code for (ASCII or UC16).
Mode mode_;
diff --git a/V8Binding/v8/src/ia32/virtual-frame-ia32.cc b/V8Binding/v8/src/ia32/virtual-frame-ia32.cc
index 0854636..1b8232f 100644
--- a/V8Binding/v8/src/ia32/virtual-frame-ia32.cc
+++ b/V8Binding/v8/src/ia32/virtual-frame-ia32.cc
@@ -965,6 +965,7 @@ Result VirtualFrame::CallConstructor(int arg_count) {
void VirtualFrame::Drop(int count) {
+ ASSERT(count >= 0);
ASSERT(height() >= count);
int num_virtual_elements = (element_count() - 1) - stack_pointer_;
diff --git a/V8Binding/v8/src/jsregexp.cc b/V8Binding/v8/src/jsregexp.cc
index bd51102..06208aa 100644
--- a/V8Binding/v8/src/jsregexp.cc
+++ b/V8Binding/v8/src/jsregexp.cc
@@ -43,6 +43,7 @@
#include "regexp-macro-assembler-irregexp.h"
#include "regexp-stack.h"
+#ifdef V8_NATIVE_REGEXP
#if V8_TARGET_ARCH_IA32
#include "ia32/macro-assembler-ia32.h"
#include "ia32/regexp-macro-assembler-ia32.h"
@@ -54,6 +55,7 @@
#else
#error Unsupported target architecture.
#endif
+#endif
#include "interpreter-irregexp.h"
@@ -270,10 +272,11 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
// If compilation fails, an exception is thrown and this function
// returns false.
bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii) {
+ Object* compiled_code = re->DataAt(JSRegExp::code_index(is_ascii));
#ifdef V8_NATIVE_REGEXP
- if (re->DataAt(JSRegExp::code_index(is_ascii))->IsCode()) return true;
+ if (compiled_code->IsCode()) return true;
#else // ! V8_NATIVE_REGEXP (RegExp interpreter code)
- if (re->DataAt(JSRegExp::code_index(is_ascii))->IsByteArray()) return true;
+ if (compiled_code->IsByteArray()) return true;
#endif
return CompileIrregexp(re, is_ascii);
}
@@ -414,33 +417,36 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
// Dispatch to the correct RegExp implementation.
Handle<FixedArray> regexp(FixedArray::cast(jsregexp->data()));
+
#ifdef V8_NATIVE_REGEXP
-#if V8_TARGET_ARCH_IA32
+#ifdef V8_TARGET_ARCH_ARM
+ UNIMPLEMENTED();
+#else // Native regexp supported.
OffsetsVector captures(number_of_capture_registers);
int* captures_vector = captures.vector();
- RegExpMacroAssemblerIA32::Result res;
+ NativeRegExpMacroAssembler::Result res;
do {
bool is_ascii = subject->IsAsciiRepresentation();
if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) {
return Handle<Object>::null();
}
Handle<Code> code(RegExpImpl::IrregexpNativeCode(*regexp, is_ascii));
- res = RegExpMacroAssemblerIA32::Match(code,
- subject,
- captures_vector,
- captures.length(),
- previous_index);
+ res = NativeRegExpMacroAssembler::Match(code,
+ subject,
+ captures_vector,
+ captures.length(),
+ previous_index);
// If result is RETRY, the string has changed representation, and we
// must restart from scratch.
- } while (res == RegExpMacroAssemblerIA32::RETRY);
- if (res == RegExpMacroAssemblerIA32::EXCEPTION) {
+ } while (res == NativeRegExpMacroAssembler::RETRY);
+ if (res == NativeRegExpMacroAssembler::EXCEPTION) {
ASSERT(Top::has_pending_exception());
return Handle<Object>::null();
}
- ASSERT(res == RegExpMacroAssemblerIA32::SUCCESS
- || res == RegExpMacroAssemblerIA32::FAILURE);
+ ASSERT(res == NativeRegExpMacroAssembler::SUCCESS
+ || res == NativeRegExpMacroAssembler::FAILURE);
- if (res != RegExpMacroAssemblerIA32::SUCCESS) return Factory::null_value();
+ if (res != NativeRegExpMacroAssembler::SUCCESS) return Factory::null_value();
array = Handle<FixedArray>(FixedArray::cast(last_match_info->elements()));
ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
@@ -449,10 +455,9 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
SetCapture(*array, i, captures_vector[i]);
SetCapture(*array, i + 1, captures_vector[i + 1]);
}
-#else // !V8_TARGET_ARCH_IA32
- UNREACHABLE();
-#endif // V8_TARGET_ARCH_IA32
-#else // !V8_NATIVE_REGEXP
+#endif // Native regexp supported.
+
+#else // ! V8_NATIVE_REGEXP
bool is_ascii = subject->IsAsciiRepresentation();
if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) {
return Handle<Object>::null();
@@ -4457,38 +4462,36 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
NodeInfo info = *node->info();
+ // Create the correct assembler for the architecture.
#ifdef V8_NATIVE_REGEXP
-#ifdef V8_TARGET_ARCH_ARM
- // ARM native regexp not implemented yet.
- UNREACHABLE();
-#endif
-#ifdef V8_TARGET_ARCH_X64
- // X64 native regexp not implemented yet.
- UNREACHABLE();
-#endif
+ // Native regexp implementation.
+
+ NativeRegExpMacroAssembler::Mode mode =
+ is_ascii ? NativeRegExpMacroAssembler::ASCII
+ : NativeRegExpMacroAssembler::UC16;
+
#ifdef V8_TARGET_ARCH_IA32
- RegExpMacroAssemblerIA32::Mode mode;
- if (is_ascii) {
- mode = RegExpMacroAssemblerIA32::ASCII;
- } else {
- mode = RegExpMacroAssemblerIA32::UC16;
- }
RegExpMacroAssemblerIA32 macro_assembler(mode,
(data->capture_count + 1) * 2);
- return compiler.Assemble(&macro_assembler,
- node,
- data->capture_count,
- pattern);
#endif
+#ifdef V8_TARGET_ARCH_X64
+ RegExpMacroAssemblerX64 macro_assembler(mode,
+ (data->capture_count + 1) * 2);
+#endif
+#ifdef V8_TARGET_ARCH_ARM
+ UNIMPLEMENTED();
+#endif
+
#else // ! V8_NATIVE_REGEXP
- // Interpreted regexp.
+ // Interpreted regexp implementation.
EmbeddedVector<byte, 1024> codes;
RegExpMacroAssemblerIrregexp macro_assembler(codes);
+#endif
+
return compiler.Assemble(&macro_assembler,
node,
data->capture_count,
pattern);
-#endif // V8_NATIVE_REGEXP
}
}} // namespace v8::internal
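
The RETRY result drives a recompile-and-rerun loop: if the subject string changes representation (ASCII vs two-byte) while a match runs, the compiled code no longer fits the string layout and the whole attempt restarts. A minimal sketch of the loop's shape (MatchOnce is hypothetical):

    #include <cstdio>

    enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };

    // Hypothetical single attempt; the real loop re-checks the subject's
    // representation and recompiles (EnsureCompiledIrregexp) before retrying.
    Result MatchOnce(int attempt) { return attempt == 0 ? RETRY : SUCCESS; }

    int main() {
      Result res;
      int attempt = 0;
      do {
        res = MatchOnce(attempt++);
      } while (res == RETRY);
      std::printf("result %d after %d attempt(s)\n", res, attempt);
      return 0;
    }
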
diff --git a/V8Binding/v8/src/jump-target.cc b/V8Binding/v8/src/jump-target.cc
index 8168dd0..3782f92 100644
--- a/V8Binding/v8/src/jump-target.cc
+++ b/V8Binding/v8/src/jump-target.cc
@@ -323,25 +323,6 @@ void BreakTarget::CopyTo(BreakTarget* destination) {
}
-void BreakTarget::Jump() {
- ASSERT(cgen()->has_valid_frame());
-
- // Drop leftover statement state from the frame before merging.
- cgen()->frame()->ForgetElements(cgen()->frame()->height() - expected_height_);
- DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
- ASSERT(cgen()->has_valid_frame());
-
- // Drop leftover statement state from the frame before merging.
- cgen()->frame()->ForgetElements(cgen()->frame()->height() - expected_height_);
- cgen()->frame()->Push(arg);
- DoJump();
-}
-
-
void BreakTarget::Branch(Condition cc, Hint hint) {
ASSERT(cgen()->has_valid_frame());
@@ -362,48 +343,6 @@ void BreakTarget::Branch(Condition cc, Hint hint) {
}
-void BreakTarget::Bind() {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even
- // on the fall through. This is so we can bind the return target
- // with state on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- }
- DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_ + 1);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even
- // on the fall through. This is so we can bind the return target
- // with state on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- cgen()->frame()->Push(arg);
- }
- DoBind();
- *arg = cgen()->frame()->Pop();
-}
-
-
// -------------------------------------------------------------------------
// ShadowTarget implementation.
diff --git a/V8Binding/v8/src/math.js b/V8Binding/v8/src/math.js
index db75cb2..e3d266e 100644
--- a/V8Binding/v8/src/math.js
+++ b/V8Binding/v8/src/math.js
@@ -184,6 +184,7 @@ function MathTan(x) {
function SetupMath() {
// Setup math constants.
// ECMA-262, section 15.8.1.1.
+ %OptimizeObjectForAddingMultipleProperties($Math, 8);
%SetProperty($Math,
"E",
2.7182818284590452354,
@@ -219,6 +220,7 @@ function SetupMath() {
"SQRT2",
1.4142135623730951,
DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %TransformToFastProperties($Math);
// Setup non-enumerable functions of the Math object and
// set their names.
diff --git a/V8Binding/v8/src/messages.js b/V8Binding/v8/src/messages.js
index fd505ff..8328fe5 100644
--- a/V8Binding/v8/src/messages.js
+++ b/V8Binding/v8/src/messages.js
@@ -28,88 +28,36 @@
// -------------------------------------------------------------------
-const kVowelSounds = {a: true, e: true, i: true, o: true, u: true, y: true};
-const kCapitalVowelSounds = {a: true, e: true, i: true, o: true, u: true,
- h: true, f: true, l: true, m: true, n: true, r: true, s: true, x: true,
- y: true};
+// Lazily initialized.
+var kVowelSounds = 0;
+var kCapitalVowelSounds = 0;
+
function GetInstanceName(cons) {
if (cons.length == 0) {
return "";
}
var first = %StringToLowerCase(StringCharAt.call(cons, 0));
- var mapping = kVowelSounds;
+ if (kVowelSounds === 0) {
+ kVowelSounds = {a: true, e: true, i: true, o: true, u: true, y: true};
+ kCapitalVowelSounds = {a: true, e: true, i: true, o: true, u: true, h: true,
+ f: true, l: true, m: true, n: true, r: true, s: true, x: true, y: true};
+ }
+ var vowel_mapping = kVowelSounds;
if (cons.length > 1 && (StringCharAt.call(cons, 0) != first)) {
// First char is upper case
var second = %StringToLowerCase(StringCharAt.call(cons, 1));
// Second char is upper case
- if (StringCharAt.call(cons, 1) != second)
- mapping = kCapitalVowelSounds;
+ if (StringCharAt.call(cons, 1) != second) {
+ vowel_mapping = kCapitalVowelSounds;
+ }
}
- var s = mapping[first] ? "an " : "a ";
+ var s = vowel_mapping[first] ? "an " : "a ";
return s + cons;
}
-const kMessages = {
- // Error
- cyclic_proto: "Cyclic __proto__ value",
- // TypeError
- unexpected_token: "Unexpected token %0",
- unexpected_token_number: "Unexpected number",
- unexpected_token_string: "Unexpected string",
- unexpected_token_identifier: "Unexpected identifier",
- unexpected_eos: "Unexpected end of input",
- malformed_regexp: "Invalid regular expression: /%0/: %1",
- unterminated_regexp: "Invalid regular expression: missing /",
- regexp_flags: "Cannot supply flags when constructing one RegExp from another",
- invalid_lhs_in_assignment: "Invalid left-hand side in assignment",
- invalid_lhs_in_for_in: "Invalid left-hand side in for-in",
- invalid_lhs_in_postfix_op: "Invalid left-hand side expression in postfix operation",
- invalid_lhs_in_prefix_op: "Invalid left-hand side expression in prefix operation",
- multiple_defaults_in_switch: "More than one default clause in switch statement",
- newline_after_throw: "Illegal newline after throw",
- redeclaration: "%0 '%1' has already been declared",
- no_catch_or_finally: "Missing catch or finally after try",
- unknown_label: "Undefined label '%0'",
- uncaught_exception: "Uncaught %0",
- stack_trace: "Stack Trace:\n%0",
- called_non_callable: "%0 is not a function",
- undefined_method: "Object %1 has no method '%0'",
- property_not_function: "Property '%0' of object %1 is not a function",
- cannot_convert_to_primitive: "Cannot convert object to primitive value",
- not_constructor: "%0 is not a constructor",
- not_defined: "%0 is not defined",
- non_object_property_load: "Cannot read property '%0' of %1",
- non_object_property_store: "Cannot set property '%0' of %1",
- non_object_property_call: "Cannot call method '%0' of %1",
- with_expression: "%0 has no properties",
- illegal_invocation: "Illegal invocation",
- no_setter_in_callback: "Cannot set property %0 of %1 which has only a getter",
- apply_non_function: "Function.prototype.apply was called on %0, which is a %1 and not a function",
- apply_wrong_args: "Function.prototype.apply: Arguments list has wrong type",
- invalid_in_operator_use: "Cannot use 'in' operator to search for '%0' in %1",
- instanceof_function_expected: "Expecting a function in instanceof check, but got %0",
- instanceof_nonobject_proto: "Function has non-object prototype '%0' in instanceof check",
- null_to_object: "Cannot convert null to object",
- reduce_no_initial: "Reduce of empty array with no initial value",
- // RangeError
- invalid_array_length: "Invalid array length",
- stack_overflow: "Maximum call stack size exceeded",
- apply_overflow: "Function.prototype.apply cannot support %0 arguments",
- // SyntaxError
- unable_to_parse: "Parse error",
- duplicate_regexp_flag: "Duplicate RegExp flag %0",
- invalid_regexp: "Invalid RegExp pattern /%0/",
- illegal_break: "Illegal break statement",
- illegal_continue: "Illegal continue statement",
- illegal_return: "Illegal return statement",
- error_loading_debugger: "Error loading debugger %0",
- no_input_to_regexp: "No input to %0",
- result_not_primitive: "Result of %0 must be a primitive, was %1",
- invalid_json: "String '%0' is not valid JSON",
- circular_structure: "Converting circular structure to JSON"
-};
+var kMessages = 0;
function FormatString(format, args) {
@@ -161,6 +109,67 @@ function MakeGenericError(constructor, type, args) {
// Helper functions; called from the runtime system.
function FormatMessage(message) {
+ if (kMessages === 0) {
+ kMessages = {
+ // Error
+ cyclic_proto: "Cyclic __proto__ value",
+ // TypeError
+ unexpected_token: "Unexpected token %0",
+ unexpected_token_number: "Unexpected number",
+ unexpected_token_string: "Unexpected string",
+ unexpected_token_identifier: "Unexpected identifier",
+ unexpected_eos: "Unexpected end of input",
+ malformed_regexp: "Invalid regular expression: /%0/: %1",
+ unterminated_regexp: "Invalid regular expression: missing /",
+ regexp_flags: "Cannot supply flags when constructing one RegExp from another",
+ invalid_lhs_in_assignment: "Invalid left-hand side in assignment",
+ invalid_lhs_in_for_in: "Invalid left-hand side in for-in",
+ invalid_lhs_in_postfix_op: "Invalid left-hand side expression in postfix operation",
+ invalid_lhs_in_prefix_op: "Invalid left-hand side expression in prefix operation",
+ multiple_defaults_in_switch: "More than one default clause in switch statement",
+ newline_after_throw: "Illegal newline after throw",
+ redeclaration: "%0 '%1' has already been declared",
+ no_catch_or_finally: "Missing catch or finally after try",
+ unknown_label: "Undefined label '%0'",
+ uncaught_exception: "Uncaught %0",
+ stack_trace: "Stack Trace:\n%0",
+ called_non_callable: "%0 is not a function",
+ undefined_method: "Object %1 has no method '%0'",
+ property_not_function: "Property '%0' of object %1 is not a function",
+ cannot_convert_to_primitive: "Cannot convert object to primitive value",
+ not_constructor: "%0 is not a constructor",
+ not_defined: "%0 is not defined",
+ non_object_property_load: "Cannot read property '%0' of %1",
+ non_object_property_store: "Cannot set property '%0' of %1",
+ non_object_property_call: "Cannot call method '%0' of %1",
+ with_expression: "%0 has no properties",
+ illegal_invocation: "Illegal invocation",
+ no_setter_in_callback: "Cannot set property %0 of %1 which has only a getter",
+ apply_non_function: "Function.prototype.apply was called on %0, which is a %1 and not a function",
+ apply_wrong_args: "Function.prototype.apply: Arguments list has wrong type",
+ invalid_in_operator_use: "Cannot use 'in' operator to search for '%0' in %1",
+ instanceof_function_expected: "Expecting a function in instanceof check, but got %0",
+ instanceof_nonobject_proto: "Function has non-object prototype '%0' in instanceof check",
+ null_to_object: "Cannot convert null to object",
+ reduce_no_initial: "Reduce of empty array with no initial value",
+ // RangeError
+ invalid_array_length: "Invalid array length",
+ stack_overflow: "Maximum call stack size exceeded",
+ apply_overflow: "Function.prototype.apply cannot support %0 arguments",
+ // SyntaxError
+ unable_to_parse: "Parse error",
+ duplicate_regexp_flag: "Duplicate RegExp flag %0",
+ invalid_regexp: "Invalid RegExp pattern /%0/",
+ illegal_break: "Illegal break statement",
+ illegal_continue: "Illegal continue statement",
+ illegal_return: "Illegal return statement",
+ error_loading_debugger: "Error loading debugger %0",
+ no_input_to_regexp: "No input to %0",
+ result_not_primitive: "Result of %0 must be a primitive, was %1",
+ invalid_json: "String '%0' is not valid JSON",
+ circular_structure: "Converting circular structure to JSON"
+ };
+ }
var format = kMessages[message.type];
if (!format) return "<unknown message " + message.type + ">";
return FormatString(format, message.args);
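
The change above defers building the vowel and message tables until the first lookup, so creating a context no longer pays for literals it may never use. A hedged C++ sketch of the same first-use initialization pattern (names and table contents illustrative):

```cpp
#include <cstdio>
#include <map>
#include <string>

// First-use initialization: the table is built the first time a message
// is formatted, not when the module loads. A function-local static gives
// the same lazy, build-once behavior that the JS code gets from the
// `kMessages === 0` check. (Names and contents are illustrative.)
static const std::map<std::string, std::string>& Messages() {
  static const std::map<std::string, std::string> table = {
      {"unexpected_eos", "Unexpected end of input"},
      {"not_defined", "%0 is not defined"},
  };
  return table;
}

std::string FormatMessage(const std::string& type) {
  std::map<std::string, std::string>::const_iterator it = Messages().find(type);
  if (it == Messages().end()) return "<unknown message " + type + ">";
  return it->second;
}

int main() {
  std::printf("%s\n", FormatMessage("unexpected_eos").c_str());
  return 0;
}
```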
diff --git a/V8Binding/v8/src/parser.cc b/V8Binding/v8/src/parser.cc
index 348c12a..e3b72a3 100644
--- a/V8Binding/v8/src/parser.cc
+++ b/V8Binding/v8/src/parser.cc
@@ -97,7 +97,7 @@ class Parser {
// Pre-parse the program from the character stream; returns true on
// success, false if a stack-overflow happened during parsing.
- bool PreParseProgram(unibrow::CharacterStream* stream);
+ bool PreParseProgram(Handle<String> source, unibrow::CharacterStream* stream);
void ReportMessage(const char* message, Vector<const char*> args);
virtual void ReportMessageAt(Scanner::Location loc,
@@ -1167,13 +1167,14 @@ Parser::Parser(Handle<Script> script,
}
-bool Parser::PreParseProgram(unibrow::CharacterStream* stream) {
+bool Parser::PreParseProgram(Handle<String> source,
+ unibrow::CharacterStream* stream) {
HistogramTimerScope timer(&Counters::pre_parse);
StackGuard guard;
AssertNoZoneAllocation assert_no_zone_allocation;
AssertNoAllocation assert_no_allocation;
NoHandleAllocation no_handle_allocation;
- scanner_.Init(Handle<String>(), stream, 0);
+ scanner_.Init(source, stream, 0);
ASSERT(target_stack_ == NULL);
mode_ = PARSE_EAGERLY;
DummyScope top_scope;
@@ -4593,7 +4594,8 @@ unsigned* ScriptDataImpl::Data() {
}
-ScriptDataImpl* PreParse(unibrow::CharacterStream* stream,
+ScriptDataImpl* PreParse(Handle<String> source,
+ unibrow::CharacterStream* stream,
v8::Extension* extension) {
Handle<Script> no_script;
bool allow_natives_syntax =
@@ -4601,7 +4603,7 @@ ScriptDataImpl* PreParse(unibrow::CharacterStream* stream,
FLAG_allow_natives_syntax ||
Bootstrapper::IsActive();
PreParser parser(no_script, allow_natives_syntax, extension);
- if (!parser.PreParseProgram(stream)) return NULL;
+ if (!parser.PreParseProgram(source, stream)) return NULL;
// The list owns the backing store so we need to clone the vector.
// That way, the result will be exactly the right size rather than
// the expected 50% too large.
diff --git a/V8Binding/v8/src/parser.h b/V8Binding/v8/src/parser.h
index c029c4b..86e1f74 100644
--- a/V8Binding/v8/src/parser.h
+++ b/V8Binding/v8/src/parser.h
@@ -143,7 +143,8 @@ FunctionLiteral* MakeAST(bool compile_in_global_context,
ScriptDataImpl* pre_data);
-ScriptDataImpl* PreParse(unibrow::CharacterStream* stream,
+ScriptDataImpl* PreParse(Handle<String> source,
+ unibrow::CharacterStream* stream,
v8::Extension* extension);
diff --git a/V8Binding/v8/src/regexp-macro-assembler-irregexp.cc b/V8Binding/v8/src/regexp-macro-assembler-irregexp.cc
index eea3c23..21b622e 100644
--- a/V8Binding/v8/src/regexp-macro-assembler-irregexp.cc
+++ b/V8Binding/v8/src/regexp-macro-assembler-irregexp.cc
@@ -375,37 +375,6 @@ void RegExpMacroAssemblerIrregexp::CheckNotRegistersEqual(int reg1,
}
-void RegExpMacroAssemblerIrregexp::CheckBitmap(uc16 start,
- Label* bitmap,
- Label* on_zero) {
- UNIMPLEMENTED();
-}
-
-
-void RegExpMacroAssemblerIrregexp::DispatchHalfNibbleMap(
- uc16 start,
- Label* half_nibble_map,
- const Vector<Label*>& table) {
- UNIMPLEMENTED();
-}
-
-
-void RegExpMacroAssemblerIrregexp::DispatchByteMap(
- uc16 start,
- Label* byte_map,
- const Vector<Label*>& table) {
- UNIMPLEMENTED();
-}
-
-
-void RegExpMacroAssemblerIrregexp::DispatchHighByteMap(
- byte start,
- Label* byte_map,
- const Vector<Label*>& table) {
- UNIMPLEMENTED();
-}
-
-
void RegExpMacroAssemblerIrregexp::CheckCharacters(
Vector<const uc16> str,
int cp_offset,
diff --git a/V8Binding/v8/src/regexp-macro-assembler-irregexp.h b/V8Binding/v8/src/regexp-macro-assembler-irregexp.h
index 597046c..dd64e7a 100644
--- a/V8Binding/v8/src/regexp-macro-assembler-irregexp.h
+++ b/V8Binding/v8/src/regexp-macro-assembler-irregexp.h
@@ -52,7 +52,6 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
// The byte-code interpreter checks on each push anyway.
virtual int stack_limit_slack() { return 1; }
virtual void Bind(Label* label);
- virtual void EmitOrLink(Label* label);
virtual void AdvanceCurrentPosition(int by); // Signed cp change.
virtual void PopCurrentPosition();
virtual void PushCurrentPosition();
@@ -100,16 +99,6 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
int cp_offset,
Label* on_failure,
bool check_end_of_string);
- virtual void CheckBitmap(uc16 start, Label* bitmap, Label* on_zero);
- virtual void DispatchHalfNibbleMap(uc16 start,
- Label* half_nibble_map,
- const Vector<Label*>& destinations);
- virtual void DispatchByteMap(uc16 start,
- Label* byte_map,
- const Vector<Label*>& destinations);
- virtual void DispatchHighByteMap(byte start,
- Label* byte_map,
- const Vector<Label*>& destinations);
virtual void IfRegisterLT(int register_index, int comparand, Label* if_lt);
virtual void IfRegisterGE(int register_index, int comparand, Label* if_ge);
virtual void IfRegisterEqPos(int register_index, Label* if_eq);
@@ -119,6 +108,7 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
private:
void Expand();
// Code and bitmap emission.
+ inline void EmitOrLink(Label* label);
inline void Emit32(uint32_t x);
inline void Emit16(uint32_t x);
inline void Emit(uint32_t bc, uint32_t arg);
diff --git a/V8Binding/v8/src/regexp-macro-assembler-tracer.cc b/V8Binding/v8/src/regexp-macro-assembler-tracer.cc
index 30eb485..0aad337 100644
--- a/V8Binding/v8/src/regexp-macro-assembler-tracer.cc
+++ b/V8Binding/v8/src/regexp-macro-assembler-tracer.cc
@@ -53,12 +53,6 @@ void RegExpMacroAssemblerTracer::Bind(Label* label) {
}
-void RegExpMacroAssemblerTracer::EmitOrLink(Label* label) {
- PrintF(" EmitOrLink(label[%08x]);\n", label);
- assembler_->EmitOrLink(label);
-}
-
-
void RegExpMacroAssemblerTracer::AdvanceCurrentPosition(int by) {
PrintF(" AdvanceCurrentPosition(by=%d);\n", by);
assembler_->AdvanceCurrentPosition(by);
@@ -311,13 +305,6 @@ void RegExpMacroAssemblerTracer::CheckCharacters(Vector<const uc16> str,
}
-void RegExpMacroAssemblerTracer::CheckBitmap(uc16 start, Label* bitmap,
- Label* on_zero) {
- PrintF(" CheckBitmap(start=u%04x, <bitmap>, label[%08x]);\n", start, on_zero);
- assembler_->CheckBitmap(start, bitmap, on_zero);
-}
-
-
bool RegExpMacroAssemblerTracer::CheckSpecialCharacterClass(
uc16 type,
int cp_offset,
@@ -338,51 +325,6 @@ bool RegExpMacroAssemblerTracer::CheckSpecialCharacterClass(
}
-void RegExpMacroAssemblerTracer::DispatchHalfNibbleMap(
- uc16 start,
- Label* half_nibble_map,
- const Vector<Label*>& destinations) {
- PrintF(" DispatchHalfNibbleMap(start=u%04x, <half_nibble_map>, [", start);
- for (int i = 0; i < destinations.length(); i++) {
- if (i > 0)
- PrintF(", ");
- PrintF("label[%08x]", destinations[i]);
- }
- PrintF(");\n");
- assembler_->DispatchHalfNibbleMap(start, half_nibble_map, destinations);
-}
-
-
-void RegExpMacroAssemblerTracer::DispatchByteMap(
- uc16 start,
- Label* byte_map,
- const Vector<Label*>& destinations) {
- PrintF(" DispatchByteMap(start=u%04x, <byte_map>, [", start);
- for (int i = 0; i < destinations.length(); i++) {
- if (i > 0)
- PrintF(", ");
- PrintF("label[%08x]", destinations[i]);
- }
- PrintF(");\n");
- assembler_->DispatchByteMap(start, byte_map, destinations);
-}
-
-
-void RegExpMacroAssemblerTracer::DispatchHighByteMap(
- byte start,
- Label* byte_map,
- const Vector<Label*>& destinations) {
- PrintF(" DispatchHighByteMap(start=u%04x, <byte_map>, [", start);
- for (int i = 0; i < destinations.length(); i++) {
- if (i > 0)
- PrintF(", ");
- PrintF("label[%08x]", destinations[i]);
- }
- PrintF(");\n");
- assembler_->DispatchHighByteMap(start, byte_map, destinations);
-}
-
-
void RegExpMacroAssemblerTracer::IfRegisterLT(int register_index,
int comparand, Label* if_lt) {
PrintF(" IfRegisterLT(register=%d, number=%d, label[%08x]);\n",
diff --git a/V8Binding/v8/src/regexp-macro-assembler-tracer.h b/V8Binding/v8/src/regexp-macro-assembler-tracer.h
index 0fd73f3..28434d7 100644
--- a/V8Binding/v8/src/regexp-macro-assembler-tracer.h
+++ b/V8Binding/v8/src/regexp-macro-assembler-tracer.h
@@ -43,7 +43,6 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
virtual void Backtrack();
virtual void Bind(Label* label);
virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckBitmap(uc16 start, Label* bitmap, Label* on_zero);
virtual void CheckCharacter(uint32_t c, Label* on_equal);
virtual void CheckCharacterAfterAnd(uint32_t c,
uint32_t and_with,
@@ -73,19 +72,6 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
int cp_offset,
bool check_offset,
Label* on_no_match);
- virtual void DispatchByteMap(
- uc16 start,
- Label* byte_map,
- const Vector<Label*>& destinations);
- virtual void DispatchHalfNibbleMap(
- uc16 start,
- Label* half_nibble_map,
- const Vector<Label*>& destinations);
- virtual void DispatchHighByteMap(
- byte start,
- Label* byte_map,
- const Vector<Label*>& destinations);
- virtual void EmitOrLink(Label* label);
virtual void Fail();
virtual Handle<Object> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
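
The tracer is a wrap-and-forward decorator: each override prints its arguments and then delegates to the wrapped assembler, which is why removing a method from the RegExpMacroAssembler interface also deletes the tracer's override and its forwarding body in the hunks above. A minimal sketch of the pattern (trimmed, illustrative interface):

```cpp
#include <cstdio>

// Minimal sketch of the tracer's wrap-and-forward pattern; this is an
// illustrative interface, not V8's full RegExpMacroAssembler.
class Assembler {
 public:
  virtual ~Assembler() {}
  virtual void AdvanceCurrentPosition(int by) = 0;
};

class RealAssembler : public Assembler {
 public:
  virtual void AdvanceCurrentPosition(int by) { (void)by; /* emit code */ }
};

class TracingAssembler : public Assembler {
 public:
  explicit TracingAssembler(Assembler* inner) : inner_(inner) {}
  virtual void AdvanceCurrentPosition(int by) {
    std::printf(" AdvanceCurrentPosition(by=%d);\n", by);  // Log first...
    inner_->AdvanceCurrentPosition(by);                    // ...then forward.
  }
 private:
  Assembler* inner_;
};

int main() {
  RealAssembler real;
  TracingAssembler tracer(&real);
  tracer.AdvanceCurrentPosition(2);
  return 0;
}
```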
diff --git a/V8Binding/v8/src/regexp-macro-assembler.cc b/V8Binding/v8/src/regexp-macro-assembler.cc
index 8dede30..7f830fe 100644
--- a/V8Binding/v8/src/regexp-macro-assembler.cc
+++ b/V8Binding/v8/src/regexp-macro-assembler.cc
@@ -25,10 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <string.h>
#include "v8.h"
#include "ast.h"
#include "assembler.h"
+#include "regexp-stack.h"
#include "regexp-macro-assembler.h"
namespace v8 {
@@ -42,38 +42,176 @@ RegExpMacroAssembler::~RegExpMacroAssembler() {
}
-ByteArrayProvider::ByteArrayProvider(unsigned int initial_size)
- : byte_array_size_(initial_size),
- current_byte_array_(),
- current_byte_array_free_offset_(initial_size) {}
+#ifdef V8_NATIVE_REGEXP // Avoid unused code, e.g., on ARM.
+
+NativeRegExpMacroAssembler::NativeRegExpMacroAssembler() {
+}
-ArraySlice ByteArrayProvider::GetBuffer(unsigned int size,
- unsigned int elem_size) {
- ASSERT(size > 0);
- size_t byte_size = size * elem_size;
- int free_offset = current_byte_array_free_offset_;
- // align elements
- free_offset += elem_size - 1;
- free_offset = free_offset - (free_offset % elem_size);
+NativeRegExpMacroAssembler::~NativeRegExpMacroAssembler() {
+}
- if (free_offset + byte_size > byte_array_size_) {
- if (byte_size > (byte_array_size_ / 2)) {
- Handle<ByteArray> solo_buffer(Factory::NewByteArray(byte_size, TENURED));
- return ArraySlice(solo_buffer, 0);
+const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
+ String* subject,
+ int start_index) {
+ // Not just flat, but ultra flat.
+ ASSERT(subject->IsExternalString() || subject->IsSeqString());
+ ASSERT(start_index >= 0);
+ ASSERT(start_index <= subject->length());
+ if (subject->IsAsciiRepresentation()) {
+ const byte* address;
+ if (StringShape(subject).IsExternal()) {
+ const char* data = ExternalAsciiString::cast(subject)->resource()->data();
+ address = reinterpret_cast<const byte*>(data);
+ } else {
+ ASSERT(subject->IsSeqAsciiString());
+ char* data = SeqAsciiString::cast(subject)->GetChars();
+ address = reinterpret_cast<const byte*>(data);
}
- current_byte_array_ = Factory::NewByteArray(byte_array_size_, TENURED);
- free_offset = 0;
+ return address + start_index;
+ }
+ const uc16* data;
+ if (StringShape(subject).IsExternal()) {
+ data = ExternalTwoByteString::cast(subject)->resource()->data();
+ } else {
+ ASSERT(subject->IsSeqTwoByteString());
+ data = SeqTwoByteString::cast(subject)->GetChars();
}
- current_byte_array_free_offset_ = free_offset + byte_size;
- return ArraySlice(current_byte_array_, free_offset);
+ return reinterpret_cast<const byte*>(data + start_index);
}
-template <typename T>
-ArraySlice ByteArrayProvider::GetBuffer(Vector<T> values) {
- ArraySlice slice = GetBuffer(values.length(), sizeof(T));
- memcpy(slice.location(), values.start(), values.length() * sizeof(T));
- return slice;
+NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
+ Handle<Code> regexp_code,
+ Handle<String> subject,
+ int* offsets_vector,
+ int offsets_vector_length,
+ int previous_index) {
+
+ ASSERT(subject->IsFlat());
+ ASSERT(previous_index >= 0);
+ ASSERT(previous_index <= subject->length());
+
+ // No allocations before calling the regexp, but we can't use
+ // AssertNoAllocation, since regexps might be preempted, and another thread
+ // might do allocation anyway.
+
+ String* subject_ptr = *subject;
+ // Character offsets into string.
+ int start_offset = previous_index;
+ int end_offset = subject_ptr->length();
+
+ bool is_ascii = subject->IsAsciiRepresentation();
+
+ if (StringShape(subject_ptr).IsCons()) {
+ subject_ptr = ConsString::cast(subject_ptr)->first();
+ } else if (StringShape(subject_ptr).IsSliced()) {
+ SlicedString* slice = SlicedString::cast(subject_ptr);
+ start_offset += slice->start();
+ end_offset += slice->start();
+ subject_ptr = slice->buffer();
+ }
+  // Ensure that the underlying string has the same ascii-ness.
+  ASSERT(subject_ptr->IsAsciiRepresentation() == is_ascii);
+  ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
+  // The string is now either sequential or external.
+ int char_size_shift = is_ascii ? 0 : 1;
+ int char_length = end_offset - start_offset;
+
+ const byte* input_start =
+ StringCharacterPosition(subject_ptr, start_offset);
+ int byte_length = char_length << char_size_shift;
+ const byte* input_end = input_start + byte_length;
+ Result res = Execute(*regexp_code,
+ subject_ptr,
+ start_offset,
+ input_start,
+ input_end,
+ offsets_vector,
+ previous_index == 0);
+
+ if (res == SUCCESS) {
+ // Capture values are relative to start_offset only.
+ // Convert them to be relative to start of string.
+ for (int i = 0; i < offsets_vector_length; i++) {
+ if (offsets_vector[i] >= 0) {
+ offsets_vector[i] += previous_index;
+ }
+ }
+ }
+
+ return res;
}
+
+
+NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
+ Code* code,
+ String* input,
+ int start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ bool at_start) {
+ typedef int (*matcher)(String*, int, const byte*,
+ const byte*, int*, int, Address);
+ matcher matcher_func = FUNCTION_CAST<matcher>(code->entry());
+
+ int at_start_val = at_start ? 1 : 0;
+
+ // Ensure that the minimum stack has been allocated.
+ RegExpStack stack;
+ Address stack_base = RegExpStack::stack_base();
+
+ int result = matcher_func(input,
+ start_offset,
+ input_start,
+ input_end,
+ output,
+ at_start_val,
+ stack_base);
+ ASSERT(result <= SUCCESS);
+ ASSERT(result >= RETRY);
+
+ if (result == EXCEPTION && !Top::has_pending_exception()) {
+ // We detected a stack overflow (on the backtrack stack) in RegExp code,
+ // but haven't created the exception yet.
+ Top::StackOverflow();
+ }
+ return static_cast<Result>(result);
+}
+
+
+static unibrow::Mapping<unibrow::Ecma262Canonicalize> canonicalize;
+
+int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
+ Address byte_offset1,
+ Address byte_offset2,
+ size_t byte_length) {
+ // This function is not allowed to cause a garbage collection.
+ // A GC might move the calling generated code and invalidate the
+ // return address on the stack.
+ ASSERT(byte_length % 2 == 0);
+ uc16* substring1 = reinterpret_cast<uc16*>(byte_offset1);
+ uc16* substring2 = reinterpret_cast<uc16*>(byte_offset2);
+ size_t length = byte_length >> 1;
+
+ for (size_t i = 0; i < length; i++) {
+ unibrow::uchar c1 = substring1[i];
+ unibrow::uchar c2 = substring2[i];
+ if (c1 != c2) {
+ unibrow::uchar s1[1] = { c1 };
+ canonicalize.get(c1, '\0', s1);
+ if (s1[0] != c2) {
+ unibrow::uchar s2[1] = { c2 };
+ canonicalize.get(c2, '\0', s2);
+ if (s1[0] != s2[0]) {
+ return 0;
+ }
+ }
+ }
+ }
+ return 1;
+}
+
+#endif // V8_NATIVE_REGEXP
} } // namespace v8::internal
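
Execute() above obtains the generated code's entry address and calls it through a typedef'd C function pointer (FUNCTION_CAST). A sketch of that address-to-function-pointer step, using an ordinary function's address in place of a JIT buffer (real generated code would additionally live in executable memory):

```cpp
#include <cstdint>
#include <cstdio>

// Sketch of the FUNCTION_CAST idea: treat a code entry address as a
// typed C function pointer and call it. Here the "entry" is just a real
// function's address; a JIT would hand back the start of an executable
// buffer instead. (Illustrative only.)
typedef int (*Matcher)(const char* input, int start);

static int ToyMatcher(const char* input, int start) {
  return input[start] == 'a' ? 1 : 0;  // Pretend-match: succeeds on 'a'.
}

int main() {
  uintptr_t entry = reinterpret_cast<uintptr_t>(&ToyMatcher);
  Matcher matcher_func = reinterpret_cast<Matcher>(entry);
  std::printf("result=%d\n", matcher_func("abc", 0));
  return 0;
}
```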
diff --git a/V8Binding/v8/src/regexp-macro-assembler.h b/V8Binding/v8/src/regexp-macro-assembler.h
index 4849864..e590827 100644
--- a/V8Binding/v8/src/regexp-macro-assembler.h
+++ b/V8Binding/v8/src/regexp-macro-assembler.h
@@ -46,6 +46,7 @@ class RegExpMacroAssembler {
enum IrregexpImplementation {
kIA32Implementation,
kARMImplementation,
+ kX64Implementation,
kBytecodeImplementation
};
@@ -67,12 +68,6 @@ class RegExpMacroAssembler {
virtual void Backtrack() = 0;
virtual void Bind(Label* label) = 0;
virtual void CheckAtStart(Label* on_at_start) = 0;
- // Check the current character against a bitmap. The range of the current
- // character must be from start to start + length_of_bitmap_in_bits.
- virtual void CheckBitmap(
- uc16 start, // The bitmap is indexed from this character.
- Label* bitmap, // Where the bitmap is emitted.
- Label* on_zero) = 0; // Where to go if the bit is 0. Fall through on 1.
// Dispatch after looking the current character up in a 2-bits-per-entry
// map. The destinations vector has up to 4 labels.
virtual void CheckCharacter(uint32_t c, Label* on_equal) = 0;
@@ -132,23 +127,6 @@ class RegExpMacroAssembler {
Label* on_no_match) {
return false;
}
- // Dispatch after looking the current character up in a byte map. The
- // destinations vector has up to 256 labels.
- virtual void DispatchByteMap(
- uc16 start,
- Label* byte_map,
- const Vector<Label*>& destinations) = 0;
- virtual void DispatchHalfNibbleMap(
- uc16 start,
- Label* half_nibble_map,
- const Vector<Label*>& destinations) = 0;
- // Dispatch after looking the high byte of the current character up in a byte
- // map. The destinations vector has up to 256 labels.
- virtual void DispatchHighByteMap(
- byte start,
- Label* byte_map,
- const Vector<Label*>& destinations) = 0;
- virtual void EmitOrLink(Label* label) = 0;
virtual void Fail() = 0;
virtual Handle<Object> GetCode(Handle<String> source) = 0;
virtual void GoTo(Label* label) = 0;
@@ -181,51 +159,53 @@ class RegExpMacroAssembler {
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset) = 0;
virtual void ClearRegisters(int reg_from, int reg_to) = 0;
virtual void WriteStackPointerToRegister(int reg) = 0;
-
- private:
};
-struct ArraySlice {
+#ifdef V8_NATIVE_REGEXP // Avoid compiling unused code.
+
+class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
public:
- ArraySlice(Handle<ByteArray> array, size_t offset)
- : array_(array), offset_(offset) {}
- Handle<ByteArray> array() { return array_; }
- // Offset in the byte array data.
- size_t offset() { return offset_; }
- // Offset from the ByteArray pointer.
- size_t base_offset() {
- return ByteArray::kHeaderSize - kHeapObjectTag + offset_;
- }
- void* location() {
- return reinterpret_cast<void*>(array_->GetDataStartAddress() + offset_);
- }
- template <typename T>
- T& at(int idx) {
- return reinterpret_cast<T*>(array_->GetDataStartAddress() + offset_)[idx];
- }
- private:
- Handle<ByteArray> array_;
- size_t offset_;
-};
+ // Type of input string to generate code for.
+ enum Mode { ASCII = 1, UC16 = 2 };
+ // Result of calling generated native RegExp code.
+ // RETRY: Something significant changed during execution, and the matching
+ // should be retried from scratch.
+ // EXCEPTION: Something failed during execution. If no exception has been
+ // thrown, it's an internal out-of-memory, and the caller should
+ // throw the exception.
+ // FAILURE: Matching failed.
+ // SUCCESS: Matching succeeded, and the output array has been filled with
+ // capture positions.
+ enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };
-class ByteArrayProvider {
- public:
- explicit ByteArrayProvider(unsigned int initial_size);
- // Provides a place to put "size" elements of size "element_size".
- // The information can be stored in the provided ByteArray at the "offset".
- // The offset is aligned to the element size.
- ArraySlice GetBuffer(unsigned int size,
- unsigned int element_size);
- template <typename T>
- ArraySlice GetBuffer(Vector<T> values);
- private:
- size_t byte_array_size_;
- Handle<ByteArray> current_byte_array_;
- int current_byte_array_free_offset_;
-};
+ NativeRegExpMacroAssembler();
+ virtual ~NativeRegExpMacroAssembler();
+
+ static Result Match(Handle<Code> regexp,
+ Handle<String> subject,
+ int* offsets_vector,
+ int offsets_vector_length,
+ int previous_index);
+ // Compares two-byte strings case insensitively.
+ // Called from generated RegExp code.
+ static int CaseInsensitiveCompareUC16(Address byte_offset1,
+ Address byte_offset2,
+ size_t byte_length);
+
+ static const byte* StringCharacterPosition(String* subject, int start_index);
+
+ static Result Execute(Code* code,
+ String* input,
+ int start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ bool at_start);
+};
+#endif // V8_NATIVE_REGEXP
} } // namespace v8::internal
#endif // V8_REGEXP_MACRO_ASSEMBLER_H_
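
The Result enum packs the four possible outcomes into small integers so generated code can return them directly and the C++ caller can branch on the raw value. A sketch of the expected dispatch (enum values copied from the header above; the handler itself is illustrative):

```cpp
#include <cstdio>

// Result codes as declared in NativeRegExpMacroAssembler above.
enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };

// Sketch of how a caller is expected to branch on the raw integer the
// generated code returns (the handler bodies are placeholders).
void HandleMatchResult(int raw) {
  switch (static_cast<Result>(raw)) {
    case SUCCESS:   std::printf("captures filled in\n"); break;
    case FAILURE:   std::printf("no match\n"); break;
    case EXCEPTION: std::printf("propagate the pending exception\n"); break;
    case RETRY:     std::printf("re-run the match from scratch\n"); break;
  }
}

int main() { HandleMatchResult(SUCCESS); return 0; }
```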
diff --git a/V8Binding/v8/src/regexp-stack.h b/V8Binding/v8/src/regexp-stack.h
index 6c090da..99cf33c 100644
--- a/V8Binding/v8/src/regexp-stack.h
+++ b/V8Binding/v8/src/regexp-stack.h
@@ -48,7 +48,7 @@ class RegExpStack {
~RegExpStack(); // Releases the stack if it has grown.
// Gives the top of the memory used as stack.
- static Address stack_top() {
+ static Address stack_base() {
ASSERT(thread_local_.memory_size_ != 0);
return thread_local_.memory_ + thread_local_.memory_size_;
}
@@ -74,7 +74,7 @@ class RegExpStack {
private:
// Artificial limit used when no memory has been allocated.
- static const uint32_t kMemoryTop = 0xffffffff;
+ static const uintptr_t kMemoryTop = static_cast<uintptr_t>(-1);
// Minimal size of allocated stack area.
static const size_t kMinimumStackSize = 1 * KB;
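
kMemoryTop changes from a 32-bit literal to static_cast<uintptr_t>(-1) so the artificial limit stays above every address on 64-bit targets as well, where 0xffffffff is a perfectly reachable address. A tiny sketch of the difference:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // 32-bit literal: stays 0xffffffff even on a 64-bit build, where that
  // is a valid, allocatable address, so it no longer works as an
  // "above everything" sentinel.
  uint32_t top32 = 0xffffffff;
  // Portable form: -1 converted to uintptr_t is all ones at pointer
  // width, i.e. the maximal address on either architecture.
  uintptr_t top = static_cast<uintptr_t>(-1);
  std::printf("%llx vs %llx\n",
              (unsigned long long)top32, (unsigned long long)top);
  return 0;
}
```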
diff --git a/V8Binding/v8/src/runtime.cc b/V8Binding/v8/src/runtime.cc
index 56e9f85..36fdd65 100644
--- a/V8Binding/v8/src/runtime.cc
+++ b/V8Binding/v8/src/runtime.cc
@@ -1022,6 +1022,30 @@ static Object* Runtime_InitializeConstContextSlot(Arguments args) {
}
+static Object* Runtime_OptimizeObjectForAddingMultipleProperties(
+ Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSObject, object, 0);
+ CONVERT_SMI_CHECKED(properties, args[1]);
+ if (object->HasFastProperties()) {
+ NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
+ }
+ return *object;
+}
+
+
+static Object* Runtime_TransformToFastProperties(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSObject, object, 0);
+ if (!object->HasFastProperties() && !object->IsGlobalObject()) {
+ TransformToFastProperties(object, 0);
+ }
+ return *object;
+}
+
+
static Object* Runtime_RegExpExec(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 4);
@@ -3075,7 +3099,7 @@ static Object* Runtime_Typeof(Arguments args) {
}
ASSERT(heap_obj->IsUndefined());
return Heap::undefined_symbol();
- case JS_FUNCTION_TYPE:
+ case JS_FUNCTION_TYPE: case JS_REGEXP_TYPE:
return Heap::function_symbol();
default:
// For any kind of object not handled above, the spec rule for
@@ -4973,10 +4997,12 @@ static Object* Runtime_CompileString(Arguments args) {
// Compile source string in the global context.
Handle<Context> context(Top::context()->global_context());
+ Compiler::ValidationState validate = (is_json->IsTrue())
+ ? Compiler::VALIDATE_JSON : Compiler::DONT_VALIDATE_JSON;
Handle<JSFunction> boilerplate = Compiler::CompileEval(source,
context,
true,
- is_json->IsTrue());
+ validate);
if (boilerplate.is_null()) return Failure::Exception();
Handle<JSFunction> fun =
Factory::NewFunctionFromBoilerplate(boilerplate, context);
@@ -5000,8 +5026,11 @@ static Object* CompileDirectEval(Handle<String> source) {
bool is_global = context->IsGlobalContext();
// Compile source string in the current context.
- Handle<JSFunction> boilerplate =
- Compiler::CompileEval(source, context, is_global, false);
+ Handle<JSFunction> boilerplate = Compiler::CompileEval(
+ source,
+ context,
+ is_global,
+ Compiler::DONT_VALIDATE_JSON);
if (boilerplate.is_null()) return Failure::Exception();
Handle<JSFunction> fun =
Factory::NewFunctionFromBoilerplate(boilerplate, context);
@@ -7043,7 +7072,7 @@ static Object* Runtime_DebugEvaluate(Arguments args) {
Compiler::CompileEval(function_source,
context,
context->IsGlobalContext(),
- false);
+ Compiler::DONT_VALIDATE_JSON);
if (boilerplate.is_null()) return Failure::Exception();
Handle<JSFunction> compiled_function =
Factory::NewFunctionFromBoilerplate(boilerplate, context);
@@ -7111,7 +7140,7 @@ static Object* Runtime_DebugEvaluateGlobal(Arguments args) {
Handle<JSFunction>(Compiler::CompileEval(source,
context,
true,
- false));
+ Compiler::DONT_VALIDATE_JSON));
if (boilerplate.is_null()) return Failure::Exception();
Handle<JSFunction> compiled_function =
Handle<JSFunction>(Factory::NewFunctionFromBoilerplate(boilerplate,
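
The new Runtime_OptimizeObjectForAddingMultipleProperties / Runtime_TransformToFastProperties pair (added at the top of this runtime.cc diff) brackets bulk property setup: normalize the object to dictionary properties sized for the expected additions, add everything without repeated map transitions, then switch back to the fast representation. An analogous container-level sketch, not V8's internals:

```cpp
#include <algorithm>
#include <cstdio>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// Analogous pattern only: build in a hash map sized up front (cheap
// repeated insertion), then "transform to fast properties" by freezing
// into a sorted flat vector for compact, cache-friendly reads afterwards.
int main() {
  std::unordered_map<std::string, double> building;
  building.reserve(8);  // ~ %OptimizeObjectForAddingMultipleProperties($Math, 8)
  building.emplace("E", 2.7182818284590452354);
  building.emplace("PI", 3.1415926535897932);
  // ~ %TransformToFastProperties($Math): switch to the fast layout once
  // the bulk additions are done.
  std::vector<std::pair<std::string, double> > fast(building.begin(),
                                                    building.end());
  std::sort(fast.begin(), fast.end());
  for (const auto& kv : fast)
    std::printf("%s=%f\n", kv.first.c_str(), kv.second);
  return 0;
}
```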
diff --git a/V8Binding/v8/src/runtime.h b/V8Binding/v8/src/runtime.h
index cdf21dc..d47ca18 100644
--- a/V8Binding/v8/src/runtime.h
+++ b/V8Binding/v8/src/runtime.h
@@ -247,6 +247,8 @@ namespace internal {
F(InitializeVarGlobal, -1 /* 1 or 2 */) \
F(InitializeConstGlobal, 2) \
F(InitializeConstContextSlot, 3) \
+ F(OptimizeObjectForAddingMultipleProperties, 2) \
+ F(TransformToFastProperties, 1) \
\
/* Debugging */ \
F(DebugPrint, 1) \
diff --git a/V8Binding/v8/src/scanner.cc b/V8Binding/v8/src/scanner.cc
index 24a6d4b..720dc35 100644
--- a/V8Binding/v8/src/scanner.cc
+++ b/V8Binding/v8/src/scanner.cc
@@ -92,33 +92,35 @@ void UTF8Buffer::AddCharSlow(uc32 c) {
UTF16Buffer::UTF16Buffer()
- : pos_(0),
- pushback_buffer_(0),
- last_(0),
- stream_(NULL) { }
+ : pos_(0), size_(0) { }
-void UTF16Buffer::Initialize(Handle<String> data,
- unibrow::CharacterStream* input) {
- data_ = data;
- pos_ = 0;
- stream_ = input;
+Handle<String> UTF16Buffer::SubString(int start, int end) {
+ return internal::SubString(data_, start, end);
}
-Handle<String> UTF16Buffer::SubString(int start, int end) {
- return internal::SubString(data_, start, end);
+// CharacterStreamUTF16Buffer
+CharacterStreamUTF16Buffer::CharacterStreamUTF16Buffer()
+ : pushback_buffer_(0), last_(0), stream_(NULL) { }
+
+
+void CharacterStreamUTF16Buffer::Initialize(Handle<String> data,
+ unibrow::CharacterStream* input) {
+ data_ = data;
+ pos_ = 0;
+ stream_ = input;
}
-void UTF16Buffer::PushBack(uc32 ch) {
+void CharacterStreamUTF16Buffer::PushBack(uc32 ch) {
pushback_buffer()->Add(last_);
last_ = ch;
pos_--;
}
-uc32 UTF16Buffer::Advance() {
+uc32 CharacterStreamUTF16Buffer::Advance() {
// NOTE: It is of importance to Persian / Farsi resources that we do
// *not* strip format control characters in the scanner; see
//
@@ -135,7 +137,7 @@ uc32 UTF16Buffer::Advance() {
uc32 next = stream_->GetNext();
return last_ = next;
} else {
- // note: currently the following increment is necessary to avoid a
+ // Note: currently the following increment is necessary to avoid a
// test-parser problem!
pos_++;
return last_ = static_cast<uc32>(-1);
@@ -143,13 +145,53 @@ uc32 UTF16Buffer::Advance() {
}
-void UTF16Buffer::SeekForward(int pos) {
+void CharacterStreamUTF16Buffer::SeekForward(int pos) {
pos_ = pos;
ASSERT(pushback_buffer()->is_empty());
stream_->Seek(pos);
}
+// TwoByteStringUTF16Buffer
+TwoByteStringUTF16Buffer::TwoByteStringUTF16Buffer()
+ : raw_data_(NULL) { }
+
+
+void TwoByteStringUTF16Buffer::Initialize(
+ Handle<ExternalTwoByteString> data) {
+ ASSERT(!data.is_null());
+
+ data_ = data;
+ pos_ = 0;
+
+ raw_data_ = data->resource()->data();
+ size_ = data->length();
+}
+
+
+uc32 TwoByteStringUTF16Buffer::Advance() {
+ if (pos_ < size_) {
+ return raw_data_[pos_++];
+ } else {
+    // Note: currently the following increment is necessary to avoid a
+    // test-parser problem!
+ pos_++;
+ return static_cast<uc32>(-1);
+ }
+}
+
+
+void TwoByteStringUTF16Buffer::PushBack(uc32 ch) {
+ pos_--;
+ ASSERT(pos_ >= 0 && raw_data_[pos_] == ch);
+}
+
+
+void TwoByteStringUTF16Buffer::SeekForward(int pos) {
+ pos_ = pos;
+}
+
+
// ----------------------------------------------------------------------------
// Scanner
@@ -161,7 +203,15 @@ Scanner::Scanner(bool pre) : stack_overflow_(false), is_pre_parsing_(pre) {
void Scanner::Init(Handle<String> source, unibrow::CharacterStream* stream,
int position) {
// Initialize the source buffer.
- source_.Initialize(source, stream);
+ if (!source.is_null() && StringShape(*source).IsExternalTwoByte()) {
+ two_byte_string_buffer_.Initialize(
+ Handle<ExternalTwoByteString>::cast(source));
+ source_ = &two_byte_string_buffer_;
+ } else {
+ char_stream_buffer_.Initialize(source, stream);
+ source_ = &char_stream_buffer_;
+ }
+
position_ = position;
// Reset literals buffer
@@ -180,7 +230,7 @@ void Scanner::Init(Handle<String> source, unibrow::CharacterStream* stream,
Handle<String> Scanner::SubString(int start, int end) {
- return source_.SubString(start - position_, end - position_);
+ return source_->SubString(start - position_, end - position_);
}
@@ -223,17 +273,6 @@ void Scanner::AddCharAdvance() {
}
-void Scanner::Advance() {
- c0_ = source_.Advance();
-}
-
-
-void Scanner::PushBack(uc32 ch) {
- source_.PushBack(ch);
- c0_ = ch;
-}
-
-
static inline bool IsByteOrderMark(uc32 c) {
// The Unicode value U+FFFE is guaranteed never to be assigned as a
// Unicode character; this implies that in a Unicode context the
@@ -583,7 +622,7 @@ void Scanner::Scan() {
void Scanner::SeekForward(int pos) {
- source_.SeekForward(pos - 1);
+ source_->SeekForward(pos - 1);
Advance();
Scan();
}
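
Scanner::Init now inspects the source once and selects a buffer implementation: direct array reads for external two-byte strings, the generic stream-backed buffer otherwise, both behind the UTF16Buffer interface. A minimal sketch of that select-at-init pattern (trimmed, illustrative types):

```cpp
#include <cstdio>

// Sketch of the buffer-selection pattern: two implementations behind one
// interface, chosen once at Init() time.
class Buffer {
 public:
  virtual ~Buffer() {}
  virtual int Advance() = 0;  // Returns a value < 0 at the end.
};

class ArrayBuffer : public Buffer {  // Fast path: read straight from memory.
 public:
  ArrayBuffer(const int* data, int size) : data_(data), size_(size), pos_(0) {}
  virtual int Advance() { return pos_ < size_ ? data_[pos_++] : -1; }
 private:
  const int* data_; int size_; int pos_;
};

class StreamBuffer : public Buffer {  // Generic path: pull from a stream.
 public:
  virtual int Advance() { return -1; /* would delegate to a stream */ }
};

int main() {
  const int chars[] = {'v', '8'};
  ArrayBuffer fast(chars, 2);
  StreamBuffer slow;
  const bool have_flat_array = true;  // ~ the IsExternalTwoByte() check in Init().
  Buffer* source = have_flat_array ? static_cast<Buffer*>(&fast) : &slow;
  for (int c = source->Advance(); c >= 0; c = source->Advance())
    std::printf("%c", c);
  std::printf("\n");
  return 0;
}
```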
diff --git a/V8Binding/v8/src/scanner.h b/V8Binding/v8/src/scanner.h
index eea23a7..340da86 100644
--- a/V8Binding/v8/src/scanner.h
+++ b/V8Binding/v8/src/scanner.h
@@ -73,24 +73,53 @@ class UTF8Buffer {
class UTF16Buffer {
public:
UTF16Buffer();
+ virtual ~UTF16Buffer() {}
+
+ virtual void PushBack(uc32 ch) = 0;
+  // Returns a value < 0 when the buffer end is reached.
+ virtual uc32 Advance() = 0;
+ virtual void SeekForward(int pos) = 0;
- void Initialize(Handle<String> data, unibrow::CharacterStream* stream);
- void PushBack(uc32 ch);
- uc32 Advance(); // returns a value < 0 when the buffer end is reached
- uint16_t CharAt(int index);
int pos() const { return pos_; }
int size() const { return size_; }
Handle<String> SubString(int start, int end);
- List<uc32>* pushback_buffer() { return &pushback_buffer_; }
- void SeekForward(int pos);
- private:
+ protected:
Handle<String> data_;
int pos_;
int size_;
+};
+
+
+class CharacterStreamUTF16Buffer: public UTF16Buffer {
+ public:
+ CharacterStreamUTF16Buffer();
+ virtual ~CharacterStreamUTF16Buffer() {}
+ void Initialize(Handle<String> data, unibrow::CharacterStream* stream);
+ virtual void PushBack(uc32 ch);
+ virtual uc32 Advance();
+ virtual void SeekForward(int pos);
+
+ private:
List<uc32> pushback_buffer_;
uc32 last_;
unibrow::CharacterStream* stream_;
+
+ List<uc32>* pushback_buffer() { return &pushback_buffer_; }
+};
+
+
+class TwoByteStringUTF16Buffer: public UTF16Buffer {
+ public:
+ TwoByteStringUTF16Buffer();
+ virtual ~TwoByteStringUTF16Buffer() {}
+ void Initialize(Handle<ExternalTwoByteString> data);
+ virtual void PushBack(uc32 ch);
+ virtual uc32 Advance();
+ virtual void SeekForward(int pos);
+
+ private:
+ const uint16_t* raw_data_;
};
@@ -184,8 +213,11 @@ class Scanner {
static unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
private:
+ CharacterStreamUTF16Buffer char_stream_buffer_;
+ TwoByteStringUTF16Buffer two_byte_string_buffer_;
+
// Source.
- UTF16Buffer source_;
+ UTF16Buffer* source_;
int position_;
// Buffer to hold literal values (identifiers, strings, numbers)
@@ -219,8 +251,11 @@ class Scanner {
void TerminateLiteral();
// Low-level scanning support.
- void Advance();
- void PushBack(uc32 ch);
+ void Advance() { c0_ = source_->Advance(); }
+ void PushBack(uc32 ch) {
+ source_->PushBack(ch);
+ c0_ = ch;
+ }
bool SkipWhiteSpace();
Token::Value SkipSingleLineComment();
@@ -243,7 +278,7 @@ class Scanner {
// Return the current source position.
int source_pos() {
- return source_.pos() - kCharacterLookaheadBufferSize + position_;
+ return source_->pos() - kCharacterLookaheadBufferSize + position_;
}
// Decodes a unicode escape-sequence which is part of an identifier.
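
The inlined Advance()/PushBack() pair keeps a one-character lookahead in c0_: Advance pulls the next character into it, and PushBack returns the current lookahead to the buffer while restoring the pushed-back character. A toy sketch of that lookahead discipline (not the full Scanner):

```cpp
#include <cstdio>

// Sketch of the scanner's one-character lookahead: c0_ always holds the
// next character; PushBack undoes one Advance.
struct MiniScanner {
  const char* src; int pos; int c0_;
  void Advance() { c0_ = src[pos] ? src[pos++] : -1; }
  void PushBack(int ch) { --pos; c0_ = ch; }  // Mirror of the inlined pair above.
  void Init(const char* s) { src = s; pos = 0; Advance(); }
};

int main() {
  MiniScanner s;
  s.Init("if");
  int first = s.c0_;  // 'i'
  s.Advance();        // lookahead is now 'f'
  s.PushBack(first);  // undo one step: lookahead is 'i' again
  std::printf("%c\n", s.c0_);
  return 0;
}
```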
diff --git a/V8Binding/v8/src/spaces.cc b/V8Binding/v8/src/spaces.cc
index 4f8119f..67d3482 100644
--- a/V8Binding/v8/src/spaces.cc
+++ b/V8Binding/v8/src/spaces.cc
@@ -340,6 +340,17 @@ bool MemoryAllocator::CommitBlock(Address start,
return true;
}
+bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
+ ASSERT(start != NULL);
+ ASSERT(size > 0);
+ ASSERT(initial_chunk_ != NULL);
+ ASSERT(InInitialChunk(start));
+ ASSERT(InInitialChunk(start + size - 1));
+
+ if (!initial_chunk_->Uncommit(start, size)) return false;
+ Counters::memory_allocated.Decrement(size);
+ return true;
+}
Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
PagedSpace* owner) {
@@ -715,47 +726,25 @@ void PagedSpace::Shrink() {
Page* top_page = AllocationTopPage();
ASSERT(top_page->is_valid());
- // Loop over the pages from the top page to the end of the space to count
- // the number of pages to keep and find the last page to keep.
- int free_pages = 0;
- int pages_to_keep = 0; // Of the free pages.
- Page* last_page_to_keep = top_page;
- Page* current_page = top_page->next_page();
- // Loop over the pages to the end of the space.
- while (current_page->is_valid()) {
-#if defined(ANDROID)
- // Free all chunks if possible
-#else
- // Advance last_page_to_keep every other step to end up at the midpoint.
- if ((free_pages & 0x1) == 1) {
- pages_to_keep++;
- last_page_to_keep = last_page_to_keep->next_page();
- }
-#endif
- free_pages++;
- current_page = current_page->next_page();
+ // Count the number of pages we would like to free.
+ int pages_to_free = 0;
+ for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
+ pages_to_free++;
}
- // Free pages after last_page_to_keep, and adjust the next_page link.
- Page* p = MemoryAllocator::FreePages(last_page_to_keep->next_page());
- MemoryAllocator::SetNextPage(last_page_to_keep, p);
+ // Free pages after top_page.
+ Page* p = MemoryAllocator::FreePages(top_page->next_page());
+ MemoryAllocator::SetNextPage(top_page, p);
- // Since pages are only freed in whole chunks, we may have kept more
- // than pages_to_keep. Count the extra pages and cache the new last
- // page in the space.
- last_page_ = last_page_to_keep;
- while (p->is_valid()) {
- pages_to_keep++;
+ // Find out how many pages we failed to free and update last_page_.
+  // Note that pages can only be freed in whole chunks.
+ last_page_ = top_page;
+ for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
+ pages_to_free--;
last_page_ = p;
- p = p->next_page();
}
- // The difference between free_pages and pages_to_keep is the number of
- // pages actually freed.
- ASSERT(pages_to_keep <= free_pages);
- int bytes_freed = (free_pages - pages_to_keep) * Page::kObjectAreaSize;
- accounting_stats_.ShrinkSpace(bytes_freed);
-
+ accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}
@@ -963,13 +952,13 @@ void NewSpace::Flip() {
}
-bool NewSpace::Double() {
- ASSERT(capacity_ <= maximum_capacity_ / 2);
+bool NewSpace::Grow() {
+ ASSERT(capacity_ < maximum_capacity_);
// TODO(1240712): Failure to double the from space can result in
// semispaces of different sizes. In the event of that failure, the
// to space doubling should be rolled back before returning false.
- if (!to_space_.Double() || !from_space_.Double()) return false;
- capacity_ *= 2;
+ if (!to_space_.Grow() || !from_space_.Grow()) return false;
+ capacity_ = to_space_.Capacity() + from_space_.Capacity();
allocation_info_.limit = to_space_.high();
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
return true;
@@ -1039,6 +1028,26 @@ void NewSpace::Verify() {
#endif
+bool SemiSpace::Commit() {
+ ASSERT(!is_committed());
+ if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) {
+ return false;
+ }
+ committed_ = true;
+ return true;
+}
+
+
+bool SemiSpace::Uncommit() {
+ ASSERT(is_committed());
+ if (!MemoryAllocator::UncommitBlock(start_, capacity_)) {
+ return false;
+ }
+ committed_ = false;
+ return true;
+}
+
+
// -----------------------------------------------------------------------------
// SemiSpace implementation
@@ -1053,18 +1062,15 @@ bool SemiSpace::Setup(Address start,
// addresses.
capacity_ = initial_capacity;
maximum_capacity_ = maximum_capacity;
-
- if (!MemoryAllocator::CommitBlock(start, capacity_, executable())) {
- return false;
- }
+ committed_ = false;
start_ = start;
address_mask_ = ~(maximum_capacity - 1);
object_mask_ = address_mask_ | kHeapObjectTag;
object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
-
age_mark_ = start_;
- return true;
+
+ return Commit();
}
@@ -1074,11 +1080,16 @@ void SemiSpace::TearDown() {
}
-bool SemiSpace::Double() {
- if (!MemoryAllocator::CommitBlock(high(), capacity_, executable())) {
+bool SemiSpace::Grow() {
+ // Commit 50% extra space but only up to maximum capacity.
+ int extra = RoundUp(capacity_ / 2, OS::AllocateAlignment());
+ if (capacity_ + extra > maximum_capacity_) {
+ extra = maximum_capacity_ - capacity_;
+ }
+ if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
return false;
}
- capacity_ *= 2;
+ capacity_ += extra;
return true;
}
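
SemiSpace::Grow above replaces blind doubling with "commit 50% more, rounded up to the allocation alignment, clamped to the reserved maximum". A tiny sketch of that arithmetic (the alignment constant is a stand-in for OS::AllocateAlignment()):

```cpp
#include <cstdio>

// Round up to a multiple of `alignment` (assumed to be a power of two),
// as RoundUp(..., OS::AllocateAlignment()) does.
static int RoundUp(int value, int alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const int kAlignment = 4096;         // Stand-in for OS::AllocateAlignment().
  int capacity = 512 * 1024;           // Currently committed size.
  const int max_capacity = 8 * 1024 * 1024;
  // Grow by 50%, but never past the reserved maximum.
  int extra = RoundUp(capacity / 2, kAlignment);
  if (capacity + extra > max_capacity) extra = max_capacity - capacity;
  capacity += extra;
  std::printf("new capacity: %d\n", capacity);
  return 0;
}
```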
diff --git a/V8Binding/v8/src/spaces.h b/V8Binding/v8/src/spaces.h
index 57e7c1f..15f0628 100644
--- a/V8Binding/v8/src/spaces.h
+++ b/V8Binding/v8/src/spaces.h
@@ -367,6 +367,13 @@ class MemoryAllocator : public AllStatic {
// and false otherwise.
static bool CommitBlock(Address start, size_t size, Executability executable);
+
+ // Uncommit a contiguous block of memory [start..(start+size)[.
+ // start is not NULL, the size is greater than zero, and the
+ // block is contained in the initial chunk. Returns true if it succeeded
+ // and false otherwise.
+ static bool UncommitBlock(Address start, size_t size);
+
// Attempts to allocate the requested (non-zero) number of pages from the
// OS. Fewer pages might be allocated than requested. If it fails to
// allocate memory for the OS or cannot allocate a single page, this
@@ -997,11 +1004,11 @@ class SemiSpace : public Space {
// True if the space has been set up but not torn down.
bool HasBeenSetup() { return start_ != NULL; }
- // Double the size of the semispace by committing extra virtual memory.
+ // Grow the size of the semispace by committing extra virtual memory.
// Assumes that the caller has checked that the semispace has not reached
// its maximum capacity (and thus there is space available in the reserved
// address range to grow).
- bool Double();
+ bool Grow();
// Returns the start address of the space.
Address low() { return start_; }
@@ -1035,11 +1042,18 @@ class SemiSpace : public Space {
return 0;
}
+ bool is_committed() { return committed_; }
+ bool Commit();
+ bool Uncommit();
+
#ifdef DEBUG
virtual void Print();
virtual void Verify();
#endif
+  // Returns the current capacity of the semispace.
+ int Capacity() { return capacity_; }
+
private:
// The current and maximum capacity of the space.
int capacity_;
@@ -1055,6 +1069,8 @@ class SemiSpace : public Space {
uintptr_t object_mask_;
uintptr_t object_expected_;
+ bool committed_;
+
public:
TRACK_MEMORY("SemiSpace")
};
@@ -1131,9 +1147,9 @@ class NewSpace : public Space {
// Flip the pair of spaces.
void Flip();
- // Doubles the capacity of the semispaces. Assumes that they are not at
+ // Grow the capacity of the semispaces. Assumes that they are not at
// their maximum capacity. Returns a flag indicating success or failure.
- bool Double();
+ bool Grow();
// True if the address or object lies in the address range of either
// semispace (not necessarily below the allocation pointer).
@@ -1247,6 +1263,17 @@ class NewSpace : public Space {
void RecordPromotion(HeapObject* obj);
#endif
+  // Returns whether the operation succeeded.
+ bool CommitFromSpaceIfNeeded() {
+ if (from_space_.is_committed()) return true;
+ return from_space_.Commit();
+ }
+
+ bool UncommitFromSpace() {
+ if (!from_space_.is_committed()) return true;
+ return from_space_.Uncommit();
+ }
+
private:
// The current and maximum capacities of a semispace.
int capacity_;
@@ -1547,7 +1574,7 @@ class FixedSpace : public PagedSpace {
// Give a fixed sized block of memory to the space's free list.
void Free(Address start) {
free_list_.Free(start);
- accounting_stats_.DeallocateBytes(Map::kSize);
+ accounting_stats_.DeallocateBytes(object_size_in_bytes_);
}
// Prepares for a mark-compact GC.
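
The new CommitFromSpaceIfNeeded()/UncommitFromSpace() helpers consult the committed_ flag first and only touch virtual memory on a real state change, so callers can invoke them unconditionally. A sketch that collapses NewSpace's guard and SemiSpace's state flip into one toy class (the memory calls are stubbed out):

```cpp
#include <cstdio>

// Sketch of the idempotent commit/uncommit toggle guarded by a
// committed_ flag; the MemoryAllocator calls are stubbed.
class ToySemiSpace {
 public:
  ToySemiSpace() : committed_(false) {}
  bool CommitIfNeeded() {
    if (committed_) return true;   // Already committed: nothing to do.
    /* MemoryAllocator::CommitBlock(...) */
    committed_ = true;
    return true;
  }
  bool Uncommit() {
    if (!committed_) return true;  // Already uncommitted: nothing to do.
    /* MemoryAllocator::UncommitBlock(...) */
    committed_ = false;
    return true;
  }
 private:
  bool committed_;
};

int main() {
  ToySemiSpace from_space;
  from_space.CommitIfNeeded();  // Commits the pages.
  from_space.CommitIfNeeded();  // No-op.
  from_space.Uncommit();        // Releases the pages.
  std::printf("ok\n");
  return 0;
}
```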
diff --git a/V8Binding/v8/src/third_party/valgrind/valgrind.h b/V8Binding/v8/src/third_party/valgrind/valgrind.h
new file mode 100644
index 0000000..47f369b
--- /dev/null
+++ b/V8Binding/v8/src/third_party/valgrind/valgrind.h
@@ -0,0 +1,3924 @@
+/* -*- c -*-
+ ----------------------------------------------------------------
+
+ Notice that the following BSD-style license applies to this one
+ file (valgrind.h) only. The rest of Valgrind is licensed under the
+ terms of the GNU General Public License, version 2, unless
+ otherwise indicated. See the COPYING file in the source
+ distribution for details.
+
+ ----------------------------------------------------------------
+
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2007 Julian Seward. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
+ documentation would be appreciated but is not required.
+
+ 3. Altered source versions must be plainly marked as such, and must
+ not be misrepresented as being the original software.
+
+ 4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ----------------------------------------------------------------
+
+ Notice that the above BSD-style license applies to this one file
+ (valgrind.h) only. The entire rest of Valgrind is licensed under
+ the terms of the GNU General Public License, version 2. See the
+ COPYING file in the source distribution for details.
+
+ ----------------------------------------------------------------
+*/
+
+
+/* This file is for inclusion into client (your!) code.
+
+ You can use these macros to manipulate and query Valgrind's
+ execution inside your own programs.
+
+ The resulting executables will still run without Valgrind, just a
+ little bit more slowly than they otherwise would, but otherwise
+ unchanged. When not running on valgrind, each client request
+ consumes very few (eg. 7) instructions, so the resulting performance
+ loss is negligible unless you plan to execute client requests
+ millions of times per second. Nevertheless, if that is still a
+ problem, you can compile with the NVALGRIND symbol defined (gcc
+ -DNVALGRIND) so that client requests are not even compiled in. */
+
+#ifndef __VALGRIND_H
+#define __VALGRIND_H
+
+#include <stdarg.h>
+
+/* Nb: this file might be included in a file compiled with -ansi. So
+ we can't use C++ style "//" comments nor the "asm" keyword (instead
+ use "__asm__"). */
+
+/* Derive some tags indicating what the target platform is. Note
+ that in this file we're using the compiler's CPP symbols for
+ identifying architectures, which are different to the ones we use
+ within the rest of Valgrind. Note, __powerpc__ is active for both
+ 32 and 64-bit PPC, whereas __powerpc64__ is only active for the
+ latter (on Linux, that is). */
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_ppc32_aix5
+#undef PLAT_ppc64_aix5
+
+#if !defined(_AIX) && defined(__i386__)
+# define PLAT_x86_linux 1
+#elif !defined(_AIX) && defined(__x86_64__)
+# define PLAT_amd64_linux 1
+#elif !defined(_AIX) && defined(__powerpc__) && !defined(__powerpc64__)
+# define PLAT_ppc32_linux 1
+#elif !defined(_AIX) && defined(__powerpc__) && defined(__powerpc64__)
+# define PLAT_ppc64_linux 1
+#elif defined(_AIX) && defined(__64BIT__)
+# define PLAT_ppc64_aix5 1
+#elif defined(_AIX) && !defined(__64BIT__)
+# define PLAT_ppc32_aix5 1
+#endif
+
+
+/* If we're not compiling for our target platform, don't generate
+ any inline asms. */
+#if !defined(PLAT_x86_linux) && !defined(PLAT_amd64_linux) \
+ && !defined(PLAT_ppc32_linux) && !defined(PLAT_ppc64_linux) \
+ && !defined(PLAT_ppc32_aix5) && !defined(PLAT_ppc64_aix5)
+# if !defined(NVALGRIND)
+# define NVALGRIND 1
+# endif
+#endif
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */
+/* in here of use to end-users -- skip to the next section. */
+/* ------------------------------------------------------------------ */
+
+#if defined(NVALGRIND)
+
+/* Define NVALGRIND to completely remove the Valgrind magic sequence
+ from the compiled code (analogous to NDEBUG's effects on
+ assert()) */
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ { \
+ (_zzq_rlval) = (_zzq_default); \
+ }
+
+#else /* ! NVALGRIND */
+
+/* The following defines the magic code sequences which the JITter
+ spots and handles magically. Don't look too closely at them as
+ they will rot your brain.
+
+ The assembly code sequences for all architectures is in this one
+ file. This is because this file must be stand-alone, and we don't
+ want to have multiple files.
+
+ For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
+ value gets put in the return slot, so that everything works when
+ this is executed not under Valgrind. Args are passed in a memory
+ block, and so there's no intrinsic limit to the number that could
+ be passed, but it's currently five.
+
+ The macro args are:
+ _zzq_rlval result lvalue
+ _zzq_default default value (result returned when running on real CPU)
+ _zzq_request request code
+ _zzq_arg1..5 request params
+
+ The other two macros are used to support function wrapping, and are
+ a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
+ guest's NRADDR pseudo-register and whatever other information is
+ needed to safely run the call original from the wrapper: on
+ ppc64-linux, the R2 value at the divert point is also needed. This
+ information is abstracted into a user-visible type, OrigFn.
+
+ VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
+ guest, but guarantees that the branch instruction will not be
+ redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
+ branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
+ complete inline asm, since it needs to be combined with more magic
+ inline asm stuff to be useful.
+*/
+
+/* ------------------------- x86-linux ------------------------- */
+
+#if defined(PLAT_x86_linux)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "roll $3, %%edi ; roll $13, %%edi\n\t" \
+ "roll $29, %%edi ; roll $19, %%edi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ { volatile unsigned int _zzq_args[6]; \
+ volatile unsigned int _zzq_result; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EDX = client_request ( %EAX ) */ \
+ "xchgl %%ebx,%%ebx" \
+ : "=d" (_zzq_result) \
+ : "a" (&_zzq_args[0]), "0" (_zzq_default) \
+ : "cc", "memory" \
+ ); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EAX = guest_NRADDR */ \
+ "xchgl %%ecx,%%ecx" \
+ : "=a" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_EAX \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* call-noredir *%EAX */ \
+ "xchgl %%edx,%%edx\n\t"
+#endif /* PLAT_x86_linux */
+
+/* ------------------------ amd64-linux ------------------------ */
+
+#if defined(PLAT_amd64_linux)
+
+typedef
+ struct {
+ unsigned long long int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
+ "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ { volatile unsigned long long int _zzq_args[6]; \
+ volatile unsigned long long int _zzq_result; \
+ _zzq_args[0] = (unsigned long long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %RDX = client_request ( %RAX ) */ \
+ "xchgq %%rbx,%%rbx" \
+ : "=d" (_zzq_result) \
+ : "a" (&_zzq_args[0]), "0" (_zzq_default) \
+ : "cc", "memory" \
+ ); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned long long int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %RAX = guest_NRADDR */ \
+ "xchgq %%rcx,%%rcx" \
+ : "=a" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_RAX \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* call-noredir *%RAX */ \
+ "xchgq %%rdx,%%rdx\n\t"
+#endif /* PLAT_amd64_linux */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
+ "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { unsigned int _zzq_args[6]; \
+ unsigned int _zzq_result; \
+ unsigned int* _zzq_ptr; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile("mr 3,%1\n\t" /*default*/ \
+ "mr 4,%2\n\t" /*ptr*/ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1\n\t" \
+ "mr %0,3" /*result*/ \
+ : "=b" (_zzq_result) \
+ : "b" (_zzq_default), "b" (_zzq_ptr) \
+ : "cc", "memory", "r3", "r4"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "cc", "memory", "r3" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t"
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+typedef
+ struct {
+ unsigned long long int nraddr; /* where's the code? */
+ unsigned long long int r2; /* what tocptr do we need? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
+ "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { unsigned long long int _zzq_args[6]; \
+ register unsigned long long int _zzq_result __asm__("r3"); \
+ register unsigned long long int* _zzq_ptr __asm__("r4"); \
+ _zzq_args[0] = (unsigned long long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1" \
+ : "=r" (_zzq_result) \
+ : "0" (_zzq_default), "r" (_zzq_ptr) \
+ : "cc", "memory"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ register unsigned long long int __addr __asm__("r3"); \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2" \
+ : "=r" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR_GPR2 */ \
+ "or 4,4,4" \
+ : "=r" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_orig->r2 = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_linux */
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ unsigned int r2; /* what tocptr do we need? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
+ "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { unsigned int _zzq_args[7]; \
+ register unsigned int _zzq_result; \
+ register unsigned int* _zzq_ptr; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ _zzq_args[6] = (unsigned int)(_zzq_default); \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile("mr 4,%1\n\t" \
+ "lwz 3, 24(4)\n\t" \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1\n\t" \
+ "mr %0,3" \
+ : "=b" (_zzq_result) \
+ : "b" (_zzq_ptr) \
+ : "r3", "r4", "cc", "memory"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ register unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "r3", "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR_GPR2 */ \
+ "or 4,4,4\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "r3", "cc", "memory" \
+ ); \
+ _zzq_orig->r2 = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+typedef
+ struct {
+ unsigned long long int nraddr; /* where's the code? */
+ unsigned long long int r2; /* what tocptr do we need? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
+ "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { unsigned long long int _zzq_args[7]; \
+ register unsigned long long int _zzq_result; \
+ register unsigned long long int* _zzq_ptr; \
+      _zzq_args[0] = (unsigned long long int)(_zzq_request);       \
+      _zzq_args[1] = (unsigned long long int)(_zzq_arg1);          \
+      _zzq_args[2] = (unsigned long long int)(_zzq_arg2);          \
+      _zzq_args[3] = (unsigned long long int)(_zzq_arg3);          \
+      _zzq_args[4] = (unsigned long long int)(_zzq_arg4);          \
+      _zzq_args[5] = (unsigned long long int)(_zzq_arg5);          \
+      _zzq_args[6] = (unsigned long long int)(_zzq_default);       \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile("mr 4,%1\n\t" \
+ "ld 3, 48(4)\n\t" \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1\n\t" \
+ "mr %0,3" \
+ : "=b" (_zzq_result) \
+ : "b" (_zzq_ptr) \
+ : "r3", "r4", "cc", "memory"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ register unsigned long long int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "r3", "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR_GPR2 */ \
+ "or 4,4,4\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "r3", "cc", "memory" \
+ ); \
+ _zzq_orig->r2 = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_aix5 */
+
+/* Insert assembly code for other platforms here... */
+
+#endif /* NVALGRIND */
+
+
+/* ------------------------------------------------------------------ */
+/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */
+/* ugly. It's the least-worst tradeoff I can think of. */
+/* ------------------------------------------------------------------ */
+
+/* This section defines magic (a.k.a. appalling-hack) macros for doing
+   guaranteed-no-redirection calls, so as to get from function
+ wrappers to the functions they are wrapping. The whole point is to
+ construct standard call sequences, but to do the call itself with a
+ special no-redirect call pseudo-instruction that the JIT
+ understands and handles specially. This section is long and
+ repetitious, and I can't see a way to make it shorter.
+
+ The naming scheme is as follows:
+
+ CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
+
+ 'W' stands for "word" and 'v' for "void". Hence there are
+ different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
+ and for each, the possibility of returning a word-typed result, or
+ no result.
+*/
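+
+/* For example (illustration, not part of the original header):
+   CALL_FN_W_WW names a call taking two word-sized args and returning
+   a word, while CALL_FN_v_W takes one word arg and returns nothing. */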
+
+/* Use these to write the name of your wrapper. NOTE: duplicates
+ VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
+
+#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \
+ _vgwZU_##soname##_##fnname
+
+#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \
+ _vgwZZ_##soname##_##fnname
+
+/* Use this macro from within a wrapper function to collect the
+ context (address and possibly other info) of the original function.
+ Once you have that you can then use it in one of the CALL_FN_
+ macros. The type of the argument _lval is OrigFn. */
+#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval)
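+
+/* Illustrative sketch (not part of the original header), following
+   the style of the Valgrind manual: a wrapper for a hypothetical
+   function 'int foo(int,int)' in the main executable (soname NONE):
+
+      int I_WRAP_SONAME_FNNAME_ZU(NONE,foo) ( int x, int y )
+      {
+         int    result;
+         OrigFn fn;
+         VALGRIND_GET_ORIG_FN(fn);
+         CALL_FN_W_WW(result, fn, x, y);   // call the real foo(x,y)
+         return result;
+      }
+*/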
+
+/* Derivatives of the main macros below, for calling functions
+ returning void. */
+
+#define CALL_FN_v_v(fnptr) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_v(_junk,fnptr); } while (0)
+
+#define CALL_FN_v_W(fnptr, arg1) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
+
+#define CALL_FN_v_WW(fnptr, arg1,arg2) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
+
+#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
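+
+/* Illustrative sketch (not part of the original header): the void
+   derivatives are used exactly like the word-returning forms, minus
+   the result lvalue; e.g. a wrapper for a hypothetical 'void bar(int)'
+   would end with
+
+      CALL_FN_v_W(fn, x);
+
+   which discards the (junk) return word internally. */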
+
+/* ------------------------- x86-linux ------------------------- */
+
+#if defined(PLAT_x86_linux)
+
+/* These regs are trashed by the hidden call.  No need to mention eax
+   as gcc can already see that, and mentioning it causes gcc to bomb. */
+#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
+
+/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
+ long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $4, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $8, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $12, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $16, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $20, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $24, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $28, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $32, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $36, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ "pushl 40(%%eax)\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $40, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ "pushl 44(%%eax)\n\t" \
+ "pushl 40(%%eax)\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $44, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "pushl 48(%%eax)\n\t" \
+ "pushl 44(%%eax)\n\t" \
+ "pushl 40(%%eax)\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $48, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_x86_linux */
+
+/* ------------------------ amd64-linux ------------------------ */
+
+#if defined(PLAT_amd64_linux)
+
+/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
+ "rdi", "r8", "r9", "r10", "r11"
+
+/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
+ long) == 8. */
+
+/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_
+ macros. In order not to trash the stack redzone, we need to drop
+ %rsp by 128 before the hidden call, and restore afterwards. The
+   nastiness is that it is only by luck that the stack still appears
+   to be unwindable during the hidden call, since the behaviour of
+   any routine using this macro then does not match what the CFI data
+   says.  Sigh.
+
+   Why is this important?  Imagine that a wrapper has a stack-allocated
+   local and passes a pointer to it to the hidden call.
+ Because gcc does not know about the hidden call, it may allocate
+ that local in the redzone. Unfortunately the hidden call may then
+ trash it before it comes to use it. So we must step clear of the
+ redzone, for the duration of the hidden call, to make it safe.
+
+ Probably the same problem afflicts the other redzone-style ABIs too
+   (ppc64-linux, ppc32-aix5, ppc64-aix5); but for those, the stack is
+   self-describing (none of this CFI nonsense), so at least messing
+   with the stack pointer doesn't risk making the stack
+   non-unwindable. */
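+
+/* Illustration of the hazard described above (not part of the
+   original header): gcc, unaware of the hidden call, may place
+   'local' in the 128-byte redzone below %rsp:
+
+      int wrapper ( void )
+      {
+         int    local = 42;   // may be allocated at -N(%rsp), N <= 128
+         OrigFn fn;
+         unsigned long r;
+         VALGRIND_GET_ORIG_FN(fn);
+         CALL_FN_W_W(r, fn, (unsigned long)&local);
+         return (int)r;
+      }
+
+   Without the 'subq $128,%%rsp' the hidden call's stack traffic could
+   overwrite 'local' before the callee reads it. */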
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+         VALGRIND_CALL_NOREDIR_RAX                                 \
+         "addq $128,%%rsp\n\t"                                    \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $8, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $16, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $24, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $32, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 88(%%rax)\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $40, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 96(%%rax)\n\t" \
+ "pushq 88(%%rax)\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $48, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_amd64_linux */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+/* This is useful for finding out about the on-stack stuff:
+
+ extern int f9 ( int,int,int,int,int,int,int,int,int );
+ extern int f10 ( int,int,int,int,int,int,int,int,int,int );
+ extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
+ extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
+
+ int g9 ( void ) {
+ return f9(11,22,33,44,55,66,77,88,99);
+ }
+ int g10 ( void ) {
+ return f10(11,22,33,44,55,66,77,88,99,110);
+ }
+ int g11 ( void ) {
+ return f11(11,22,33,44,55,66,77,88,99,110,121);
+ }
+ int g12 ( void ) {
+ return f12(11,22,33,44,55,66,77,88,99,110,121,132);
+ }
+*/
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc32-linux,
+ sizeof(unsigned long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-16\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,16\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-16\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,12(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,16\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-32\n\t" \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,16(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,12(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,32\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ _argvec[12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-32\n\t" \
+ /* arg12 */ \
+ "lwz 3,48(11)\n\t" \
+ "stw 3,20(1)\n\t" \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,16(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,12(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,32\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
+ long) == 8. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-128\n\t" /* expand stack frame */ \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,128" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-128\n\t" /* expand stack frame */ \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,128" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-144\n\t" /* expand stack frame */ \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,144" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[2+12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-144\n\t" /* expand stack frame */ \
+ /* arg12 */ \
+ "ld 3,96(11)\n\t" \
+ "std 3,136(1)\n\t" \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,144" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc64_linux */
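
Taken together, the per-arity CALL_FN_W_* macros above are the back end of this header's function-wrapping machinery. A minimal sketch of a wrapper built on them, assuming a hypothetical target function int foo(int) in the main executable (soname NONE); OrigFn, VALGRIND_GET_ORIG_FN and I_WRAP_SONAME_FNNAME_ZU are defined elsewhere in this header:

    #include "valgrind.h"

    /* Hypothetical wrapper for: int foo(int x). The expanded name is
       the special form Valgrind's core recognises as "wrap foo in
       objects with soname NONE" (i.e. the main executable). */
    int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int x)
    {
       int    result;
       OrigFn fn;
       VALGRIND_GET_ORIG_FN(fn);    /* captures nraddr (and r2 on ppc64/AIX) */
       CALL_FN_W_W(result, fn, x);  /* call the real foo without re-wrapping */
       return result;
    }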
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+ still works. Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
+ "addi 1,1,-" #_n_fr "\n\t" \
+ "lwz 3," #_n_fr "(1)\n\t" \
+ "stw 3,0(1)\n\t"
+
+#define VG_CONTRACT_FRAME_BY(_n_fr) \
+ "addi 1,1," #_n_fr "\n\t"
+
+/* These CALL_FN_ macros assume that on ppc32-aix5, sizeof(unsigned
+ long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(64) \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(64) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(64) \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,60(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(64) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(72) \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,64(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,60(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(72) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[2+12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(72) \
+ /* arg12 */ \
+ "lwz 3,48(11)\n\t" \
+ "stw 3,68(1)\n\t" \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,64(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,60(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(72) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+ still works. Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
+ "addi 1,1,-" #_n_fr "\n\t" \
+ "ld 3," #_n_fr "(1)\n\t" \
+ "std 3,0(1)\n\t"
+
+#define VG_CONTRACT_FRAME_BY(_n_fr) \
+ "addi 1,1," #_n_fr "\n\t"
+
+/* These CALL_FN_ macros assume that on ppc64-aix5, sizeof(unsigned
+ long) == 8. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(128) \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(128) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(128) \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(128) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(144) \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(144) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[2+12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(144) \
+ /* arg12 */ \
+ "ld 3,96(11)\n\t" \
+ "std 3,136(1)\n\t" \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(144) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc64_aix5 */
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
+/* */
+/* ------------------------------------------------------------------ */
+
+/* Some request codes. There are many more of these, but most are not
+ exposed to end-user view. These are the public ones, all of the
+ form 0x1000 + small_number.
+
+ Core ones are in the range 0x00000000--0x0000ffff. The non-public
+ ones start at 0x2000.
+*/
+
+/* These macros are used by tools -- they must be public, but don't
+ embed them into other programs. */
+#define VG_USERREQ_TOOL_BASE(a,b) \
+ ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
+#define VG_IS_TOOL_USERREQ(a, b, v) \
+ (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
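
For example, a tool header typically derives its own request codes from a two-character tag this way (Memcheck's memcheck.h does so with ('M','C')); a sketch with a hypothetical tag ('M','T'):

    /* Sketch: carve out a tool-private request range from tag 'M','T'. */
    enum {
       MY_TOOL_USERREQ__PING = VG_USERREQ_TOOL_BASE('M','T'),
       MY_TOOL_USERREQ__PONG                       /* base + 1 */
    };
    /* The core-side test VG_IS_TOOL_USERREQ('M','T', req) matches any
       request whose top 16 bits carry that tag, so both values above. */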
+
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
+ This enum comprises an ABI exported by Valgrind to programs
+ which use client requests. DO NOT CHANGE THE ORDER OF THESE
+ ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+typedef
+ enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
+ VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
+
+ /* These allow any function to be called from the simulated
+ CPU but run on the real CPU. Nb: the first arg passed to
+ the function is always the ThreadId of the running
+ thread! So CLIENT_CALL0 actually requires a 1 arg
+ function, etc. */
+ VG_USERREQ__CLIENT_CALL0 = 0x1101,
+ VG_USERREQ__CLIENT_CALL1 = 0x1102,
+ VG_USERREQ__CLIENT_CALL2 = 0x1103,
+ VG_USERREQ__CLIENT_CALL3 = 0x1104,
+
+ /* Can be useful in regression testing suites -- eg. can
+ send Valgrind's output to /dev/null and still count
+ errors. */
+ VG_USERREQ__COUNT_ERRORS = 0x1201,
+
+ /* These are useful and can be interpreted by any tool that
+ tracks malloc() et al, by using vg_replace_malloc.c. */
+ VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
+ VG_USERREQ__FREELIKE_BLOCK = 0x1302,
+ /* Memory pool support. */
+ VG_USERREQ__CREATE_MEMPOOL = 0x1303,
+ VG_USERREQ__DESTROY_MEMPOOL = 0x1304,
+ VG_USERREQ__MEMPOOL_ALLOC = 0x1305,
+ VG_USERREQ__MEMPOOL_FREE = 0x1306,
+ VG_USERREQ__MEMPOOL_TRIM = 0x1307,
+ VG_USERREQ__MOVE_MEMPOOL = 0x1308,
+ VG_USERREQ__MEMPOOL_CHANGE = 0x1309,
+ VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
+
+ /* Allow printfs to valgrind log. */
+ VG_USERREQ__PRINTF = 0x1401,
+ VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
+
+ /* Stack support. */
+ VG_USERREQ__STACK_REGISTER = 0x1501,
+ VG_USERREQ__STACK_DEREGISTER = 0x1502,
+ VG_USERREQ__STACK_CHANGE = 0x1503
+ } Vg_ClientRequest;
+
+#if !defined(__GNUC__)
+# define __extension__ /* */
+#endif
+
+/* Returns the number of Valgrinds this code is running under. That
+ is, 0 if running natively, 1 if running under Valgrind, 2 if
+ running under Valgrind which is running under another Valgrind,
+ etc. */
+#define RUNNING_ON_VALGRIND __extension__ \
+ ({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* if not */, \
+ VG_USERREQ__RUNNING_ON_VALGRIND, \
+ 0, 0, 0, 0, 0); \
+ _qzz_res; \
+ })
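
A typical use, sketched with a hypothetical helper, is to shrink a workload when instrumented, since code runs markedly slower under Valgrind:

    #include "valgrind.h"

    /* Hypothetical helper: fewer iterations when instrumented.
       RUNNING_ON_VALGRIND is 0 natively, >= 1 under Valgrind. */
    static int test_iterations(void)
    {
       return RUNNING_ON_VALGRIND ? 100 : 100000;
    }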
+
+
+/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
+ _qzz_len - 1]. Useful if you are debugging a JITter or some such,
+ since it provides a way to make sure valgrind will retranslate the
+ invalidated area. Returns no value. */
+#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DISCARD_TRANSLATIONS, \
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ }
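
A sketch of the intended JIT use, with a hypothetical hook function:

    #include "valgrind.h"

    /* Hypothetical JIT hook: call after rewriting machine code in
       place, so Valgrind drops its stale translations of that range
       and retranslates the new bytes on next execution. */
    static void jit_after_patch(void* code_buf, unsigned long code_len)
    {
       VALGRIND_DISCARD_TRANSLATIONS(code_buf, code_len);
    }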
+
+
+/* These requests are for getting Valgrind itself to print something.
+ Possibly with a backtrace. This is a really ugly hack. */
+
+#if defined(NVALGRIND)
+
+# define VALGRIND_PRINTF(...)
+# define VALGRIND_PRINTF_BACKTRACE(...)
+
+#else /* NVALGRIND */
+
+/* Modern GCC will optimize the static routine out if unused,
+ and the 'unused' attribute will suppress warnings about it. */
+static int VALGRIND_PRINTF(const char *format, ...)
+ __attribute__((format(__printf__, 1, 2), __unused__));
+static int
+VALGRIND_PRINTF(const char *format, ...)
+{
+ unsigned long _qzz_res;
+ va_list vargs;
+ va_start(vargs, format);
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF,
+ (unsigned long)format, (unsigned long)vargs,
+ 0, 0, 0);
+ va_end(vargs);
+ return (int)_qzz_res;
+}
+
+static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+ __attribute__((format(__printf__, 1, 2), __unused__));
+static int
+VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+{
+ unsigned long _qzz_res;
+ va_list vargs;
+ va_start(vargs, format);
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF_BACKTRACE,
+ (unsigned long)format, (unsigned long)vargs,
+ 0, 0, 0);
+ va_end(vargs);
+ return (int)_qzz_res;
+}
+
+#endif /* NVALGRIND */
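
A sketch of typical use, with a hypothetical helper and counter:

    #include "valgrind.h"

    static void report_progress(int n)   /* hypothetical */
    {
       /* Both print into Valgrind's own log; the _BACKTRACE variant
          also emits a stack trace of the calling thread. */
       VALGRIND_PRINTF("checked %d blocks\n", n);
       VALGRIND_PRINTF_BACKTRACE("reached here with n=%d\n", n);
    }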
+
+
+/* These requests allow control to move from the simulated CPU to the
+   real CPU, calling an arbitrary function.
+
+ Note that the current ThreadId is inserted as the first argument.
+ So this call:
+
+ VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
+
+ requires f to have this signature:
+
+ Word f(Word tid, Word arg1, Word arg2)
+
+ where "Word" is a word-sized type.
+
+ Note that these client requests are not entirely reliable. For example,
+ if you call a function with them that subsequently calls printf(),
+   there's a high chance Valgrind will crash. Generally, your prospects of
+   these working are better if the called function does not refer to
+   any global variables, and does not refer to any libc or other functions
+ (printf et al). Any kind of entanglement with libc or dynamic linking is
+ likely to have a bad outcome, for tricky reasons which we've grappled
+ with a lot in the past.
+*/
+#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
+ __extension__ \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL0, \
+ _qyy_fn, \
+ 0, 0, 0, 0); \
+ _qyy_res; \
+ })
+
+#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
+ __extension__ \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL1, \
+ _qyy_fn, \
+ _qyy_arg1, 0, 0, 0); \
+ _qyy_res; \
+ })
+
+#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
+ __extension__ \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL2, \
+ _qyy_fn, \
+ _qyy_arg1, _qyy_arg2, 0, 0); \
+ _qyy_res; \
+ })
+
+#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
+ __extension__ \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL3, \
+ _qyy_fn, \
+ _qyy_arg1, _qyy_arg2, \
+ _qyy_arg3, 0); \
+ _qyy_res; \
+ })
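+/* Illustrative usage, not part of this header: the callee receives the
+   ThreadId as an extra first argument, so a CLIENT_CALL2 needs a
+   three-argument function (sum_on_real_cpu is hypothetical):
+
+     unsigned long sum_on_real_cpu(unsigned long tid,
+                                   unsigned long a, unsigned long b) {
+       return a + b;   // runs on the real CPU, not the simulated one
+     }
+
+     unsigned long r = VALGRIND_NON_SIMD_CALL2(sum_on_real_cpu, 20, 22);
+     // r == 42 under Valgrind; the default 0 is returned when run natively.
+*/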
+
+
+/* Counts the number of errors that have been recorded by a tool. Nb:
+ the tool must record the errors with VG_(maybe_record_error)() or
+ VG_(unique_error)() for them to be counted. */
+#define VALGRIND_COUNT_ERRORS \
+ __extension__ \
+ ({unsigned int _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__COUNT_ERRORS, \
+ 0, 0, 0, 0, 0); \
+ _qyy_res; \
+ })
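+/* Illustrative usage, not part of this header: a regression suite can
+   send Valgrind's output to /dev/null and still assert that a test
+   case introduced no new errors (run_test_case is hypothetical):
+
+     unsigned before = VALGRIND_COUNT_ERRORS;
+     run_test_case();
+     assert(VALGRIND_COUNT_ERRORS == before);
+*/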
+
+/* Mark a block of memory as having been allocated by a malloc()-like
+   function. `addr' is the start of the usable block (ie. after any
+   redzone). `rzB' is the redzone size if the allocator can apply redzones;
+ use '0' if not. Adding redzones makes it more likely Valgrind will spot
+ block overruns. `is_zeroed' indicates if the memory is zeroed, as it is
+ for calloc(). Put it immediately after the point where a block is
+ allocated.
+
+   If you're using Memcheck and you allocate memory via superblocks, then
+   hand out small chunks of each superblock without redzones on the small
+   blocks, it's worth marking each superblock with VALGRIND_MAKE_MEM_NOACCESS
+   when it's created, so that block overruns are detected. But if you can
+   put redzones on the small blocks, it's probably better not to mark the
+   superblock, so that overruns are reported in terms of the small block
+   rather than the superblock (though a big overrun that skips over a
+   redzone could then be missed). See memcheck/tests/custom_alloc.c for an
+   example.
+
+ WARNING: if your allocator uses malloc() or 'new' to allocate
+ superblocks, rather than mmap() or brk(), this will not work properly --
+ you'll likely get assertion failures during leak detection. This is
+ because Valgrind doesn't like seeing overlapping heap blocks. Sorry.
+
+ Nb: block must be freed via a free()-like function specified
+ with VALGRIND_FREELIKE_BLOCK or mismatch errors will occur. */
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MALLOCLIKE_BLOCK, \
+ addr, sizeB, rzB, is_zeroed, 0); \
+ }
+
+/* Mark a block of memory as having been freed by a free()-like function.
+ `rzB' is redzone size; it must match that given to
+ VALGRIND_MALLOCLIKE_BLOCK. Memory not freed will be detected by the leak
+ checker. Put it immediately after the point where the block is freed. */
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__FREELIKE_BLOCK, \
+ addr, rzB, 0, 0, 0); \
+ }
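+/* Illustrative usage, not part of this header: a custom allocator that
+   carves blocks out of an mmap'd superblock, with 16-byte redzones on
+   each side (carve and release are hypothetical helpers):
+
+     #define RZ 16
+     void* my_alloc(size_t n) {
+       char* p = carve(n + 2 * RZ);      // raw space from the superblock
+       VALGRIND_MALLOCLIKE_BLOCK(p + RZ, n, RZ, 0);   // 0: not zeroed
+       return p + RZ;
+     }
+     void my_free(void* p) {
+       VALGRIND_FREELIKE_BLOCK(p, RZ);   // redzone size must match
+       release((char*)p - RZ);
+     }
+*/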
+
+/* Create a memory pool. */
+#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__CREATE_MEMPOOL, \
+ pool, rzB, is_zeroed, 0, 0); \
+ }
+
+/* Destroy a memory pool. */
+#define VALGRIND_DESTROY_MEMPOOL(pool) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DESTROY_MEMPOOL, \
+ pool, 0, 0, 0, 0); \
+ }
+
+/* Associate a piece of memory with a memory pool. */
+#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_ALLOC, \
+ pool, addr, size, 0, 0); \
+ }
+
+/* Disassociate a piece of memory from a memory pool. */
+#define VALGRIND_MEMPOOL_FREE(pool, addr) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_FREE, \
+ pool, addr, 0, 0, 0); \
+ }
+
+/* Disassociate any pieces outside a particular range. */
+#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_TRIM, \
+ pool, addr, size, 0, 0); \
+ }
+
+/* Move a memory pool from one anchor address to another. */
+#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MOVE_MEMPOOL, \
+ poolA, poolB, 0, 0, 0); \
+ }
+
+/* Resize and/or move a piece associated with a memory pool. */
+#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_CHANGE, \
+ pool, addrA, addrB, size, 0); \
+ }
+
+/* Return 1 if a mempool exists, else 0. */
+#define VALGRIND_MEMPOOL_EXISTS(pool) \
+ ({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_EXISTS, \
+ pool, 0, 0, 0, 0); \
+ _qzz_res; \
+ })
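+/* Illustrative pool lifecycle, not part of this header: `pool' is just
+   an anchor address identifying the pool, typically the address of the
+   pool's own control structure.
+
+     VALGRIND_CREATE_MEMPOOL(pool, 0, 0);       // no redzones, not zeroed
+     VALGRIND_MEMPOOL_ALLOC(pool, obj, size);   // hand out a piece
+     VALGRIND_MEMPOOL_FREE(pool, obj);          // take it back
+     VALGRIND_DESTROY_MEMPOOL(pool);            // tear down pool and pieces
+*/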
+
+/* Mark a piece of memory as being a stack. Returns a stack id. */
+#define VALGRIND_STACK_REGISTER(start, end) \
+ ({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_REGISTER, \
+ start, end, 0, 0, 0); \
+ _qzz_res; \
+ })
+
+/* Unmark the piece of memory associated with a stack id as being a
+ stack. */
+#define VALGRIND_STACK_DEREGISTER(id) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_DEREGISTER, \
+ id, 0, 0, 0, 0); \
+ }
+
+/* Change the start and end address of the stack id. */
+#define VALGRIND_STACK_CHANGE(id, start, end) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_CHANGE, \
+ id, start, end, 0, 0); \
+ }
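+/* Illustrative usage, not part of this header: a coroutine or user-level
+   thread library can register each stack it allocates so Valgrind can
+   track it correctly (kStackSize is hypothetical):
+
+     char* stack = (char*)malloc(kStackSize);
+     unsigned id = VALGRIND_STACK_REGISTER(stack, stack + kStackSize);
+     // ... run the coroutine on this stack ...
+     VALGRIND_STACK_DEREGISTER(id);
+     free(stack);
+*/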
+
+
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_ppc32_aix5
+#undef PLAT_ppc64_aix5
+
+#endif /* __VALGRIND_H */
diff --git a/V8Binding/v8/src/uri.js b/V8Binding/v8/src/uri.js
index fe659aa..0dfe765 100644
--- a/V8Binding/v8/src/uri.js
+++ b/V8Binding/v8/src/uri.js
@@ -39,6 +39,10 @@ function URIAddEncodedOctetToBuffer(octet, result, index) {
function URIEncodeOctets(octets, result, index) {
+ if (hexCharCodeArray === 0) {
+ hexCharCodeArray = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 65, 66, 67, 68, 69, 70];
+ }
index = URIAddEncodedOctetToBuffer(octets[0], result, index);
if (octets[1]) index = URIAddEncodedOctetToBuffer(octets[1], result, index);
if (octets[2]) index = URIAddEncodedOctetToBuffer(octets[2], result, index);
@@ -316,11 +320,9 @@ function URIEncodeComponent(component) {
}
-const hexCharArray = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
- "A", "B", "C", "D", "E", "F"];
-
-const hexCharCodeArray = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
- 65, 66, 67, 68, 69, 70];
+// Lazily initialized.
+var hexCharArray = 0;
+var hexCharCodeArray = 0;
function HexValueOf(c) {
@@ -341,6 +343,10 @@ function HexValueOf(c) {
// 64 -> 0040, 62234 -> F31A.
function CharCodeToHex4Str(cc) {
var r = "";
+ if (hexCharArray === 0) {
+ hexCharArray = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
+ "A", "B", "C", "D", "E", "F"];
+ }
for (var i = 0; i < 4; ++i) {
var c = hexCharArray[cc & 0x0F];
r = c + r;
diff --git a/V8Binding/v8/src/v8.cc b/V8Binding/v8/src/v8.cc
index 72f74aa..00e0e6e 100644
--- a/V8Binding/v8/src/v8.cc
+++ b/V8Binding/v8/src/v8.cc
@@ -156,6 +156,15 @@ uint32_t V8::Random() {
return (hi << 16) + (lo & 0xFFFF);
}
+void V8::IdleNotification(bool is_high_priority) {
+ if (!FLAG_use_idle_notification) return;
+ // Ignore high priority instances of V8.
+ if (is_high_priority) return;
+
+ // Uncommit unused memory in new space.
+ Heap::UncommitFromSpace();
+}
+
Smi* V8::RandomPositiveSmi() {
uint32_t random = Random();
diff --git a/V8Binding/v8/src/v8.h b/V8Binding/v8/src/v8.h
index 2cfce3d..1ca3245 100644
--- a/V8Binding/v8/src/v8.h
+++ b/V8Binding/v8/src/v8.h
@@ -99,6 +99,9 @@ class V8 : public AllStatic {
static uint32_t Random();
static Smi* RandomPositiveSmi();
+ // Idle notification directly from the API.
+ static void IdleNotification(bool is_high_priority);
+
private:
// True if engine is currently running
static bool is_running_;
diff --git a/V8Binding/v8/src/v8natives.js b/V8Binding/v8/src/v8natives.js
index 841c920..be92347 100644
--- a/V8Binding/v8/src/v8natives.js
+++ b/V8Binding/v8/src/v8natives.js
@@ -46,12 +46,16 @@ const $isFinite = GlobalIsFinite;
// Helper function used to install functions on objects.
function InstallFunctions(object, attributes, functions) {
+ if (functions.length >= 8) {
+ %OptimizeObjectForAddingMultipleProperties(object, functions.length >> 1);
+ }
for (var i = 0; i < functions.length; i += 2) {
var key = functions[i];
var f = functions[i + 1];
%FunctionSetName(f, key);
%SetProperty(object, key, f, attributes);
}
+ %TransformToFastProperties(object);
}
// Emulates JSC by installing functions on a hidden prototype that
@@ -453,9 +457,11 @@ function NumberToJSON(key) {
// ----------------------------------------------------------------------------
function SetupNumber() {
+ %OptimizeObjectForAddingMultipleProperties($Number.prototype, 8);
// Setup the constructor property on the Number prototype object.
%SetProperty($Number.prototype, "constructor", $Number, DONT_ENUM);
+ %OptimizeObjectForAddingMultipleProperties($Number, 5);
// ECMA-262 section 15.7.3.1.
%SetProperty($Number,
"MAX_VALUE",
@@ -479,6 +485,7 @@ function SetupNumber() {
"POSITIVE_INFINITY",
1/0,
DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %TransformToFastProperties($Number);
// Setup non-enumerable functions on the Number prototype object.
InstallFunctions($Number.prototype, DONT_ENUM, $Array(
diff --git a/V8Binding/v8/src/version.cc b/V8Binding/v8/src/version.cc
index c23e2d5..791c87c 100644
--- a/V8Binding/v8/src/version.cc
+++ b/V8Binding/v8/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1
#define MINOR_VERSION 3
-#define BUILD_NUMBER 3
+#define BUILD_NUMBER 5
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION true
diff --git a/V8Binding/v8/src/x64/assembler-x64.cc b/V8Binding/v8/src/x64/assembler-x64.cc
index b4fd678..2607ecc 100644
--- a/V8Binding/v8/src/x64/assembler-x64.cc
+++ b/V8Binding/v8/src/x64/assembler-x64.cc
@@ -437,21 +437,43 @@ void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
}
-void Assembler::arithmetic_op(byte opcode, Register dst, Register src) {
+void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_rex_64(dst, src);
+ emit_rex_64(reg, rm_reg);
emit(opcode);
- emit_modrm(dst, src);
+ emit_modrm(reg, rm_reg);
}
-void Assembler::arithmetic_op_32(byte opcode, Register dst, Register src) {
+void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_optional_rex_32(dst, src);
+ emit(0x66);
+ emit_optional_rex_32(reg, rm_reg);
emit(opcode);
- emit_modrm(dst, src);
+ emit_modrm(reg, rm_reg);
+}
+
+
+void Assembler::arithmetic_op_16(byte opcode,
+ Register reg,
+ const Operand& rm_reg) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_optional_rex_32(reg, rm_reg);
+ emit(opcode);
+ emit_operand(reg, rm_reg);
+}
+
+
+void Assembler::arithmetic_op_32(byte opcode, Register reg, Register rm_reg) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(reg, rm_reg);
+ emit(opcode);
+ emit_modrm(reg, rm_reg);
}
@@ -504,6 +526,47 @@ void Assembler::immediate_arithmetic_op(byte subcode,
}
+void Assembler::immediate_arithmetic_op_16(byte subcode,
+ Register dst,
+ Immediate src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66); // Operand size override prefix.
+ emit_optional_rex_32(dst);
+ if (is_int8(src.value_)) {
+ emit(0x83);
+ emit_modrm(subcode, dst);
+ emit(src.value_);
+ } else if (dst.is(rax)) {
+ emit(0x05 | (subcode << 3));
+ emitl(src.value_);
+ } else {
+ emit(0x81);
+ emit_modrm(subcode, dst);
+ emitl(src.value_);
+ }
+}
+
+
+void Assembler::immediate_arithmetic_op_16(byte subcode,
+ const Operand& dst,
+ Immediate src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66); // Operand size override prefix.
+ emit_optional_rex_32(dst);
+ if (is_int8(src.value_)) {
+ emit(0x83);
+ emit_operand(subcode, dst);
+ emit(src.value_);
+ } else {
+ emit(0x81);
+ emit_operand(subcode, dst);
+ emitl(src.value_);
+ }
+}
+
+
void Assembler::immediate_arithmetic_op_32(byte subcode,
Register dst,
Immediate src) {
@@ -744,6 +807,14 @@ void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
}
+void Assembler::cmpb_al(Immediate imm8) {
+ ASSERT(is_int8(imm8.value_) || is_uint8(imm8.value_));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x3c);
+ emit(imm8.value_);
+}
+
void Assembler::cpuid() {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
@@ -1193,6 +1264,32 @@ void Assembler::movq(const Operand& dst, Immediate value) {
}
+/*
+ * Loads the ip-relative location of the src label into the target
+ * location (as a 32-bit offset sign extended to 64-bit).
+ */
+void Assembler::movl(const Operand& dst, Label* src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+ emit(0xC7);
+ emit_operand(0, dst);
+ if (src->is_bound()) {
+ int offset = src->pos() - pc_offset() - sizeof(int32_t);
+ ASSERT(offset <= 0);
+ emitl(offset);
+ } else if (src->is_linked()) {
+ emitl(src->pos());
+ src->link_to(pc_offset() - sizeof(int32_t));
+ } else {
+ ASSERT(src->is_unused());
+ int32_t current = pc_offset();
+ emitl(current);
+ src->link_to(current);
+ }
+}
+
+
void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
// If there is no relocation info, emit the value of the handle efficiently
// (possibly using less that 8 bytes for the value).
@@ -1608,6 +1705,11 @@ void Assembler::testl(Register dst, Register src) {
void Assembler::testl(Register reg, Immediate mask) {
+ // testl with a mask that fits in the low byte is exactly testb.
+ if (is_uint8(mask.value_)) {
+ testb(reg, mask);
+ return;
+ }
EnsureSpace ensure_space(this);
last_pc_ = pc_;
if (reg.is(rax)) {
@@ -1623,6 +1725,11 @@ void Assembler::testl(Register reg, Immediate mask) {
void Assembler::testl(const Operand& op, Immediate mask) {
+ // testl with a mask that fits in the low byte is exactly testb.
+ if (is_uint8(mask.value_)) {
+ testb(op, mask);
+ return;
+ }
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(rax, op);
diff --git a/V8Binding/v8/src/x64/assembler-x64.h b/V8Binding/v8/src/x64/assembler-x64.h
index 015fa68..2130fb0 100644
--- a/V8Binding/v8/src/x64/assembler-x64.h
+++ b/V8Binding/v8/src/x64/assembler-x64.h
@@ -496,13 +496,17 @@ class Assembler : public Malloced {
// Load a 32-bit immediate value, zero-extended to 64 bits.
void movl(Register dst, Immediate imm32);
+ // Move 64 bit register value to 64-bit memory location.
+ void movq(const Operand& dst, Register src);
+ // Move 64 bit memory location to 64-bit register value.
void movq(Register dst, const Operand& src);
+ void movq(Register dst, Register src);
// Sign extends immediate 32-bit value to 64 bits.
void movq(Register dst, Immediate x);
- void movq(Register dst, Register src);
+ // Move the offset of the label location relative to the current
+ // position (after the move) to the destination.
+ void movl(const Operand& dst, Label* src);
- // Move 64 bit register value to 64-bit memory location.
- void movq(const Operand& dst, Register src);
// Move sign extended immediate to memory location.
void movq(const Operand& dst, Immediate value);
// New x64 instructions to load a 64-bit immediate into a register.
@@ -535,7 +539,11 @@ class Assembler : public Malloced {
// Arithmetics
void addl(Register dst, Register src) {
- arithmetic_op_32(0x03, dst, src);
+ if (dst.low_bits() == 4) { // Forces SIB byte.
+ arithmetic_op_32(0x01, src, dst);
+ } else {
+ arithmetic_op_32(0x03, dst, src);
+ }
}
void addl(Register dst, Immediate src) {
@@ -574,10 +582,44 @@ class Assembler : public Malloced {
immediate_arithmetic_op_8(0x7, dst, src);
}
+ void cmpb_al(Immediate src);
+
+ void cmpb(Register dst, Register src) {
+ arithmetic_op(0x3A, dst, src);
+ }
+
+ void cmpb(Register dst, const Operand& src) {
+ arithmetic_op(0x3A, dst, src);
+ }
+
+ void cmpb(const Operand& dst, Register src) {
+ arithmetic_op(0x38, src, dst);
+ }
+
void cmpb(const Operand& dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
+ void cmpw(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op_16(0x7, dst, src);
+ }
+
+ void cmpw(Register dst, Immediate src) {
+ immediate_arithmetic_op_16(0x7, dst, src);
+ }
+
+ void cmpw(Register dst, const Operand& src) {
+ arithmetic_op_16(0x3B, dst, src);
+ }
+
+ void cmpw(Register dst, Register src) {
+ arithmetic_op_16(0x3B, dst, src);
+ }
+
+ void cmpw(const Operand& dst, Register src) {
+ arithmetic_op_16(0x39, src, dst);
+ }
+
void cmpl(Register dst, Register src) {
arithmetic_op_32(0x3B, dst, src);
}
@@ -794,6 +836,10 @@ class Assembler : public Malloced {
immediate_arithmetic_op_32(0x5, dst, src);
}
+ void subb(Register dst, Immediate src) {
+ immediate_arithmetic_op_8(0x5, dst, src);
+ }
+
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
void testl(Register dst, Register src);
@@ -1141,26 +1187,36 @@ class Assembler : public Malloced {
// AND, OR, XOR, or CMP. The encodings of these operations are all
// similar, differing just in the opcode or in the reg field of the
// ModR/M byte.
- void arithmetic_op(byte opcode, Register dst, Register src);
- void arithmetic_op_32(byte opcode, Register dst, Register src);
+ void arithmetic_op_16(byte opcode, Register reg, Register rm_reg);
+ void arithmetic_op_16(byte opcode, Register reg, const Operand& rm_reg);
+ void arithmetic_op_32(byte opcode, Register reg, Register rm_reg);
void arithmetic_op_32(byte opcode, Register reg, const Operand& rm_reg);
+ void arithmetic_op(byte opcode, Register reg, Register rm_reg);
void arithmetic_op(byte opcode, Register reg, const Operand& rm_reg);
void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
- // Operate on a 32-bit word in memory or register.
- void immediate_arithmetic_op_32(byte subcode,
- const Operand& dst,
- Immediate src);
- void immediate_arithmetic_op_32(byte subcode,
- Register dst,
- Immediate src);
// Operate on a byte in memory or register.
void immediate_arithmetic_op_8(byte subcode,
- const Operand& dst,
+ Register dst,
Immediate src);
void immediate_arithmetic_op_8(byte subcode,
- Register dst,
+ const Operand& dst,
Immediate src);
+ // Operate on a word in memory or register.
+ void immediate_arithmetic_op_16(byte subcode,
+ Register dst,
+ Immediate src);
+ void immediate_arithmetic_op_16(byte subcode,
+ const Operand& dst,
+ Immediate src);
+ // Operate on a 32-bit word in memory or register.
+ void immediate_arithmetic_op_32(byte subcode,
+ Register dst,
+ Immediate src);
+ void immediate_arithmetic_op_32(byte subcode,
+ const Operand& dst,
+ Immediate src);
+
// Emit machine code for a shift operation.
void shift(Register dst, Immediate shift_amount, int subcode);
void shift_32(Register dst, Immediate shift_amount, int subcode);
@@ -1180,6 +1236,7 @@ class Assembler : public Malloced {
friend class CodePatcher;
friend class EnsureSpace;
+ friend class RegExpMacroAssemblerX64;
// Code buffer:
// The buffer into which code and relocation info are generated.
diff --git a/V8Binding/v8/src/x64/codegen-x64.cc b/V8Binding/v8/src/x64/codegen-x64.cc
index 87f1040..4f332ca 100644
--- a/V8Binding/v8/src/x64/codegen-x64.cc
+++ b/V8Binding/v8/src/x64/codegen-x64.cc
@@ -123,6 +123,27 @@ class DeferredInlineSmiAdd: public DeferredCode {
};
+// The result of value + src is in dst. It either overflowed or was not
+// smi tagged. Undo the speculative addition and call the appropriate
+// specialized stub for add. The result is left in dst.
+class DeferredInlineSmiAddReversed: public DeferredCode {
+ public:
+ DeferredInlineSmiAddReversed(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiAddReversed");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
class DeferredInlineSmiSub: public DeferredCode {
public:
DeferredInlineSmiSub(Register dst,
@@ -3063,26 +3084,19 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
is_increment);
}
- Result tmp = allocator_->AllocateWithoutSpilling();
- ASSERT(kSmiTagMask == 1 && kSmiTag == 0);
- __ movl(tmp.reg(), Immediate(kSmiTagMask));
- // Smi test.
__ movq(kScratchRegister, new_value.reg());
if (is_increment) {
__ addl(kScratchRegister, Immediate(Smi::FromInt(1)));
} else {
__ subl(kScratchRegister, Immediate(Smi::FromInt(1)));
}
- // deferred->Branch(overflow);
- __ cmovl(overflow, kScratchRegister, tmp.reg());
- __ testl(kScratchRegister, tmp.reg());
- tmp.Unuse();
+ // Smi test.
+ deferred->Branch(overflow);
+ __ testl(kScratchRegister, Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ movq(new_value.reg(), kScratchRegister);
-
deferred->BindExit();
-
// Postfix: store the old value in the allocated slot under the
// reference.
if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
@@ -5057,27 +5071,6 @@ void DeferredInlineSmiAdd::Generate() {
}
-// The result of value + src is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative addition and call the appropriate
-// specialized stub for add. The result is left in dst.
-class DeferredInlineSmiAddReversed: public DeferredCode {
- public:
- DeferredInlineSmiAddReversed(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAddReversed");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
void DeferredInlineSmiAddReversed::Generate() {
__ push(Immediate(value_)); // Note: sign extended.
__ push(dst_);
@@ -5166,7 +5159,184 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
frame_->Push(operand);
break;
}
- // TODO(X64): Move other implementations from ia32 to here.
+
+ case Token::SUB: {
+ if (reversed) {
+ Result constant_operand(value);
+ LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
+ smi_value,
+ overwrite_mode);
+ __ testl(operand->reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ // A smi currently fits in a 32-bit Immediate.
+ __ subl(operand->reg(), Immediate(smi_value));
+ Label add_success;
+ __ j(no_overflow, &add_success);
+ __ addl(operand->reg(), Immediate(smi_value));
+ deferred->Jump();
+ __ bind(&add_success);
+ deferred->BindExit();
+ frame_->Push(operand);
+ }
+ break;
+ }
+
+ case Token::SAR:
+ if (reversed) {
+ Result constant_operand(value);
+ LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ // Only the least significant 5 bits of the shift value are used.
+ // In the slow case, this masking is done inside the runtime call.
+ int shift_value = int_value & 0x1f;
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ __ testl(operand->reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ if (shift_value > 0) {
+ __ sarl(operand->reg(), Immediate(shift_value));
+ __ and_(operand->reg(), Immediate(~kSmiTagMask));
+ }
+ deferred->BindExit();
+ frame_->Push(operand);
+ }
+ break;
+
+ case Token::SHR:
+ if (reversed) {
+ Result constant_operand(value);
+ LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ // Only the least significant 5 bits of the shift value are used.
+ // In the slow case, this masking is done inside the runtime call.
+ int shift_value = int_value & 0x1f;
+ operand->ToRegister();
+ Result answer = allocator()->Allocate();
+ ASSERT(answer.is_valid());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ answer.reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ __ testl(operand->reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ __ movl(answer.reg(), operand->reg());
+ __ sarl(answer.reg(), Immediate(kSmiTagSize));
+ __ shrl(answer.reg(), Immediate(shift_value));
+          // A negative Smi shifted right by two or more is in the positive
+          // Smi range.
+ if (shift_value < 2) {
+ __ testl(answer.reg(), Immediate(0xc0000000));
+ deferred->Branch(not_zero);
+ }
+ operand->Unuse();
+ ASSERT(kSmiTag == 0);
+ ASSERT(kSmiTagSize == 1);
+ __ addl(answer.reg(), answer.reg());
+ deferred->BindExit();
+ frame_->Push(&answer);
+ }
+ break;
+
+ case Token::SHL:
+ if (reversed) {
+ Result constant_operand(value);
+ LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
+ } else {
+ // Only the least significant 5 bits of the shift value are used.
+ // In the slow case, this masking is done inside the runtime call.
+ int shift_value = int_value & 0x1f;
+ operand->ToRegister();
+ if (shift_value == 0) {
+ // Spill operand so it can be overwritten in the slow case.
+ frame_->Spill(operand->reg());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ __ testl(operand->reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ deferred->BindExit();
+ frame_->Push(operand);
+ } else {
+ // Use a fresh temporary for nonzero shift values.
+ Result answer = allocator()->Allocate();
+ ASSERT(answer.is_valid());
+ DeferredInlineSmiOperation* deferred =
+ new DeferredInlineSmiOperation(op,
+ answer.reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ __ testl(operand->reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ __ movl(answer.reg(), operand->reg());
+ ASSERT(kSmiTag == 0); // adjust code if not the case
+ // We do no shifts, only the Smi conversion, if shift_value is 1.
+ if (shift_value > 1) {
+ __ shll(answer.reg(), Immediate(shift_value - 1));
+ }
+ // Convert int result to Smi, checking that it is in int range.
+ ASSERT(kSmiTagSize == 1); // adjust code if not the case
+ __ addl(answer.reg(), answer.reg());
+ deferred->Branch(overflow);
+ deferred->BindExit();
+ operand->Unuse();
+ frame_->Push(&answer);
+ }
+ }
+ break;
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND: {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ if (reversed) {
+ // Bit operations with a constant smi are commutative.
+ // We can swap left and right operands with no problem.
+ // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
+ overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
+ }
+ DeferredCode* deferred = new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ __ testl(operand->reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ if (op == Token::BIT_AND) {
+ __ and_(operand->reg(), Immediate(smi_value));
+ } else if (op == Token::BIT_XOR) {
+ if (int_value != 0) {
+ __ xor_(operand->reg(), Immediate(smi_value));
+ }
+ } else {
+ ASSERT(op == Token::BIT_OR);
+ if (int_value != 0) {
+ __ or_(operand->reg(), Immediate(smi_value));
+ }
+ }
+ deferred->BindExit();
+ frame_->Push(operand);
+ break;
+ }
// Generate inline code for mod of powers of 2 and negative powers of 2.
case Token::MOD:
@@ -5888,6 +6058,8 @@ void Reference::SetValue(InitState init_state) {
__ testl(key.reg(),
Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
deferred->Branch(not_zero);
+      // Ensure that the smi is zero-extended in the upper 32 bits;
+      // the test above does not guarantee this.
+ __ movl(key.reg(), key.reg());
// Check that the receiver is not a smi.
__ testl(receiver.reg(), Immediate(kSmiTagMask));
@@ -7047,14 +7219,14 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
__ jmp(&done);
__ bind(&load_smi_1);
- __ sar(kScratchRegister, Immediate(kSmiTagSize));
+ __ sarl(kScratchRegister, Immediate(kSmiTagSize));
__ push(kScratchRegister);
__ fild_s(Operand(rsp, 0));
__ pop(kScratchRegister);
__ jmp(&done_load_1);
__ bind(&load_smi_2);
- __ sar(kScratchRegister, Immediate(kSmiTagSize));
+ __ sarl(kScratchRegister, Immediate(kSmiTagSize));
__ push(kScratchRegister);
__ fild_s(Operand(rsp, 0));
__ pop(kScratchRegister);
@@ -7409,7 +7581,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ j(negative, &non_smi_result);
}
// Tag smi result and return.
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
+ ASSERT(kSmiTagSize == 1); // adjust code if not the case
__ lea(rax, Operand(rax, rax, times_1, kSmiTag));
__ ret(2 * kPointerSize);
diff --git a/V8Binding/v8/src/x64/codegen-x64.h b/V8Binding/v8/src/x64/codegen-x64.h
index b1c61d8..d78be49 100644
--- a/V8Binding/v8/src/x64/codegen-x64.h
+++ b/V8Binding/v8/src/x64/codegen-x64.h
@@ -299,14 +299,9 @@ class CodeGenerator: public AstVisitor {
#endif
static void SetFunctionInfo(Handle<JSFunction> fun,
- int length,
- int function_token_position,
- int start_position,
- int end_position,
- bool is_expression,
+ FunctionLiteral* lit,
bool is_toplevel,
- Handle<Script> script,
- Handle<String> inferred_name);
+ Handle<Script> script);
// Accessors
MacroAssembler* masm() { return masm_; }
diff --git a/V8Binding/v8/src/x64/disasm-x64.cc b/V8Binding/v8/src/x64/disasm-x64.cc
index cc8365c..d8d6dbb 100644
--- a/V8Binding/v8/src/x64/disasm-x64.cc
+++ b/V8Binding/v8/src/x64/disasm-x64.cc
@@ -105,7 +105,6 @@ static ByteMnemonic two_operands_instr[] = {
static ByteMnemonic zero_operands_instr[] = {
{ 0xC3, UNSET_OP_ORDER, "ret" },
{ 0xC9, UNSET_OP_ORDER, "leave" },
- { 0x90, UNSET_OP_ORDER, "nop" },
{ 0xF4, UNSET_OP_ORDER, "hlt" },
{ 0xCC, UNSET_OP_ORDER, "int3" },
{ 0x60, UNSET_OP_ORDER, "pushad" },
@@ -1425,7 +1424,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
default:
UNREACHABLE();
}
- AppendToBuffer("test%c rax,0x%"V8_PTR_PREFIX"ux",
+ AppendToBuffer("test%c rax,0x%"V8_PTR_PREFIX"x",
operand_size_code(),
value);
break;
diff --git a/V8Binding/v8/src/x64/jump-target-x64.cc b/V8Binding/v8/src/x64/jump-target-x64.cc
index b804044..dd2f6d6 100644
--- a/V8Binding/v8/src/x64/jump-target-x64.cc
+++ b/V8Binding/v8/src/x64/jump-target-x64.cc
@@ -362,6 +362,70 @@ void JumpTarget::DoBind() {
__ bind(&entry_label_);
}
+
+void BreakTarget::Jump() {
+ // Drop leftover statement state from the frame before merging, without
+ // emitting code.
+ ASSERT(cgen()->has_valid_frame());
+ int count = cgen()->frame()->height() - expected_height_;
+ cgen()->frame()->ForgetElements(count);
+ DoJump();
+}
+
+
+void BreakTarget::Jump(Result* arg) {
+ // Drop leftover statement state from the frame before merging, without
+ // emitting code.
+ ASSERT(cgen()->has_valid_frame());
+ int count = cgen()->frame()->height() - expected_height_;
+ cgen()->frame()->ForgetElements(count);
+ cgen()->frame()->Push(arg);
+ DoJump();
+}
+
+
+void BreakTarget::Bind() {
+#ifdef DEBUG
+ // All the forward-reaching frames should have been adjusted at the
+ // jumps to this target.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ ASSERT(reaching_frames_[i] == NULL ||
+ reaching_frames_[i]->height() == expected_height_);
+ }
+#endif
+ // Drop leftover statement state from the frame before merging, even on
+ // the fall through. This is so we can bind the return target with state
+ // on the frame.
+ if (cgen()->has_valid_frame()) {
+ int count = cgen()->frame()->height() - expected_height_;
+ cgen()->frame()->ForgetElements(count);
+ }
+ DoBind();
+}
+
+
+void BreakTarget::Bind(Result* arg) {
+#ifdef DEBUG
+ // All the forward-reaching frames should have been adjusted at the
+ // jumps to this target.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ ASSERT(reaching_frames_[i] == NULL ||
+ reaching_frames_[i]->height() == expected_height_ + 1);
+ }
+#endif
+ // Drop leftover statement state from the frame before merging, even on
+ // the fall through. This is so we can bind the return target with state
+ // on the frame.
+ if (cgen()->has_valid_frame()) {
+ int count = cgen()->frame()->height() - expected_height_;
+ cgen()->frame()->ForgetElements(count);
+ cgen()->frame()->Push(arg);
+ }
+ DoBind();
+ *arg = cgen()->frame()->Pop();
+}
+
+
#undef __
diff --git a/V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc b/V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc
index 209aa2d..1e38d6d 100644
--- a/V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/V8Binding/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -25,3 +25,1277 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+#include "serialize.h"
+#include "unicode.h"
+#include "log.h"
+#include "ast.h"
+#include "regexp-stack.h"
+#include "macro-assembler.h"
+#include "regexp-macro-assembler.h"
+#include "x64/macro-assembler-x64.h"
+#include "x64/regexp-macro-assembler-x64.h"
+
+namespace v8 {
+namespace internal {
+
+/*
+ * This assembler uses the following register assignment convention
+ * - rdx : currently loaded character(s) as ASCII or UC16. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - rdi : current position in input, as negative offset from end of string.
+ * Please notice that this is the byte offset, not the character
+ * offset! It is always a 32-bit signed (negative) offset, but must be
+ * maintained sign-extended to 64 bits, since it is used as an index.
+ * - rsi : end of input (points to byte after last character in input),
+ * so that rsi+rdi points to the current character.
+ * - rbp : frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - rsp : points to tip of C stack.
+ * - rcx : points to tip of backtrack stack. The backtrack stack contains
+ * only 32-bit values. Most are offsets from some base (e.g., character
+ * positions from end of string or code location from Code* pointer).
+ * - r8 : code object pointer. Used to convert between absolute and
+ * code-object-relative addresses.
+ *
+ * The registers rax, rbx, rcx, r9 and r11 are free to use for computations.
+ * If changed to use r12+, they should be saved as callee-save registers.
+ *
+ * Each call to a C++ method should retain these registers.
+ *
+ * The stack will have the following content, in some order, indexable from the
+ * frame pointer (see, e.g., kStackHighEnd):
+ * - stack_area_base (High end of the memory area to use as
+ * backtracking stack)
+ * - at_start (if 1, start at start of string, if 0, don't)
+ * - int* capture_array (int[num_saved_registers_], for output).
+ * - end of input (Address of end of string)
+ * - start of input (Address of first character in string)
+ * - String** input_string (location of a handle containing the string)
+ * - return address
+ * - backup of callee save registers (rbx, possibly rsi and rdi).
+ * - Offset of location before start of input (effectively character
+ * position -1). Used to initialize capture registers to a non-position.
+ * - register 0 rbp[-n] (Only positions must be stored in the first
+ * - register 1 rbp[-n-8] num_saved_registers_ registers)
+ * - ...
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out uninitialized.
+ *
+ * The first seven values must be provided by the calling code by
+ * calling the code's entry address cast to a function pointer with the
+ * following signature:
+ * int (*match)(String* input_string,
+ * Address start,
+ * Address end,
+ * int* capture_output_array,
+ * bool at_start,
+ * byte* stack_area_base)
+ */
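+// Illustrative sketch, not part of this file: given the signature above,
+// the runtime would invoke the generated code roughly as follows (the
+// names below are hypothetical; the real call site lives in the regexp
+// runtime support code):
+//
+//   typedef int (*RegExpCodeEntry)(String* input_string, Address start,
+//                                  Address end, int* capture_output_array,
+//                                  bool at_start, byte* stack_area_base);
+//   RegExpCodeEntry entry =
+//       reinterpret_cast<RegExpCodeEntry>(code->entry());
+//   int result = entry(subject, start_address, end_address,
+//                      offsets_vector, at_start, stack_base);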
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
+ Mode mode,
+ int registers_to_save)
+ : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+ code_relative_fixup_positions_(4),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_() {
+ __ jmp(&entry_label_); // We'll write the entry code when we know more.
+ __ bind(&start_label_); // And then continue from here.
+}
+
+
+RegExpMacroAssemblerX64::~RegExpMacroAssemblerX64() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+}
+
+
+int RegExpMacroAssemblerX64::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerX64::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+    __ addq(rdi, Immediate(by * char_size()));
+ }
+}
+
+
+void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
+ ASSERT(reg >= 0);
+ ASSERT(reg < num_registers_);
+ if (by != 0) {
+ __ addq(register_location(reg), Immediate(by));
+ }
+}
+
+
+void RegExpMacroAssemblerX64::Backtrack() {
+ CheckPreemption();
+ // Pop Code* offset from backtrack stack, add Code* and jump to location.
+ Pop(rbx);
+ __ addq(rbx, code_object_pointer());
+ __ jmp(rbx);
+}
+
+
+void RegExpMacroAssemblerX64::Bind(Label* label) {
+ __ bind(label);
+}
+
+
+void RegExpMacroAssemblerX64::CheckCharacter(uint32_t c, Label* on_equal) {
+ __ cmpl(current_character(), Immediate(c));
+ BranchOrBacktrack(equal, on_equal);
+}
+
+
+void RegExpMacroAssemblerX64::CheckCharacterGT(uc16 limit, Label* on_greater) {
+ __ cmpl(current_character(), Immediate(limit));
+ BranchOrBacktrack(greater, on_greater);
+}
+
+
+void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
+ Label not_at_start;
+ // Did we start the match at the start of the string at all?
+ __ cmpb(Operand(rbp, kAtStart), Immediate(0));
+ BranchOrBacktrack(equal, &not_at_start);
+ // If we did, are we still at the start of the input?
+ __ lea(rax, Operand(rsi, rdi, times_1, 0));
+ __ cmpq(rax, Operand(rbp, kInputStart));
+ BranchOrBacktrack(equal, on_at_start);
+ __ bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
+ // Did we start the match at the start of the string at all?
+ __ cmpb(Operand(rbp, kAtStart), Immediate(0));
+ BranchOrBacktrack(equal, on_not_at_start);
+ // If we did, are we still at the start of the input?
+ __ lea(rax, Operand(rsi, rdi, times_1, 0));
+ __ cmpq(rax, Operand(rbp, kInputStart));
+ BranchOrBacktrack(not_equal, on_not_at_start);
+}
+
+
+void RegExpMacroAssemblerX64::CheckCharacterLT(uc16 limit, Label* on_less) {
+ __ cmpl(current_character(), Immediate(limit));
+ BranchOrBacktrack(less, on_less);
+}
+
+
+void RegExpMacroAssemblerX64::CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string) {
+ int byte_length = str.length() * char_size();
+ int byte_offset = cp_offset * char_size();
+ if (check_end_of_string) {
+ // Check that there are at least str.length() characters left in the input.
+ __ cmpl(rdi, Immediate(-(byte_offset + byte_length)));
+ BranchOrBacktrack(greater, on_failure);
+ }
+
+ if (on_failure == NULL) {
+ // Instead of inlining a backtrack, (re)use the global backtrack target.
+ on_failure = &backtrack_label_;
+ }
+
+ // TODO(lrn): Test multiple characters at a time by loading 4 or 8 bytes
+ // at a time.
+ for (int i = 0; i < str.length(); i++) {
+ if (mode_ == ASCII) {
+ __ cmpb(Operand(rsi, rdi, times_1, byte_offset + i),
+ Immediate(static_cast<int8_t>(str[i])));
+ } else {
+ ASSERT(mode_ == UC16);
+ __ cmpw(Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)),
+ Immediate(str[i]));
+ }
+ BranchOrBacktrack(not_equal, on_failure);
+ }
+}
+
+
+void RegExpMacroAssemblerX64::CheckGreedyLoop(Label* on_equal) {
+ Label fallthrough;
+ __ cmpl(rdi, Operand(backtrack_stackpointer(), 0));
+ __ j(not_equal, &fallthrough);
+ Drop();
+ BranchOrBacktrack(no_condition, on_equal);
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+ __ movq(rdx, register_location(start_reg)); // Offset of start of capture
+ __ movq(rbx, register_location(start_reg + 1)); // Offset of end of capture
+ __ subq(rbx, rdx); // Length of capture.
+
+ // -----------------------
+ // rdx = Start offset of capture.
+ // rbx = Length of capture
+
+  // If length is negative, this code will fail (it's a symptom of a partial
+  // or illegal capture where the start of the capture is after its end).
+ // This must not happen (no back-reference can reference a capture that wasn't
+ // closed before in the reg-exp, and we must not generate code that can cause
+ // this condition).
+
+ // If length is zero, either the capture is empty or it is nonparticipating.
+ // In either case succeed immediately.
+ __ j(equal, &fallthrough);
+
+ if (mode_ == ASCII) {
+ Label loop_increment;
+ if (on_no_match == NULL) {
+ on_no_match = &backtrack_label_;
+ }
+
+ __ lea(r9, Operand(rsi, rdx, times_1, 0));
+ __ lea(r11, Operand(rsi, rdi, times_1, 0));
+ __ addq(rbx, r9); // End of capture
+ // ---------------------
+ // r11 - current input character address
+ // r9 - current capture character address
+ // rbx - end of capture
+
+ Label loop;
+ __ bind(&loop);
+ __ movzxbl(rdx, Operand(r9, 0));
+ __ movzxbl(rax, Operand(r11, 0));
+ // al - input character
+ // dl - capture character
+ __ cmpb(rax, rdx);
+ __ j(equal, &loop_increment);
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
+ // I.e., if or-ing with 0x20 makes values equal and in range 'a'-'z', it's
+ // a match.
+ __ or_(rax, Immediate(0x20)); // Convert match character to lower-case.
+ __ or_(rdx, Immediate(0x20)); // Convert capture character to lower-case.
+ __ cmpb(rax, rdx);
+ __ j(not_equal, on_no_match); // Definitely not equal.
+ __ subb(rax, Immediate('a'));
+ __ cmpb(rax, Immediate('z' - 'a'));
+ __ j(above, on_no_match); // Weren't letters anyway.
+
+ __ bind(&loop_increment);
+ // Increment pointers into match and capture strings.
+ __ addq(r11, Immediate(1));
+ __ addq(r9, Immediate(1));
+ // Compare to end of capture, and loop if not done.
+ __ cmpq(r9, rbx);
+ __ j(below, &loop);
+
+ // Compute new value of character position after the matched part.
+ __ movq(rdi, r11);
+ __ subq(rdi, rsi);
+ } else {
+ ASSERT(mode_ == UC16);
+ // Save important/volatile registers before calling C function.
+#ifndef __MSVC__
+  // rsi and rdi are callee-save on Win64, but not in the AMD64 ABI,
+  // so save them here.
+ __ push(rsi);
+ __ push(rdi);
+#endif
+ __ push(backtrack_stackpointer());
+
+ int num_arguments = 3;
+ FrameAlign(num_arguments);
+
+ // Put arguments into parameter registers. Parameters are
+ // Address byte_offset1 - Address captured substring's start.
+ // Address byte_offset2 - Address of current character position.
+ // size_t byte_length - length of capture in bytes(!)
+#ifdef __MSVC__
+ // Compute and set byte_offset1 (start of capture).
+ __ lea(rcx, Operand(rsi, rdx, times_1, 0));
+ // Set byte_offset2.
+ __ lea(rdx, Operand(rsi, rdi, times_1, 0));
+ // Set byte_length.
+ __ movq(r8, rbx);
+#else // AMD64 calling convention
+ // Compute byte_offset2 (current position = rsi+rdi).
+ __ lea(rax, Operand(rsi, rdi, times_1, 0));
+ // Compute and set byte_offset1 (start of capture).
+ __ lea(rdi, Operand(rsi, rdx, times_1, 0));
+ // Set byte_offset2.
+ __ movq(rsi, rax);
+ // Set byte_length.
+ __ movq(rdx, rbx);
+#endif
+ Address function_address = FUNCTION_ADDR(&CaseInsensitiveCompareUC16);
+ CallCFunction(function_address, num_arguments);
+
+ // Restore original values before reacting on result value.
+ __ Move(code_object_pointer(), masm_->CodeObject());
+ __ pop(backtrack_stackpointer());
+#ifndef __MSVC__
+ __ pop(rdi);
+ __ pop(rsi);
+#endif
+
+ // Check if function returned non-zero for success or zero for failure.
+ __ testq(rax, rax);
+ BranchOrBacktrack(zero, on_no_match);
+ // On success, increment position by length of capture.
+ // Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
+ __ addq(rdi, rbx);
+ }
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerX64::CheckNotBackReference(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+
+ // Find length of back-referenced capture.
+ __ movq(rdx, register_location(start_reg));
+ __ movq(rax, register_location(start_reg + 1));
+ __ subq(rax, rdx); // Length to check.
+
+ // Fail on partial or illegal capture (start of capture after end of capture).
+ // This must not happen (no back-reference can reference a capture that wasn't
+ // closed before in the reg-exp).
+ __ Check(greater_equal, "Invalid capture referenced");
+
+ // Succeed on empty capture (including non-participating capture)
+ __ j(equal, &fallthrough);
+
+ // -----------------------
+ // rdx - Start of capture
+ // rax - length of capture
+
+ // Check that there are sufficient characters left in the input.
+ __ movl(rbx, rdi);
+ __ addl(rbx, rax);
+ BranchOrBacktrack(greater, on_no_match);
+
+ // Compute pointers to match string and capture string
+ __ lea(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
+ __ addq(rdx, rsi); // Start of capture.
+ __ lea(r9, Operand(rdx, rax, times_1, 0)); // End of capture
+
+ // -----------------------
+  // rdx - current capture character address.
+  // rbx - current input character address.
+  // r9 - end of capture (rdx plus capture length).
+
+ Label loop;
+ __ bind(&loop);
+ if (mode_ == ASCII) {
+ __ movzxbl(rax, Operand(rdx, 0));
+ __ cmpb(rax, Operand(rbx, 0));
+ } else {
+ ASSERT(mode_ == UC16);
+ __ movzxwl(rax, Operand(rdx, 0));
+ __ cmpw(rax, Operand(rbx, 0));
+ }
+ BranchOrBacktrack(not_equal, on_no_match);
+ // Increment pointers into capture and match string.
+ __ addq(rbx, Immediate(char_size()));
+ __ addq(rdx, Immediate(char_size()));
+ // Check if we have reached end of match area.
+ __ cmpq(rdx, r9);
+ __ j(below, &loop);
+
+ // Success.
+ // Set current character position to position after match.
+ __ movq(rdi, rbx);
+ __ subq(rdi, rsi);
+
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerX64::CheckNotRegistersEqual(int reg1,
+ int reg2,
+ Label* on_not_equal) {
+ __ movq(rax, register_location(reg1));
+ __ cmpq(rax, register_location(reg2));
+ BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerX64::CheckNotCharacter(uint32_t c,
+ Label* on_not_equal) {
+ __ cmpl(current_character(), Immediate(c));
+ BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerX64::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
+ __ movl(rax, current_character());
+ __ and_(rax, Immediate(mask));
+ __ cmpl(rax, Immediate(c));
+ BranchOrBacktrack(equal, on_equal);
+}
+
+
+void RegExpMacroAssemblerX64::CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal) {
+ __ movl(rax, current_character());
+ __ and_(rax, Immediate(mask));
+ __ cmpl(rax, Immediate(c));
+ BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
+ uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal) {
+ ASSERT(minus < String::kMaxUC16CharCode);
+ __ lea(rax, Operand(current_character(), -minus));
+ __ and_(rax, Immediate(mask));
+ __ cmpl(rax, Immediate(c));
+ BranchOrBacktrack(not_equal, on_not_equal);
+}
+
+
+bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
+ int cp_offset,
+ bool check_offset,
+ Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+ // (c - min) <= (max - min) check
+ switch (type) {
+ case 's':
+ // Match space-characters
+ if (mode_ == ASCII) {
+ // ASCII space characters are '\t'..'\r' and ' '.
+ if (check_offset) {
+ LoadCurrentCharacter(cp_offset, on_no_match);
+ } else {
+ LoadCurrentCharacterUnchecked(cp_offset, 1);
+ }
+ Label success;
+ __ cmpl(current_character(), Immediate(' '));
+ __ j(equal, &success);
+ // Check range 0x09..0x0d
+ __ subl(current_character(), Immediate('\t'));
+ __ cmpl(current_character(), Immediate('\r' - '\t'));
+ BranchOrBacktrack(above, on_no_match);
+ __ bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // Match non-space characters.
+ if (check_offset) {
+ LoadCurrentCharacter(cp_offset, on_no_match, 1);
+ } else {
+ LoadCurrentCharacterUnchecked(cp_offset, 1);
+ }
+ if (mode_ == ASCII) {
+ // ASCII space characters are '\t'..'\r' and ' '.
+ __ cmpl(current_character(), Immediate(' '));
+ BranchOrBacktrack(equal, on_no_match);
+ __ subl(current_character(), Immediate('\t'));
+ __ cmpl(current_character(), Immediate('\r' - '\t'));
+ BranchOrBacktrack(below_equal, on_no_match);
+ return true;
+ }
+ return false;
+ case 'd':
+ // Match ASCII digits ('0'..'9')
+ if (check_offset) {
+ LoadCurrentCharacter(cp_offset, on_no_match, 1);
+ } else {
+ LoadCurrentCharacterUnchecked(cp_offset, 1);
+ }
+ __ subl(current_character(), Immediate('0'));
+ __ cmpl(current_character(), Immediate('9' - '0'));
+ BranchOrBacktrack(above, on_no_match);
+ return true;
+ case 'D':
+      // Match anything but ASCII digits.
+ if (check_offset) {
+ LoadCurrentCharacter(cp_offset, on_no_match, 1);
+ } else {
+ LoadCurrentCharacterUnchecked(cp_offset, 1);
+ }
+ __ subl(current_character(), Immediate('0'));
+ __ cmpl(current_character(), Immediate('9' - '0'));
+ BranchOrBacktrack(below_equal, on_no_match);
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ if (check_offset) {
+ LoadCurrentCharacter(cp_offset, on_no_match, 1);
+ } else {
+ LoadCurrentCharacterUnchecked(cp_offset, 1);
+ }
+ __ xor_(current_character(), Immediate(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+ __ subl(current_character(), Immediate(0x0b));
+ __ cmpl(current_character(), Immediate(0x0c - 0x0b));
+ BranchOrBacktrack(below_equal, on_no_match);
+ if (mode_ == UC16) {
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ subl(current_character(), Immediate(0x2028 - 0x0b));
+ __ cmpl(current_character(), Immediate(1));
+ BranchOrBacktrack(below_equal, on_no_match);
+ }
+ return true;
+ }
+ case '*':
+ // Match any character.
+ if (check_offset) {
+ CheckPosition(cp_offset, on_no_match);
+ }
+ return true;
+ // No custom implementation (yet): w, W, s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
+
+
+void RegExpMacroAssemblerX64::Fail() {
+ ASSERT(FAILURE == 0); // Return value for failure is zero.
+ __ xor_(rax, rax); // zero rax.
+ __ jmp(&exit_label_);
+}
+
+
+Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
+ // Finalize code - write the entry point code now we know how many
+ // registers we need.
+
+ // Entry code:
+ __ bind(&entry_label_);
+ // Start new stack frame.
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ // Save parameters and callee-save registers. Order here should correspond
+ // to order of kBackup_ebx etc.
+#ifdef __MSVC__
+ // MSVC passes arguments in rcx, rdx, r8, r9, with backing stack slots.
+ // Store register parameters in pre-allocated stack slots,
+ __ movq(Operand(rbp, kInputString), rcx);
+ __ movq(Operand(rbp, kStartIndex), rdx);
+ __ movq(Operand(rbp, kInputStart), r8);
+ __ movq(Operand(rbp, kInputEnd), r9);
+ // Callee-save on Win64.
+ __ push(rsi);
+ __ push(rdi);
+ __ push(rbx);
+#else
+ // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9 (and then on stack).
+ // Push register parameters on stack for reference.
+ ASSERT_EQ(kInputString, -1 * kPointerSize);
+ ASSERT_EQ(kStartIndex, -2 * kPointerSize);
+ ASSERT_EQ(kInputStart, -3 * kPointerSize);
+ ASSERT_EQ(kInputEnd, -4 * kPointerSize);
+ ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
+ ASSERT_EQ(kAtStart, -6 * kPointerSize);
+ __ push(rdi);
+ __ push(rsi);
+ __ push(rdx);
+ __ push(rcx);
+ __ push(r8);
+ __ push(r9);
+
+ __ push(rbx); // Callee-save
+#endif
+ __ push(Immediate(0)); // Make room for "input start - 1" constant.
+
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_guard_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ __ movq(rcx, rsp);
+ __ movq(kScratchRegister, stack_guard_limit);
+ __ subq(rcx, Operand(kScratchRegister, 0));
+ // Handle it if the stack pointer is already below the stack limit.
+ __ j(below_equal, &stack_limit_hit);
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ cmpq(rcx, Immediate(num_registers_ * kPointerSize));
+ __ j(above_equal, &stack_ok);
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ movq(rax, Immediate(EXCEPTION));
+ __ jmp(&exit_label_);
+
+ __ bind(&stack_limit_hit);
+ __ Move(code_object_pointer(), masm_->CodeObject());
+  CallCheckStackGuardState();  // Preserves no registers except rbp and rsp.
+ __ testq(rax, rax);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ j(not_zero, &exit_label_);
+
+ __ bind(&stack_ok);
+
+ // Allocate space on stack for registers.
+ __ subq(rsp, Immediate(num_registers_ * kPointerSize));
+ // Load string length.
+ __ movq(rsi, Operand(rbp, kInputEnd));
+ // Load input position.
+ __ movq(rdi, Operand(rbp, kInputStart));
+ // Set up rdi to be negative offset from string end.
+ __ subq(rdi, rsi);
+ // Set rax to address of char before start of input
+ // (effectively string position -1).
+ __ lea(rax, Operand(rdi, -char_size()));
+ // Store this value in a local variable, for use when clearing
+ // position registers.
+ __ movq(Operand(rbp, kInputStartMinusOne), rax);
+ if (num_saved_registers_ > 0) {
+ // Fill saved registers with initial value = start offset - 1
+ // Fill in stack push order, to avoid accessing across an unwritten
+ // page (a problem on Windows).
+ __ movq(rcx, Immediate(kRegisterZero));
+ Label init_loop;
+ __ bind(&init_loop);
+ __ movq(Operand(rbp, rcx, times_1, 0), rax);
+ __ subq(rcx, Immediate(kPointerSize));
+ __ cmpq(rcx,
+ Immediate(kRegisterZero - num_saved_registers_ * kPointerSize));
+ __ j(greater, &init_loop);
+ }
+ // Ensure that we have written to each stack page, in order. Skipping a page
+ // on Windows can cause segmentation faults. Assuming page size is 4k.
+ const int kPageSize = 4096;
+ const int kRegistersPerPage = kPageSize / kPointerSize;
+ for (int i = num_saved_registers_ + kRegistersPerPage - 1;
+ i < num_registers_;
+ i += kRegistersPerPage) {
+ __ movq(register_location(i), rax); // One write every page.
+ }
+
+ // Initialize backtrack stack pointer.
+ __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+ // Initialize code object pointer.
+ __ Move(code_object_pointer(), masm_->CodeObject());
+ // Load previous char as initial value of current-character.
+ Label at_start;
+ __ cmpq(Operand(rbp, kAtStart), Immediate(0));
+ __ j(not_equal, &at_start);
+ LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
+ __ jmp(&start_label_);
+ __ bind(&at_start);
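+  // There is no previous character at the very start of the input; pretend
+  // it was a newline so that checks which look one character back behave
+  // consistently.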
+ __ movq(current_character(), Immediate('\n'));
+ __ jmp(&start_label_);
+
+
+ // Exit code:
+ if (success_label_.is_linked()) {
+ // Save captures when successful.
+ __ bind(&success_label_);
+ if (num_saved_registers_ > 0) {
+      // Copy captures to output.
+ __ movq(rbx, Operand(rbp, kRegisterOutput));
+ __ movq(rcx, Operand(rbp, kInputEnd));
+ __ subq(rcx, Operand(rbp, kInputStart));
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ movq(rax, register_location(i));
+ __ addq(rax, rcx); // Convert to index from start, not end.
+ if (mode_ == UC16) {
+ __ sar(rax, Immediate(1)); // Convert byte index to character index.
+ }
+ __ movl(Operand(rbx, i * kIntSize), rax);
+ }
+ }
+ __ movq(rax, Immediate(SUCCESS));
+ }
+
+  // Exit and return rax.
+ __ bind(&exit_label_);
+
+#ifdef __MSVC__
+ // Restore callee save registers.
+ __ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
+ __ pop(rbx);
+ __ pop(rdi);
+ __ pop(rsi);
+ // Stack now at rbp.
+#else
+ // Restore callee save register.
+ __ movq(rbx, Operand(rbp, kBackup_rbx));
+ // Skip rsp to rbp.
+ __ movq(rsp, rbp);
+#endif
+ // Exit function frame, restore previous one.
+ __ pop(rbp);
+ __ ret(0);
+
+ // Backtrack code (branch target for conditional backtracks).
+ if (backtrack_label_.is_linked()) {
+ __ bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ Label exit_with_exception;
+
+ // Preempt-code
+ if (check_preempt_label_.is_linked()) {
+ SafeCallTarget(&check_preempt_label_);
+
+ __ push(backtrack_stackpointer());
+ __ push(rdi);
+
+ CallCheckStackGuardState();
+ __ testq(rax, rax);
+ // If returning non-zero, we should end execution with the given
+ // result as return value.
+ __ j(not_zero, &exit_label_);
+
+ // Restore registers.
+ __ Move(code_object_pointer(), masm_->CodeObject());
+ __ pop(rdi);
+ __ pop(backtrack_stackpointer());
+    // String might have moved: Reload rsi from frame.
+ __ movq(rsi, Operand(rbp, kInputEnd));
+ SafeReturn();
+ }
+
+ // Backtrack stack overflow code.
+ if (stack_overflow_label_.is_linked()) {
+ SafeCallTarget(&stack_overflow_label_);
+ // Reached if the backtrack-stack limit has been hit.
+
+ Label grow_failed;
+    // Save registers before calling C function.
+#ifndef __MSVC__
+ // Callee-save in Microsoft 64-bit ABI, but not in AMD64 ABI.
+ __ push(rsi);
+ __ push(rdi);
+#endif
+
+ // Call GrowStack(backtrack_stackpointer())
+ int num_arguments = 2;
+ FrameAlign(num_arguments);
+#ifdef __MSVC__
+ // Microsoft passes parameters in rcx, rdx.
+ // First argument, backtrack stackpointer, is already in rcx.
+ __ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument
+#else
+    // The AMD64 ABI passes parameters in rdi, rsi.
+ __ movq(rdi, backtrack_stackpointer()); // First argument.
+ __ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
+#endif
+ CallCFunction(FUNCTION_ADDR(&GrowStack), num_arguments);
+  // If NULL is returned, we have failed to grow the stack, and
+  // must exit with a stack-overflow exception.
+ __ testq(rax, rax);
+ __ j(equal, &exit_with_exception);
+ // Otherwise use return value as new stack pointer.
+ __ movq(backtrack_stackpointer(), rax);
+ // Restore saved registers and continue.
+ __ Move(code_object_pointer(), masm_->CodeObject());
+#ifndef __MSVC__
+ __ pop(rdi);
+ __ pop(rsi);
+#endif
+ SafeReturn();
+ }
+
+ if (exit_with_exception.is_linked()) {
+ // If any of the code above needed to exit with an exception.
+ __ bind(&exit_with_exception);
+ // Exit with Result EXCEPTION(-1) to signal thrown exception.
+ __ movq(rax, Immediate(EXCEPTION));
+ __ jmp(&exit_label_);
+ }
+
+ FixupCodeRelativePositions();
+
+ CodeDesc code_desc;
+ masm_->GetCode(&code_desc);
+ Handle<Code> code = Factory::NewCode(code_desc,
+ NULL,
+ Code::ComputeFlags(Code::REGEXP),
+ masm_->CodeObject());
+ LOG(RegExpCodeCreateEvent(*code, *source));
+ return Handle<Object>::cast(code);
+}
+
+
+void RegExpMacroAssemblerX64::GoTo(Label* to) {
+ BranchOrBacktrack(no_condition, to);
+}
+
+
+void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
+ int comparand,
+ Label* if_ge) {
+ __ cmpq(register_location(reg), Immediate(comparand));
+ BranchOrBacktrack(greater_equal, if_ge);
+}
+
+
+void RegExpMacroAssemblerX64::IfRegisterLT(int reg,
+ int comparand,
+ Label* if_lt) {
+ __ cmpq(register_location(reg), Immediate(comparand));
+ BranchOrBacktrack(less, if_lt);
+}
+
+
+void RegExpMacroAssemblerX64::IfRegisterEqPos(int reg,
+ Label* if_eq) {
+ __ cmpq(rdi, register_location(reg));
+ BranchOrBacktrack(equal, if_eq);
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+ RegExpMacroAssemblerX64::Implementation() {
+ return kX64Implementation;
+}
+
+
+void RegExpMacroAssemblerX64::LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds,
+ int characters) {
+ ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
+ ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerX64::PopCurrentPosition() {
+ Pop(rdi);
+}
+
+
+void RegExpMacroAssemblerX64::PopRegister(int register_index) {
+ Pop(rax);
+ __ movq(register_location(register_index), rax);
+}
+
+
+void RegExpMacroAssemblerX64::PushBacktrack(Label* label) {
+ Push(label);
+ CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerX64::PushCurrentPosition() {
+ Push(rdi);
+}
+
+
+void RegExpMacroAssemblerX64::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
+ __ movq(rax, register_location(register_index));
+ Push(rax);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) {
+ __ movq(rdi, register_location(reg));
+}
+
+
+void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
+ __ movq(backtrack_stackpointer(), register_location(reg));
+ __ addq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+}
+
+
+void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
+ ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
+ __ movq(register_location(register_index), Immediate(to));
+}
+
+
+void RegExpMacroAssemblerX64::Succeed() {
+ __ jmp(&success_label_);
+}
+
+
+void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
+ int cp_offset) {
+ if (cp_offset == 0) {
+ __ movq(register_location(reg), rdi);
+ } else {
+ __ lea(rax, Operand(rdi, cp_offset * char_size()));
+ __ movq(register_location(reg), rax);
+ }
+}
+
+
+void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
+ ASSERT(reg_from <= reg_to);
+ __ movq(rax, Operand(rbp, kInputStartMinusOne));
+ for (int reg = reg_from; reg <= reg_to; reg++) {
+ __ movq(register_location(reg), rax);
+ }
+}
+
+
+void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
+ __ movq(rax, backtrack_stackpointer());
+ __ subq(rax, Operand(rbp, kStackHighEnd));
+ __ movq(register_location(reg), rax);
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
+  // This function call preserves no register values. The caller must itself
+  // save anything that is volatile across a C call or that this function
+  // overwrites.
+ int num_arguments = 3;
+ FrameAlign(num_arguments);
+#ifdef __MSVC__
+ // Second argument: Code* of self. (Do this before overwriting r8).
+ __ movq(rdx, code_object_pointer());
+ // Third argument: RegExp code frame pointer.
+ __ movq(r8, rbp);
+ // First argument: Next address on the stack (will be address of
+ // return address).
+ __ lea(rcx, Operand(rsp, -kPointerSize));
+#else
+ // Third argument: RegExp code frame pointer.
+ __ movq(rdx, rbp);
+ // Second argument: Code* of self.
+ __ movq(rsi, code_object_pointer());
+ // First argument: Next address on the stack (will be address of
+ // return address).
+ __ lea(rdi, Operand(rsp, -kPointerSize));
+#endif
+ CallCFunction(FUNCTION_ADDR(&CheckStackGuardState), num_arguments);
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+}
+
+
+int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame) {
+ if (StackGuard::IsStackOverflow()) {
+ Top::StackOverflow();
+ return EXCEPTION;
+ }
+
+  // If it is not a real stack overflow, the stack guard was used to
+  // interrupt execution for another purpose.
+
+ // Prepare for possible GC.
+ HandleScope handles;
+ Handle<Code> code_handle(re_code);
+
+ Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+ // Current string.
+ bool is_ascii = subject->IsAsciiRepresentation();
+
+ ASSERT(re_code->instruction_start() <= *return_address);
+ ASSERT(*return_address <=
+ re_code->instruction_start() + re_code->instruction_size());
+
+ Object* result = Execution::HandleStackGuardInterrupt();
+
+ if (*code_handle != re_code) { // Return address no longer valid
+ intptr_t delta = *code_handle - re_code;
+ // Overwrite the return address on the stack.
+ *return_address += delta;
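+    // (delta is the distance the code object moved; shifting the return
+    // address by the same amount retargets it into the relocated copy.)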
+ }
+
+ if (result->IsException()) {
+ return EXCEPTION;
+ }
+
+ // String might have changed.
+ if (subject->IsAsciiRepresentation() != is_ascii) {
+    // If we changed between an ASCII and a UC16 string, the specialized
+ // code cannot be used, and we need to restart regexp matching from
+ // scratch (including, potentially, compiling a new version of the code).
+ return RETRY;
+ }
+
+ // Otherwise, the content of the string might have moved. It must still
+ // be a sequential or external string with the same content.
+ // Update the start and end pointers in the stack frame to the current
+ // location (whether it has actually moved or not).
+ ASSERT(StringShape(*subject).IsSequential() ||
+ StringShape(*subject).IsExternal());
+
+ // The original start address of the characters to match.
+ const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+ // Find the current start address of the same character at the current string
+ // position.
+ int start_index = frame_entry<int>(re_frame, kStartIndex);
+ const byte* new_address = StringCharacterPosition(*subject, start_index);
+
+ if (start_address != new_address) {
+ // If there is a difference, update the object pointer and start and end
+ // addresses in the RegExp stack frame to match the new value.
+    const byte* end_address = frame_entry<const byte*>(re_frame, kInputEnd);
+ int byte_length = end_address - start_address;
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
+ frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+ frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+ }
+
+ return 0;
+}
+
+
+Address RegExpMacroAssemblerX64::GrowStack(Address stack_pointer,
+ Address* stack_base) {
+ size_t size = RegExpStack::stack_capacity();
+ Address old_stack_base = RegExpStack::stack_base();
+ ASSERT(old_stack_base == *stack_base);
+ ASSERT(stack_pointer <= old_stack_base);
+ ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
+ Address new_stack_base = RegExpStack::EnsureCapacity(size * 2);
+ if (new_stack_base == NULL) {
+ return NULL;
+ }
+ *stack_base = new_stack_base;
+ intptr_t stack_content_size = old_stack_base - stack_pointer;
+ return new_stack_base - stack_content_size;
+}
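+
+// For example, if the old stack was 4KB with 1KB of backtrack data in use,
+// EnsureCapacity grows it to 8KB, *stack_base is moved to the new high end,
+// and the returned pointer is new_stack_base - 1KB, so the live data keeps
+// its offset from the (downward-growing) base.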
+
+
+Operand RegExpMacroAssemblerX64::register_location(int register_index) {
+ ASSERT(register_index < (1<<30));
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ return Operand(rbp, kRegisterZero - register_index * kPointerSize);
+}
+
+
+void RegExpMacroAssemblerX64::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ __ cmpl(rdi, Immediate(-cp_offset * char_size()));
+ BranchOrBacktrack(greater_equal, on_outside_input);
+}
+
+
+void RegExpMacroAssemblerX64::BranchOrBacktrack(Condition condition,
+ Label* to) {
+ if (condition < 0) { // No condition
+ if (to == NULL) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+ }
+ if (to == NULL) {
+ __ j(condition, &backtrack_label_);
+ return;
+ }
+ __ j(condition, to);
+}
+
+
+void RegExpMacroAssemblerX64::SafeCall(Label* to) {
+ __ call(to);
+}
+
+
+void RegExpMacroAssemblerX64::SafeCallTarget(Label* label) {
+ __ bind(label);
+ __ subq(Operand(rsp, 0), code_object_pointer());
+}
+
+
+void RegExpMacroAssemblerX64::SafeReturn() {
+ __ addq(Operand(rsp, 0), code_object_pointer());
+ __ ret(0);
+}
+
+
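+// Backtrack stack entries are 32 bits wide (kIntSize): stored input positions
+// are negative offsets from the string end, and pushed backtrack targets are
+// code-relative offsets, so both fit in an int even on x64.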
+void RegExpMacroAssemblerX64::Push(Register source) {
+ ASSERT(!source.is(backtrack_stackpointer()));
+ // Notice: This updates flags, unlike normal Push.
+ __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ movl(Operand(backtrack_stackpointer(), 0), source);
+}
+
+
+void RegExpMacroAssemblerX64::Push(Immediate value) {
+ // Notice: This updates flags, unlike normal Push.
+ __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ movl(Operand(backtrack_stackpointer(), 0), value);
+}
+
+
+void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
+ for (int i = 0, n = code_relative_fixup_positions_.length(); i < n; i++) {
+ int position = code_relative_fixup_positions_[i];
+    // The recorded position is just past a 32-bit offset that is relative
+    // to that position. Patch the offset to be relative to the Code object
+    // pointer instead.
+ int patch_position = position - kIntSize;
+ int offset = masm_->long_at(patch_position);
+ masm_->long_at_put(patch_position,
+ offset
+ + position
+ + Code::kHeaderSize
+ - kHeapObjectTag);
+ }
+ code_relative_fixup_positions_.Clear();
+}
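+
+// After patching, a pushed backtrack entry holds the label's offset from the
+// (tagged) Code object pointer rather than from its own position, so the
+// absolute target can be recomputed as code_object_pointer() plus the entry
+// even if a GC has moved the code object.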
+
+
+void RegExpMacroAssemblerX64::Push(Label* backtrack_target) {
+ __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ movl(Operand(backtrack_stackpointer(), 0), backtrack_target);
+ MarkPositionForCodeRelativeFixup();
+}
+
+
+void RegExpMacroAssemblerX64::Pop(Register target) {
+ ASSERT(!target.is(backtrack_stackpointer()));
+ __ movsxlq(target, Operand(backtrack_stackpointer(), 0));
+ // Notice: This updates flags, unlike normal Pop.
+ __ addq(backtrack_stackpointer(), Immediate(kIntSize));
+}
+
+
+void RegExpMacroAssemblerX64::Drop() {
+ __ addq(backtrack_stackpointer(), Immediate(kIntSize));
+}
+
+
+void RegExpMacroAssemblerX64::CheckPreemption() {
+ // Check for preemption.
+ Label no_preempt;
+ ExternalReference stack_guard_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ __ load_rax(stack_guard_limit);
+ __ cmpq(rsp, rax);
+ __ j(above, &no_preempt);
+
+ SafeCall(&check_preempt_label_);
+
+ __ bind(&no_preempt);
+}
+
+
+void RegExpMacroAssemblerX64::CheckStackLimit() {
+ if (FLAG_check_stack) {
+ Label no_stack_overflow;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit();
+ __ load_rax(stack_limit);
+ __ cmpq(backtrack_stackpointer(), rax);
+ __ j(above, &no_stack_overflow);
+
+ SafeCall(&stack_overflow_label_);
+
+ __ bind(&no_stack_overflow);
+ }
+}
+
+
+void RegExpMacroAssemblerX64::FrameAlign(int num_arguments) {
+  // TODO(lrn): Since we no longer use the system stack arbitrarily (though we
+  // do use it, e.g., for SafeCall), we know the number of elements on the
+  // stack since the last frame alignment, so we might be able to simplify
+  // this.
+  int frame_alignment = OS::ActivationFrameAlignment();
+  ASSERT(frame_alignment != 0);
+  // Make stack end at alignment and make room for num_arguments pointers
+  // (on Win64 only) and the original value of rsp.
+  __ movq(kScratchRegister, rsp);
+  ASSERT(IsPowerOf2(frame_alignment));
+#ifdef __MSVC__
+  // Allocate space for parameters and old rsp.
+  __ subq(rsp, Immediate((num_arguments + 1) * kPointerSize));
+  __ and_(rsp, Immediate(-frame_alignment));
+  __ movq(Operand(rsp, num_arguments * kPointerSize), kScratchRegister);
+#else
+  // Allocate space for old rsp.
+  __ subq(rsp, Immediate(kPointerSize));
+  __ and_(rsp, Immediate(-frame_alignment));
+  __ movq(Operand(rsp, 0), kScratchRegister);
+#endif
+}
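+
+// On the AMD64 path the original rsp ends up at the aligned rsp[0], which is
+// why CallCFunction below can restore it with a single pop(rsp); on Win64 it
+// is stored above the argument slots instead.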
+
+
+void RegExpMacroAssemblerX64::CallCFunction(Address function_address,
+ int num_arguments) {
+  // Don't compile regexps with serialization enabled. The address of the C++
+  // function being called isn't relocatable.
+ ASSERT(!Serializer::enabled());
+ __ movq(rax, reinterpret_cast<intptr_t>(function_address), RelocInfo::NONE);
+ __ call(rax);
+ ASSERT(OS::ActivationFrameAlignment() != 0);
+#ifdef __MSVC__
+ __ movq(rsp, Operand(rsp, num_arguments * kPointerSize));
+#else
+ __ pop(rsp);
+#endif
+}
+
+
+void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
+ int characters) {
+ if (mode_ == ASCII) {
+ if (characters == 4) {
+ __ movl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
+ } else if (characters == 2) {
+ __ movzxwl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
+ } else {
+ ASSERT(characters == 1);
+ __ movzxbl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
+ }
+ } else {
+ ASSERT(mode_ == UC16);
+ if (characters == 2) {
+ __ movl(current_character(),
+ Operand(rsi, rdi, times_1, cp_offset * sizeof(uc16)));
+ } else {
+ ASSERT(characters == 1);
+ __ movzxwl(current_character(),
+ Operand(rsi, rdi, times_1, cp_offset * sizeof(uc16)));
+ }
+ }
+}
+
+
+#undef __
+}} // namespace v8::internal
diff --git a/V8Binding/v8/src/x64/regexp-macro-assembler-x64.h b/V8Binding/v8/src/x64/regexp-macro-assembler-x64.h
index 209aa2d..a270bc1 100644
--- a/V8Binding/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/V8Binding/v8/src/x64/regexp-macro-assembler-x64.h
@@ -25,3 +25,271 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
+#define V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
+
+namespace v8 {
+namespace internal {
+
+class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerX64(Mode mode, int registers_to_save);
+ virtual ~RegExpMacroAssemblerX64();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(Label* on_at_start);
+ virtual void CheckCharacter(uint32_t c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ virtual void CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string);
+ // A "greedy loop" is a loop that is both greedy and with a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ Label* on_no_match);
+ virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
+ virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal);
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(uc16 type,
+ int cp_offset,
+ bool check_offset,
+ Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<Object> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetRegister(int register_index, int to);
+ virtual void Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+
+ static Result Match(Handle<Code> regexp,
+ Handle<String> subject,
+ int* offsets_vector,
+ int offsets_vector_length,
+ int previous_index);
+
+ static Result Execute(Code* code,
+ String* input,
+ int start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ bool at_start);
+
+ private:
+ // Offsets from rbp of function parameters and stored registers.
+ static const int kFramePointer = 0;
+ // Above the frame pointer - function parameters and return address.
+ static const int kReturn_eip = kFramePointer + kPointerSize;
+ static const int kFrameAlign = kReturn_eip + kPointerSize;
+
+#ifdef __MSVC__
+ // Parameters (first four passed as registers, but with room on stack).
+  // In the Microsoft 64-bit calling convention, there is room on the caller's
+  // stack (before the return address) to spill parameter registers. We
+  // use this space to store the register-passed parameters.
+ static const int kInputString = kFrameAlign;
+ static const int kStartIndex = kInputString + kPointerSize;
+ static const int kInputStart = kStartIndex + kPointerSize;
+ static const int kInputEnd = kInputStart + kPointerSize;
+ static const int kRegisterOutput = kInputEnd + kPointerSize;
+ static const int kAtStart = kRegisterOutput + kPointerSize;
+ static const int kStackHighEnd = kAtStart + kPointerSize;
+#else
+  // In the AMD64 ABI calling convention, the first six integer parameters
+  // are passed in registers, and the callee must allocate stack space itself
+  // if it wants them stored. We push the parameters after the frame pointer.
+ static const int kInputString = kFramePointer - kPointerSize;
+ static const int kStartIndex = kInputString - kPointerSize;
+ static const int kInputStart = kStartIndex - kPointerSize;
+ static const int kInputEnd = kInputStart - kPointerSize;
+ static const int kRegisterOutput = kInputEnd - kPointerSize;
+ static const int kAtStart = kRegisterOutput - kPointerSize;
+ static const int kStackHighEnd = kFrameAlign;
+#endif
+
+#ifdef __MSVC__
+  // The Microsoft calling convention has three callee-saved registers
+  // that we use. We push these after the frame pointer.
+ static const int kBackup_rsi = kFramePointer - kPointerSize;
+ static const int kBackup_rdi = kBackup_rsi - kPointerSize;
+ static const int kBackup_rbx = kBackup_rdi - kPointerSize;
+ static const int kLastCalleeSaveRegister = kBackup_rbx;
+#else
+  // The AMD64 calling convention has only one callee-saved register that
+ // we use. We push this after the frame pointer (and after the
+ // parameters).
+ static const int kBackup_rbx = kAtStart - kPointerSize;
+ static const int kLastCalleeSaveRegister = kBackup_rbx;
+#endif
+
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kInputStartMinusOne =
+ kLastCalleeSaveRegister - kPointerSize;
+
+ // First register address. Following registers are below it on the stack.
+ static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+
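+  // With kPointerSize == 8, the AMD64 (non-MSVC) constants above give the
+  // following frame layout relative to rbp:
+  //   rbp + 16 : kStackHighEnd        rbp +  8 : kReturn_eip
+  //   rbp +  0 : kFramePointer (saved rbp)
+  //   rbp -  8 : kInputString         rbp - 16 : kStartIndex
+  //   rbp - 24 : kInputStart          rbp - 32 : kInputEnd
+  //   rbp - 40 : kRegisterOutput      rbp - 48 : kAtStart
+  //   rbp - 56 : kBackup_rbx          rbp - 64 : kInputStartMinusOne
+  //   rbp - 72 : kRegisterZero (further registers below it)
+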
+ // Initial size of code buffer.
+ static const size_t kRegExpCodeSize = 1024;
+
+ // Load a number of characters at the given offset from the
+ // current position, into the current-character register.
+ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ static int CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame);
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState();
+
+ // Called from RegExp if the backtrack stack limit is hit.
+ // Tries to expand the stack. Returns the new stack-pointer if
+ // successful, and updates the stack_top address, or returns 0 if unable
+ // to grow the stack.
+ // This function must not trigger a garbage collection.
+ static Address GrowStack(Address stack_pointer, Address* stack_top);
+
+ // The rbp-relative location of a regexp register.
+ Operand register_location(int register_index);
+
+ // The register containing the current character after LoadCurrentCharacter.
+ inline Register current_character() { return rdx; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ inline Register backtrack_stackpointer() { return rcx; }
+
+  // The register containing a pointer to this code's own Code object.
+ inline Register code_object_pointer() { return r8; }
+
+  // Byte size of chars in the string to match (decided by the Mode argument).
+ inline int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is NULL, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Condition condition, Label* to);
+
+ void MarkPositionForCodeRelativeFixup() {
+ code_relative_fixup_positions_.Add(masm_->pc_offset());
+ }
+
+ void FixupCodeRelativePositions();
+
+ // Call and return internally in the generated code in a way that
+  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack).
+ inline void SafeCall(Label* to);
+ inline void SafeCallTarget(Label* label);
+ inline void SafeReturn();
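+  // SafeCallTarget rewrites the pushed return address into an offset from
+  // the Code object pointer, and SafeReturn adds the current pointer back
+  // on before returning, so the target stays valid even if a GC moves the
+  // generated code.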
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+  // stack pointer (rcx) by kIntSize and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pushes a value on the backtrack stack. Decrements the stack pointer (rcx)
+  // by kIntSize and stores the value there.
+ inline void Push(Immediate value);
+
+ // Pushes the Code object relative offset of a label on the backtrack stack
+ // (i.e., a backtrack target). Decrements the stack pointer (rcx)
+  // by kIntSize and stores the value there.
+ inline void Push(Label* label);
+
+  // Pops a value from the backtrack stack. Reads the int at the stack pointer
+  // (rcx), sign-extends it, and increments the pointer by kIntSize.
+ inline void Pop(Register target);
+
+ // Drops the top value from the backtrack stack without reading it.
+  // Increments the stack pointer (rcx) by kIntSize.
+ inline void Drop();
+
+  // Before calling a C-function from generated code, align arguments on the
+  // stack. After aligning the frame, arguments must be stored in rsp[0],
+  // rsp[8], etc., not pushed. The argument count assumes all arguments are
+  // word sized. Some compilers/platforms require the stack to be aligned
+  // when calling C++ code.
+  // Needs a scratch register to do some arithmetic. This register will be
+  // trashed.
+ inline void FrameAlign(int num_arguments);
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by FrameAlign. The called function is not allowed to trigger a garbage
+ // collection, since that might move the code and invalidate the return
+ // address (unless this is somehow accounted for by the called function).
+ inline void CallCFunction(Address function_address, int num_arguments);
+
+ MacroAssembler* masm_;
+
+ ZoneList<int> code_relative_fixup_positions_;
+
+ // Which mode to generate code for (ASCII or UC16).
+ Mode mode_;
+
+  // One greater than the maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+  // are always 0..num_saved_registers_-1).
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+};
+
+}} // namespace v8::internal
+
+#endif // V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
diff --git a/V8Binding/v8/src/x64/virtual-frame-x64.cc b/V8Binding/v8/src/x64/virtual-frame-x64.cc
index 888fdc2..fb911a1 100644
--- a/V8Binding/v8/src/x64/virtual-frame-x64.cc
+++ b/V8Binding/v8/src/x64/virtual-frame-x64.cc
@@ -206,6 +206,7 @@ void VirtualFrame::EmitPush(Handle<Object> value) {
void VirtualFrame::Drop(int count) {
+ ASSERT(count >= 0);
ASSERT(height() >= count);
int num_virtual_elements = (element_count() - 1) - stack_pointer_;
diff --git a/V8Binding/v8/test/cctest/cctest.status b/V8Binding/v8/test/cctest/cctest.status
index 9abe408..6c0a9df 100644
--- a/V8Binding/v8/test/cctest/cctest.status
+++ b/V8Binding/v8/test/cctest/cctest.status
@@ -113,6 +113,7 @@ test-debug/DebuggerUnload: CRASH || FAIL
test-debug/DebuggerHostDispatch: CRASH || FAIL
test-debug/DebugBreakInMessageHandler: CRASH || FAIL
test-debug/NoDebugBreakInAfterCompileMessageHandler: CRASH || FAIL
+test-debug/RegExpDebugBreak: FAIL
test-api/Threading: CRASH || FAIL
test-api/Threading2: PASS || TIMEOUT
test-api/TryCatchSourceInfo: CRASH || FAIL
diff --git a/V8Binding/v8/test/cctest/test-api.cc b/V8Binding/v8/test/cctest/test-api.cc
index 35ac031..e1caba3 100644
--- a/V8Binding/v8/test/cctest/test-api.cc
+++ b/V8Binding/v8/test/cctest/test-api.cc
@@ -7738,3 +7738,31 @@ THREADED_TEST(PixelArray) {
free(pixel_data);
}
+
+THREADED_TEST(ScriptContextDependence) {
+ v8::HandleScope scope;
+ LocalContext c1;
+  const char* source = "foo";
+ v8::Handle<v8::Script> dep = v8::Script::Compile(v8::String::New(source));
+ v8::Handle<v8::Script> indep = v8::Script::New(v8::String::New(source));
+ c1->Global()->Set(v8::String::New("foo"), v8::Integer::New(100));
+ CHECK_EQ(dep->Run()->Int32Value(), 100);
+ CHECK_EQ(indep->Run()->Int32Value(), 100);
+ LocalContext c2;
+ c2->Global()->Set(v8::String::New("foo"), v8::Integer::New(101));
+ CHECK_EQ(dep->Run()->Int32Value(), 100);
+ CHECK_EQ(indep->Run()->Int32Value(), 101);
+}
+
+THREADED_TEST(StackTrace) {
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::TryCatch try_catch;
+  const char* source = "function foo() { FAIL.FAIL; }; foo();";
+ v8::Handle<v8::String> src = v8::String::New(source);
+ v8::Handle<v8::String> origin = v8::String::New("stack-trace-test");
+ v8::Script::New(src, origin)->Run();
+ CHECK(try_catch.HasCaught());
+ v8::String::Utf8Value stack(try_catch.StackTrace());
+ CHECK(strstr(*stack, "at foo (stack-trace-test") != NULL);
+}
diff --git a/V8Binding/v8/test/cctest/test-debug.cc b/V8Binding/v8/test/cctest/test-debug.cc
index 9e2c38d..a86317a 100644
--- a/V8Binding/v8/test/cctest/test-debug.cc
+++ b/V8Binding/v8/test/cctest/test-debug.cc
@@ -5357,3 +5357,20 @@ TEST(NoDebugBreakInAfterCompileMessageHandler) {
v8::Debug::SetMessageHandler2(NULL);
CheckDebuggerUnloaded();
}
+
+
+TEST(GetMirror) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ v8::Handle<v8::Value> obj = v8::Debug::GetMirror(v8::String::New("hodja"));
+ v8::Handle<v8::Function> run_test = v8::Handle<v8::Function>::Cast(
+ v8::Script::New(
+ v8::String::New(
+ "function runTest(mirror) {"
+ " return mirror.isString() && (mirror.length() == 5);"
+ "}"
+ ""
+ "runTest;"))->Run());
+ v8::Handle<v8::Value> result = run_test->Call(env->Global(), 1, &obj);
+ CHECK(result->IsTrue());
+}
diff --git a/V8Binding/v8/test/cctest/test-regexp.cc b/V8Binding/v8/test/cctest/test-regexp.cc
index 8d8326c..89c7868 100644
--- a/V8Binding/v8/test/cctest/test-regexp.cc
+++ b/V8Binding/v8/test/cctest/test-regexp.cc
@@ -38,18 +38,21 @@
#include "jsregexp.h"
#include "regexp-macro-assembler.h"
#include "regexp-macro-assembler-irregexp.h"
+#ifdef V8_NATIVE_REGEXP
#ifdef V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#endif
#ifdef V8_TARGET_ARCH_X64
-// No X64-implementation yet.
+#include "x64/macro-assembler-x64.h"
+#include "x64/regexp-macro-assembler-x64.h"
#endif
#ifdef V8_TARGET_ARCH_IA32
#include "ia32/macro-assembler-ia32.h"
#include "ia32/regexp-macro-assembler-ia32.h"
#endif
+#else
#include "interpreter-irregexp.h"
-
+#endif
using namespace v8::internal;
@@ -599,75 +602,20 @@ TEST(DispatchTableConstruction) {
// Tests of interpreter.
-TEST(MacroAssembler) {
- V8::Initialize(NULL);
- byte codes[1024];
- RegExpMacroAssemblerIrregexp m(Vector<byte>(codes, 1024));
- // ^f(o)o.
- Label fail, fail2, start;
- uc16 foo_chars[3];
- foo_chars[0] = 'f';
- foo_chars[1] = 'o';
- foo_chars[2] = 'o';
- Vector<const uc16> foo(foo_chars, 3);
- m.SetRegister(4, 42);
- m.PushRegister(4, RegExpMacroAssembler::kNoStackLimitCheck);
- m.AdvanceRegister(4, 42);
- m.GoTo(&start);
- m.Fail();
- m.Bind(&start);
- m.PushBacktrack(&fail2);
- m.CheckCharacters(foo, 0, &fail, true);
- m.WriteCurrentPositionToRegister(0, 0);
- m.PushCurrentPosition();
- m.AdvanceCurrentPosition(3);
- m.WriteCurrentPositionToRegister(1, 0);
- m.PopCurrentPosition();
- m.AdvanceCurrentPosition(1);
- m.WriteCurrentPositionToRegister(2, 0);
- m.AdvanceCurrentPosition(1);
- m.WriteCurrentPositionToRegister(3, 0);
- m.Succeed();
-
- m.Bind(&fail);
- m.Backtrack();
- m.Succeed();
- m.Bind(&fail2);
- m.PopRegister(0);
- m.Fail();
-
- v8::HandleScope scope;
-
- Handle<String> source = Factory::NewStringFromAscii(CStrVector("^f(o)o"));
- Handle<ByteArray> array = Handle<ByteArray>::cast(m.GetCode(source));
- int captures[5];
-
- const uc16 str1[] = {'f', 'o', 'o', 'b', 'a', 'r'};
- Handle<String> f1_16 =
- Factory::NewStringFromTwoByte(Vector<const uc16>(str1, 6));
-
- CHECK(IrregexpInterpreter::Match(array, f1_16, captures, 0));
- CHECK_EQ(0, captures[0]);
- CHECK_EQ(3, captures[1]);
- CHECK_EQ(1, captures[2]);
- CHECK_EQ(2, captures[3]);
- CHECK_EQ(84, captures[4]);
-
- const uc16 str2[] = {'b', 'a', 'r', 'f', 'o', 'o'};
- Handle<String> f2_16 =
- Factory::NewStringFromTwoByte(Vector<const uc16>(str2, 6));
-
- CHECK(!IrregexpInterpreter::Match(array, f2_16, captures, 0));
- CHECK_EQ(42, captures[0]);
-}
-
-#ifdef V8_TARGET_ARCH_IA32 // IA32 Native Regexp only tests.
#ifdef V8_NATIVE_REGEXP
+#ifdef V8_TARGET_ARCH_IA32
+typedef RegExpMacroAssemblerIA32 ArchRegExpMacroAssembler;
+#endif
+#ifdef V8_TARGET_ARCH_X64
+typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
+#endif
+
class ContextInitializer {
public:
- ContextInitializer() : env_(), scope_(), stack_guard_() {
+ ContextInitializer()
+ : env_(), scope_(), zone_(DELETE_ON_EXIT), stack_guard_() {
env_ = v8::Context::New();
env_->Enter();
}
@@ -678,18 +626,19 @@ class ContextInitializer {
private:
v8::Persistent<v8::Context> env_;
v8::HandleScope scope_;
+ v8::internal::ZoneScope zone_;
v8::internal::StackGuard stack_guard_;
};
-static RegExpMacroAssemblerIA32::Result ExecuteIA32(Code* code,
- String* input,
- int start_offset,
- const byte* input_start,
- const byte* input_end,
- int* captures,
- bool at_start) {
- return RegExpMacroAssemblerIA32::Execute(
+static ArchRegExpMacroAssembler::Result Execute(Code* code,
+ String* input,
+ int start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* captures,
+ bool at_start) {
+ return NativeRegExpMacroAssembler::Execute(
code,
input,
start_offset,
@@ -700,11 +649,11 @@ static RegExpMacroAssemblerIA32::Result ExecuteIA32(Code* code,
}
-TEST(MacroAssemblerIA32Success) {
+TEST(MacroAssemblerNativeSuccess) {
v8::V8::Initialize();
ContextInitializer initializer;
- RegExpMacroAssemblerIA32 m(RegExpMacroAssemblerIA32::ASCII, 4);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4);
m.Succeed();
@@ -718,16 +667,16 @@ TEST(MacroAssemblerIA32Success) {
const byte* start_adr =
reinterpret_cast<const byte*>(seq_input->GetCharsAddress());
- RegExpMacroAssemblerIA32::Result result =
- ExecuteIA32(*code,
- *input,
- 0,
- start_adr,
- start_adr + seq_input->length(),
- captures,
- true);
+ NativeRegExpMacroAssembler::Result result =
+ Execute(*code,
+ *input,
+ 0,
+ start_adr,
+ start_adr + seq_input->length(),
+ captures,
+ true);
- CHECK_EQ(RegExpMacroAssemblerIA32::SUCCESS, result);
+ CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(-1, captures[0]);
CHECK_EQ(-1, captures[1]);
CHECK_EQ(-1, captures[2]);
@@ -735,11 +684,11 @@ TEST(MacroAssemblerIA32Success) {
}
-TEST(MacroAssemblerIA32Simple) {
+TEST(MacroAssemblerNativeSimple) {
v8::V8::Initialize();
ContextInitializer initializer;
- RegExpMacroAssemblerIA32 m(RegExpMacroAssemblerIA32::ASCII, 4);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4);
uc16 foo_chars[3] = {'f', 'o', 'o'};
Vector<const uc16> foo(foo_chars, 3);
@@ -762,16 +711,16 @@ TEST(MacroAssemblerIA32Simple) {
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
- RegExpMacroAssemblerIA32::Result result =
- ExecuteIA32(*code,
- *input,
- 0,
- start_adr,
- start_adr + input->length(),
- captures,
- true);
+ NativeRegExpMacroAssembler::Result result =
+ Execute(*code,
+ *input,
+ 0,
+ start_adr,
+ start_adr + input->length(),
+ captures,
+ true);
- CHECK_EQ(RegExpMacroAssemblerIA32::SUCCESS, result);
+ CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, captures[0]);
CHECK_EQ(3, captures[1]);
CHECK_EQ(-1, captures[2]);
@@ -781,23 +730,23 @@ TEST(MacroAssemblerIA32Simple) {
seq_input = Handle<SeqAsciiString>::cast(input);
start_adr = seq_input->GetCharsAddress();
- result = ExecuteIA32(*code,
- *input,
- 0,
- start_adr,
- start_adr + input->length(),
- captures,
- true);
+ result = Execute(*code,
+ *input,
+ 0,
+ start_adr,
+ start_adr + input->length(),
+ captures,
+ true);
- CHECK_EQ(RegExpMacroAssemblerIA32::FAILURE, result);
+ CHECK_EQ(NativeRegExpMacroAssembler::FAILURE, result);
}
-TEST(MacroAssemblerIA32SimpleUC16) {
+TEST(MacroAssemblerNativeSimpleUC16) {
v8::V8::Initialize();
ContextInitializer initializer;
- RegExpMacroAssemblerIA32 m(RegExpMacroAssemblerIA32::UC16, 4);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::UC16, 4);
uc16 foo_chars[3] = {'f', 'o', 'o'};
Vector<const uc16> foo(foo_chars, 3);
@@ -822,16 +771,16 @@ TEST(MacroAssemblerIA32SimpleUC16) {
Handle<SeqTwoByteString> seq_input = Handle<SeqTwoByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
- RegExpMacroAssemblerIA32::Result result =
- ExecuteIA32(*code,
- *input,
- 0,
- start_adr,
- start_adr + input->length(),
- captures,
- true);
+ NativeRegExpMacroAssembler::Result result =
+ Execute(*code,
+ *input,
+ 0,
+ start_adr,
+ start_adr + input->length(),
+ captures,
+ true);
- CHECK_EQ(RegExpMacroAssemblerIA32::SUCCESS, result);
+ CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, captures[0]);
CHECK_EQ(3, captures[1]);
CHECK_EQ(-1, captures[2]);
@@ -842,23 +791,23 @@ TEST(MacroAssemblerIA32SimpleUC16) {
seq_input = Handle<SeqTwoByteString>::cast(input);
start_adr = seq_input->GetCharsAddress();
- result = ExecuteIA32(*code,
- *input,
- 0,
- start_adr,
- start_adr + input->length() * 2,
- captures,
- true);
+ result = Execute(*code,
+ *input,
+ 0,
+ start_adr,
+ start_adr + input->length() * 2,
+ captures,
+ true);
- CHECK_EQ(RegExpMacroAssemblerIA32::FAILURE, result);
+ CHECK_EQ(NativeRegExpMacroAssembler::FAILURE, result);
}
-TEST(MacroAssemblerIA32Backtrack) {
+TEST(MacroAssemblerNativeBacktrack) {
v8::V8::Initialize();
ContextInitializer initializer;
- RegExpMacroAssemblerIA32 m(RegExpMacroAssemblerIA32::ASCII, 0);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 0);
Label fail;
Label backtrack;
@@ -879,24 +828,24 @@ TEST(MacroAssemblerIA32Backtrack) {
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
- RegExpMacroAssemblerIA32::Result result =
- ExecuteIA32(*code,
- *input,
- 0,
- start_adr,
- start_adr + input->length(),
- NULL,
- true);
+ NativeRegExpMacroAssembler::Result result =
+ Execute(*code,
+ *input,
+ 0,
+ start_adr,
+ start_adr + input->length(),
+ NULL,
+ true);
- CHECK_EQ(RegExpMacroAssemblerIA32::FAILURE, result);
+ CHECK_EQ(NativeRegExpMacroAssembler::FAILURE, result);
}
-TEST(MacroAssemblerIA32BackReferenceASCII) {
+TEST(MacroAssemblerNativeBackReferenceASCII) {
v8::V8::Initialize();
ContextInitializer initializer;
- RegExpMacroAssemblerIA32 m(RegExpMacroAssemblerIA32::ASCII, 3);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 3);
m.WriteCurrentPositionToRegister(0, 0);
m.AdvanceCurrentPosition(2);
@@ -922,27 +871,27 @@ TEST(MacroAssemblerIA32BackReferenceASCII) {
Address start_adr = seq_input->GetCharsAddress();
int output[3];
- RegExpMacroAssemblerIA32::Result result =
- ExecuteIA32(*code,
- *input,
- 0,
- start_adr,
- start_adr + input->length(),
- output,
- true);
-
- CHECK_EQ(RegExpMacroAssemblerIA32::SUCCESS, result);
+ NativeRegExpMacroAssembler::Result result =
+ Execute(*code,
+ *input,
+ 0,
+ start_adr,
+ start_adr + input->length(),
+ output,
+ true);
+
+ CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, output[0]);
CHECK_EQ(2, output[1]);
CHECK_EQ(6, output[2]);
}
-TEST(MacroAssemblerIA32BackReferenceUC16) {
+TEST(MacroAssemblerNativeBackReferenceUC16) {
v8::V8::Initialize();
ContextInitializer initializer;
- RegExpMacroAssemblerIA32 m(RegExpMacroAssemblerIA32::UC16, 3);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::UC16, 3);
m.WriteCurrentPositionToRegister(0, 0);
m.AdvanceCurrentPosition(2);
@@ -970,8 +919,8 @@ TEST(MacroAssemblerIA32BackReferenceUC16) {
Address start_adr = seq_input->GetCharsAddress();
int output[3];
- RegExpMacroAssemblerIA32::Result result =
- ExecuteIA32(*code,
+ NativeRegExpMacroAssembler::Result result =
+ Execute(*code,
*input,
0,
start_adr,
@@ -979,7 +928,7 @@ TEST(MacroAssemblerIA32BackReferenceUC16) {
output,
true);
- CHECK_EQ(RegExpMacroAssemblerIA32::SUCCESS, result);
+ CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, output[0]);
CHECK_EQ(2, output[1]);
CHECK_EQ(6, output[2]);
@@ -987,11 +936,11 @@ TEST(MacroAssemblerIA32BackReferenceUC16) {
-TEST(MacroAssemblerIA32AtStart) {
+TEST(MacroAssemblerNativeAtStart) {
v8::V8::Initialize();
ContextInitializer initializer;
- RegExpMacroAssemblerIA32 m(RegExpMacroAssemblerIA32::ASCII, 0);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 0);
Label not_at_start, newline, fail;
m.CheckNotAtStart(&not_at_start);
@@ -1022,34 +971,34 @@ TEST(MacroAssemblerIA32AtStart) {
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
- RegExpMacroAssemblerIA32::Result result =
- ExecuteIA32(*code,
- *input,
- 0,
- start_adr,
- start_adr + input->length(),
- NULL,
- true);
-
- CHECK_EQ(RegExpMacroAssemblerIA32::SUCCESS, result);
-
- result = ExecuteIA32(*code,
- *input,
- 3,
- start_adr + 3,
- start_adr + input->length(),
- NULL,
- false);
-
- CHECK_EQ(RegExpMacroAssemblerIA32::SUCCESS, result);
+ NativeRegExpMacroAssembler::Result result =
+ Execute(*code,
+ *input,
+ 0,
+ start_adr,
+ start_adr + input->length(),
+ NULL,
+ true);
+
+ CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
+
+ result = Execute(*code,
+ *input,
+ 3,
+ start_adr + 3,
+ start_adr + input->length(),
+ NULL,
+ false);
+
+ CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
}
-TEST(MacroAssemblerIA32BackRefNoCase) {
+TEST(MacroAssemblerNativeBackRefNoCase) {
v8::V8::Initialize();
ContextInitializer initializer;
- RegExpMacroAssemblerIA32 m(RegExpMacroAssemblerIA32::ASCII, 4);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4);
Label fail, succ;
@@ -1084,16 +1033,16 @@ TEST(MacroAssemblerIA32BackRefNoCase) {
Address start_adr = seq_input->GetCharsAddress();
int output[4];
- RegExpMacroAssemblerIA32::Result result =
- ExecuteIA32(*code,
- *input,
- 0,
- start_adr,
- start_adr + input->length(),
- output,
- true);
-
- CHECK_EQ(RegExpMacroAssemblerIA32::SUCCESS, result);
+ NativeRegExpMacroAssembler::Result result =
+ Execute(*code,
+ *input,
+ 0,
+ start_adr,
+ start_adr + input->length(),
+ output,
+ true);
+
+ CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, output[0]);
CHECK_EQ(12, output[1]);
CHECK_EQ(0, output[2]);
@@ -1102,11 +1051,11 @@ TEST(MacroAssemblerIA32BackRefNoCase) {
-TEST(MacroAssemblerIA32Registers) {
+TEST(MacroAssemblerNativeRegisters) {
v8::V8::Initialize();
ContextInitializer initializer;
- RegExpMacroAssemblerIA32 m(RegExpMacroAssemblerIA32::ASCII, 5);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 5);
uc16 foo_chars[3] = {'f', 'o', 'o'};
Vector<const uc16> foo(foo_chars, 3);
@@ -1184,8 +1133,8 @@ TEST(MacroAssemblerIA32Registers) {
Address start_adr = seq_input->GetCharsAddress();
int output[5];
- RegExpMacroAssemblerIA32::Result result =
- ExecuteIA32(*code,
+ NativeRegExpMacroAssembler::Result result =
+ Execute(*code,
*input,
0,
start_adr,
@@ -1193,7 +1142,7 @@ TEST(MacroAssemblerIA32Registers) {
output,
true);
- CHECK_EQ(RegExpMacroAssemblerIA32::SUCCESS, result);
+ CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, output[0]);
CHECK_EQ(3, output[1]);
CHECK_EQ(6, output[2]);
@@ -1202,11 +1151,11 @@ TEST(MacroAssemblerIA32Registers) {
}
-TEST(MacroAssemblerIA32StackOverflow) {
+TEST(MacroAssemblerStackOverflow) {
v8::V8::Initialize();
ContextInitializer initializer;
- RegExpMacroAssemblerIA32 m(RegExpMacroAssemblerIA32::ASCII, 0);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 0);
Label loop;
m.Bind(&loop);
@@ -1224,26 +1173,26 @@ TEST(MacroAssemblerIA32StackOverflow) {
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
- RegExpMacroAssemblerIA32::Result result =
- ExecuteIA32(*code,
- *input,
- 0,
- start_adr,
- start_adr + input->length(),
- NULL,
- true);
+ NativeRegExpMacroAssembler::Result result =
+ Execute(*code,
+ *input,
+ 0,
+ start_adr,
+ start_adr + input->length(),
+ NULL,
+ true);
- CHECK_EQ(RegExpMacroAssemblerIA32::EXCEPTION, result);
+ CHECK_EQ(NativeRegExpMacroAssembler::EXCEPTION, result);
CHECK(Top::has_pending_exception());
Top::clear_pending_exception();
}
-TEST(MacroAssemblerIA32LotsOfRegisters) {
+TEST(MacroAssemblerNativeLotsOfRegisters) {
v8::V8::Initialize();
ContextInitializer initializer;
- RegExpMacroAssemblerIA32 m(RegExpMacroAssemblerIA32::ASCII, 2);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 2);
// At least 2048, to ensure the allocated space for registers
// span one full page.
@@ -1270,24 +1219,88 @@ TEST(MacroAssemblerIA32LotsOfRegisters) {
Address start_adr = seq_input->GetCharsAddress();
int captures[2];
- RegExpMacroAssemblerIA32::Result result =
- ExecuteIA32(*code,
- *input,
- 0,
- start_adr,
- start_adr + input->length(),
- captures,
- true);
-
- CHECK_EQ(RegExpMacroAssemblerIA32::SUCCESS, result);
+ NativeRegExpMacroAssembler::Result result =
+ Execute(*code,
+ *input,
+ 0,
+ start_adr,
+ start_adr + input->length(),
+ captures,
+ true);
+
+ CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, captures[0]);
CHECK_EQ(42, captures[1]);
Top::clear_pending_exception();
}
-#endif // V8_REGEXP_NATIVE
-#endif // V8_TARGET_ARCH_IA32
+#else  // ! V8_NATIVE_REGEXP
+
+TEST(MacroAssembler) {
+ V8::Initialize(NULL);
+ byte codes[1024];
+ RegExpMacroAssemblerIrregexp m(Vector<byte>(codes, 1024));
+ // ^f(o)o.
+ Label fail, fail2, start;
+ uc16 foo_chars[3];
+ foo_chars[0] = 'f';
+ foo_chars[1] = 'o';
+ foo_chars[2] = 'o';
+ Vector<const uc16> foo(foo_chars, 3);
+ m.SetRegister(4, 42);
+ m.PushRegister(4, RegExpMacroAssembler::kNoStackLimitCheck);
+ m.AdvanceRegister(4, 42);
+ m.GoTo(&start);
+ m.Fail();
+ m.Bind(&start);
+ m.PushBacktrack(&fail2);
+ m.CheckCharacters(foo, 0, &fail, true);
+ m.WriteCurrentPositionToRegister(0, 0);
+ m.PushCurrentPosition();
+ m.AdvanceCurrentPosition(3);
+ m.WriteCurrentPositionToRegister(1, 0);
+ m.PopCurrentPosition();
+ m.AdvanceCurrentPosition(1);
+ m.WriteCurrentPositionToRegister(2, 0);
+ m.AdvanceCurrentPosition(1);
+ m.WriteCurrentPositionToRegister(3, 0);
+ m.Succeed();
+
+ m.Bind(&fail);
+ m.Backtrack();
+ m.Succeed();
+
+ m.Bind(&fail2);
+ m.PopRegister(0);
+ m.Fail();
+
+ v8::HandleScope scope;
+
+ Handle<String> source = Factory::NewStringFromAscii(CStrVector("^f(o)o"));
+ Handle<ByteArray> array = Handle<ByteArray>::cast(m.GetCode(source));
+ int captures[5];
+
+ const uc16 str1[] = {'f', 'o', 'o', 'b', 'a', 'r'};
+ Handle<String> f1_16 =
+ Factory::NewStringFromTwoByte(Vector<const uc16>(str1, 6));
+
+ CHECK(IrregexpInterpreter::Match(array, f1_16, captures, 0));
+ CHECK_EQ(0, captures[0]);
+ CHECK_EQ(3, captures[1]);
+ CHECK_EQ(1, captures[2]);
+ CHECK_EQ(2, captures[3]);
+ CHECK_EQ(84, captures[4]);
+
+ const uc16 str2[] = {'b', 'a', 'r', 'f', 'o', 'o'};
+ Handle<String> f2_16 =
+ Factory::NewStringFromTwoByte(Vector<const uc16>(str2, 6));
+
+ CHECK(!IrregexpInterpreter::Match(array, f2_16, captures, 0));
+ CHECK_EQ(42, captures[0]);
+}
+
+#endif  // ! V8_NATIVE_REGEXP
TEST(AddInverseToTable) {
diff --git a/V8Binding/v8/test/mjsunit/json.js b/V8Binding/v8/test/mjsunit/json.js
index 4758264..bf44f78 100644
--- a/V8Binding/v8/test/mjsunit/json.js
+++ b/V8Binding/v8/test/mjsunit/json.js
@@ -195,3 +195,13 @@ assertEquals('{"y":6,"x":5}', JSON.stringify({x:5,y:6}, ['y', 'x']));
assertEquals(undefined, JSON.stringify(undefined));
assertEquals(undefined, JSON.stringify(function () { }));
+
+function checkIllegal(str) {
+ assertThrows(function () { JSON.parse(str); }, SyntaxError);
+}
+
+checkIllegal('1); throw "foo"; (1');
+
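+// The eval below shows the payload is executable; JSON.parse of the
+// corresponding illegal string must throw without evaluating the embedded
+// x++.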
+var x = 0;
+eval("(1); x++; (1)");
+checkIllegal('1); x++; (1');
diff --git a/V8Binding/v8/test/mjsunit/mjsunit.js b/V8Binding/v8/test/mjsunit/mjsunit.js
index 2c52a31..1fb3f02 100644
--- a/V8Binding/v8/test/mjsunit/mjsunit.js
+++ b/V8Binding/v8/test/mjsunit/mjsunit.js
@@ -179,9 +179,13 @@ function assertInstanceof(obj, type) {
function assertDoesNotThrow(code) {
try {
- eval(code);
+ if (typeof code == 'function') {
+ code();
+ } else {
+ eval(code);
+ }
} catch (e) {
- assertTrue(false, "threw an exception");
+ assertTrue(false, "threw an exception: " + (e.message || e));
}
}
diff --git a/V8Binding/v8/test/mjsunit/stack-traces.js b/V8Binding/v8/test/mjsunit/stack-traces.js
index 3bb5755..d7ece2c 100644
--- a/V8Binding/v8/test/mjsunit/stack-traces.js
+++ b/V8Binding/v8/test/mjsunit/stack-traces.js
@@ -103,22 +103,24 @@ function testStrippedCustomError() {
// Utility function for testing that the expected strings occur
// in the stack trace produced when running the given function.
-function testTrace(fun, expected, unexpected) {
+function testTrace(name, fun, expected, unexpected) {
var threw = false;
try {
fun();
} catch (e) {
for (var i = 0; i < expected.length; i++) {
- assertTrue(e.stack.indexOf(expected[i]) != -1);
+ assertTrue(e.stack.indexOf(expected[i]) != -1,
+ name + " doesn't contain expected[" + i + "]");
}
if (unexpected) {
for (var i = 0; i < unexpected.length; i++) {
- assertEquals(e.stack.indexOf(unexpected[i]), -1);
+ assertEquals(e.stack.indexOf(unexpected[i]), -1,
+ name + " contains unexpected[" + i + "]");
}
}
threw = true;
}
- assertTrue(threw);
+ assertTrue(threw, name + " didn't throw");
}
// Test that the error constructor is not shown in the trace
@@ -127,10 +129,11 @@ function testCallerCensorship() {
try {
FAIL;
} catch (e) {
- assertEquals(-1, e.stack.indexOf('at new ReferenceError'));
+ assertEquals(-1, e.stack.indexOf('at new ReferenceError'),
+ "CallerCensorship contained new ReferenceError");
threw = true;
}
- assertTrue(threw);
+ assertTrue(threw, "CallerCensorship didn't throw");
}
// Test that the explicit constructor call is shown in the trace
@@ -143,10 +146,11 @@ function testUnintendedCallerCensorship() {
}
});
} catch (e) {
- assertTrue(e.stack.indexOf('at new ReferenceError') != -1);
+ assertTrue(e.stack.indexOf('at new ReferenceError') != -1,
+ "UnintendedCallerCensorship didn't contain new ReferenceError");
threw = true;
}
- assertTrue(threw);
+ assertTrue(threw, "UnintendedCallerCensorship didn't throw");
}
// If an error occurs while the stack trace is being formatted it should
@@ -161,9 +165,10 @@ function testErrorsDuringFormatting() {
n.foo();
} catch (e) {
threw = true;
- assertTrue(e.stack.indexOf('<error: ReferenceError') != -1);
+ assertTrue(e.stack.indexOf('<error: ReferenceError') != -1,
+ "ErrorsDuringFormatting didn't contain error: ReferenceError");
}
- assertTrue(threw);
+ assertTrue(threw, "ErrorsDuringFormatting didn't throw");
threw = false;
// Now we can't even format the message saying that we couldn't format
// the stack frame. Put that in your pipe and smoke it!
@@ -172,26 +177,28 @@ function testErrorsDuringFormatting() {
n.foo();
} catch (e) {
threw = true;
- assertTrue(e.stack.indexOf('<error>') != -1);
+ assertTrue(e.stack.indexOf('<error>') != -1,
+ "ErrorsDuringFormatting didn't contain <error>");
}
- assertTrue(threw);
-}
-
-testTrace(testArrayNative, ["Array.map (native)"]);
-testTrace(testNested, ["at one", "at two", "at three"]);
-testTrace(testMethodNameInference, ["at Foo.bar"]);
-testTrace(testImplicitConversion, ["at Nirk.valueOf"]);
-testTrace(testEval, ["at Doo (eval at testEval"]);
-testTrace(testNestedEval, ["eval at Inner (eval at Outer"]);
-testTrace(testValue, ["at Number.causeError"]);
-testTrace(testConstructor, ["new Plonk"]);
-testTrace(testRenamedMethod, ["Wookie.a$b$c$d [as d]"]);
-testTrace(testAnonymousMethod, ["Array.<anonymous>"]);
-testTrace(testDefaultCustomError, ["hep-hey", "new CustomError"],
+  assertTrue(threw, "ErrorsDuringFormatting didn't throw (2)");
+}
+
+
+testTrace("testArrayNative", testArrayNative, ["Array.map (native)"]);
+testTrace("testNested", testNested, ["at one", "at two", "at three"]);
+testTrace("testMethodNameInference", testMethodNameInference, ["at Foo.bar"]);
+testTrace("testImplicitConversion", testImplicitConversion, ["at Nirk.valueOf"]);
+testTrace("testEval", testEval, ["at Doo (eval at testEval"]);
+testTrace("testNestedEval", testNestedEval, ["eval at Inner (eval at Outer"]);
+testTrace("testValue", testValue, ["at Number.causeError"]);
+testTrace("testConstructor", testConstructor, ["new Plonk"]);
+testTrace("testRenamedMethod", testRenamedMethod, ["Wookie.a$b$c$d [as d]"]);
+testTrace("testAnonymousMethod", testAnonymousMethod, ["Array.<anonymous>"]);
+testTrace("testDefaultCustomError", testDefaultCustomError,
+ ["hep-hey", "new CustomError"],
["collectStackTrace"]);
-testTrace(testStrippedCustomError, ["hep-hey"], ["new CustomError",
- "collectStackTrace"]);
-
+testTrace("testStrippedCustomError", testStrippedCustomError, ["hep-hey"],
+ ["new CustomError", "collectStackTrace"]);
testCallerCensorship();
testUnintendedCallerCensorship();
testErrorsDuringFormatting();
diff --git a/V8Binding/v8/test/mjsunit/tools/logreader.js b/V8Binding/v8/test/mjsunit/tools/logreader.js
index dfd7f9f..8ed5ffd 100644
--- a/V8Binding/v8/test/mjsunit/tools/logreader.js
+++ b/V8Binding/v8/test/mjsunit/tools/logreader.js
@@ -80,3 +80,19 @@
assertEquals('bbbbaaaa', reader.expandBackRef_('bbbb#2:4'));
assertEquals('"#1:1"', reader.expandBackRef_('"#1:1"'));
})();
+
+
+// See http://code.google.com/p/v8/issues/detail?id=420
+(function testReadingTruncatedLog() {
+ // Having an incorrect event in the middle of a log should throw an exception.
+ var reader1 = new devtools.profiler.LogReader({});
+ assertThrows(function() {
+ reader1.processLogChunk('alias,a,b\nxxxx\nalias,c,d\n');
+ });
+
+ // But having it as the last record should not.
+ var reader2 = new devtools.profiler.LogReader({});
+ assertDoesNotThrow(function() {
+ reader2.processLogChunk('alias,a,b\nalias,c,d\nxxxx');
+ });
+})();
diff --git a/V8Binding/v8/test/mjsunit/tools/tickprocessor.js b/V8Binding/v8/test/mjsunit/tools/tickprocessor.js
index 00c3fb1..83bdac8 100644
--- a/V8Binding/v8/test/mjsunit/tools/tickprocessor.js
+++ b/V8Binding/v8/test/mjsunit/tools/tickprocessor.js
@@ -227,6 +227,78 @@
})();
+// http://code.google.com/p/v8/issues/detail?id=427
+(function testWindowsProcessExeAndDllMapFile() {
+ function exeSymbols(exeName) {
+ return [
+ ' 0000:00000000 ___ImageBase 00400000 <linker-defined>',
+ ' 0001:00000780 ?RunMain@@YAHHQAPAD@Z 00401780 f shell.obj',
+ ' 0001:00000ac0 _main 00401ac0 f shell.obj',
+ ''
+ ].join('\r\n');
+ }
+
+ function dllSymbols(dllName) {
+ return [
+ ' 0000:00000000 ___ImageBase 01c30000 <linker-defined>',
+ ' 0001:00000780 _DllMain@12 01c31780 f libcmt:dllmain.obj',
+ ' 0001:00000ac0 ___DllMainCRTStartup 01c31ac0 f libcmt:dllcrt0.obj',
+ ''
+ ].join('\r\n');
+ }
+
+ var oldRead = read;
+
+ read = exeSymbols;
+ var exe_exe_syms = [];
+ (new WindowsCppEntriesProvider()).parseVmSymbols(
+ 'chrome.exe', 0x00400000, 0x00472000,
+ function (name, start, end) {
+ exe_exe_syms.push(Array.prototype.slice.apply(arguments, [0]));
+ });
+ assertEquals(
+ [['RunMain', 0x00401780, 0x00401ac0],
+ ['_main', 0x00401ac0, 0x00472000]],
+ exe_exe_syms, '.exe with .exe symbols');
+
+ read = dllSymbols;
+ var exe_dll_syms = [];
+ (new WindowsCppEntriesProvider()).parseVmSymbols(
+ 'chrome.exe', 0x00400000, 0x00472000,
+ function (name, start, end) {
+ exe_dll_syms.push(Array.prototype.slice.apply(arguments, [0]));
+ });
+ assertEquals(
+ [],
+ exe_dll_syms, '.exe with .dll symbols');
+
+ read = dllSymbols;
+ var dll_dll_syms = [];
+ (new WindowsCppEntriesProvider()).parseVmSymbols(
+ 'chrome.dll', 0x01c30000, 0x02b80000,
+ function (name, start, end) {
+ dll_dll_syms.push(Array.prototype.slice.apply(arguments, [0]));
+ });
+ assertEquals(
+ [['_DllMain@12', 0x01c31780, 0x01c31ac0],
+ ['___DllMainCRTStartup', 0x01c31ac0, 0x02b80000]],
+ dll_dll_syms, '.dll with .dll symbols');
+
+ read = exeSymbols;
+ var dll_exe_syms = [];
+ (new WindowsCppEntriesProvider()).parseVmSymbols(
+ 'chrome.dll', 0x01c30000, 0x02b80000,
+ function (name, start, end) {
+ dll_exe_syms.push(Array.prototype.slice.apply(arguments, [0]));
+ });
+ assertEquals(
+ [],
+ dll_exe_syms, '.dll with .exe symbols');
+
+ read = oldRead;
+})();
+
+
function CppEntriesProviderMock() {
};
diff --git a/V8Binding/v8/test/mozilla/mozilla.status b/V8Binding/v8/test/mozilla/mozilla.status
index 74ba8f3..a1551dc 100644
--- a/V8Binding/v8/test/mozilla/mozilla.status
+++ b/V8Binding/v8/test/mozilla/mozilla.status
@@ -278,6 +278,11 @@ js1_2/regexp/beginLine: FAIL_OK
js1_2/regexp/endLine: FAIL_OK
+# To be compatible with Safari, typeof a regexp yields 'function';
+# in Firefox it yields 'object'.
+js1_2/function/regexparg-1: FAIL_OK
+
+
# Date trouble?
js1_5/Date/regress-301738-02: FAIL_OK
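
A one-line illustration of the typeof divergence noted above (behavior of V8 at this revision, where regexps were callable, matching Safari; the variable name is illustrative):

    var re = /x/;
    typeof re;  // 'function' in V8 and Safari of this era, 'object' in Firefox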
@@ -803,16 +808,3 @@ ecma/Expressions/11.7.3: SKIP
ecma/Expressions/11.10-3: SKIP
ecma/Expressions/11.7.1: SKIP
ecma_3/RegExp/regress-209067: SKIP
-
-[ $ARCH == x64 ]
-
-# Tests that fail on the 64-bit port. This section should be empty
-# when the 64-bit port is fully debugged.
-
-js1_2/regexp/regress-9141: FAIL
-js1_5/Regress/regress-211590: CRASH
-js1_5/Regress/regress-303213: PASS || CRASH
-js1_5/extensions/regress-336410-2: CRASH
-js1_5/extensions/regress-336410-1: CRASH
-js1_5/Function/regress-338001: FAIL || CRASH
-js1_5/extensions/regress-371636: CRASH
diff --git a/V8Binding/v8/tools/gyp/v8.gyp b/V8Binding/v8/tools/gyp/v8.gyp
index 365d87c..b0c3331 100644
--- a/V8Binding/v8/tools/gyp/v8.gyp
+++ b/V8Binding/v8/tools/gyp/v8.gyp
@@ -32,6 +32,7 @@
'gcc_version%': 'unknown',
'target_arch%': 'ia32',
'v8_use_snapshot%': 'true',
+ 'v8_regexp%': 'native',
},
'includes': [
'../../../build/common.gypi',
@@ -55,6 +56,7 @@
['target_arch=="x64"', {
'defines': [
'V8_TARGET_ARCH_X64',
+ 'V8_NATIVE_REGEXP',
],
}],
],
@@ -428,14 +430,18 @@
'../../src/ia32/jump-target-ia32.cc',
'../../src/ia32/macro-assembler-ia32.cc',
'../../src/ia32/macro-assembler-ia32.h',
- '../../src/ia32/regexp-macro-assembler-ia32.cc',
- '../../src/ia32/regexp-macro-assembler-ia32.h',
'../../src/ia32/register-allocator-ia32.cc',
'../../src/ia32/stub-cache-ia32.cc',
'../../src/ia32/virtual-frame-ia32.cc',
'../../src/ia32/virtual-frame-ia32.h',
],
}],
+ ['target_arch=="ia32" and v8_regexp=="native"', {
+ 'sources': [
+ '../../src/ia32/regexp-macro-assembler-ia32.cc',
+ '../../src/ia32/regexp-macro-assembler-ia32.h',
+ ],
+ }],
['target_arch=="x64"', {
'include_dirs+': [
'../../src/x64',
@@ -457,14 +463,18 @@
'../../src/x64/jump-target-x64.cc',
'../../src/x64/macro-assembler-x64.cc',
'../../src/x64/macro-assembler-x64.h',
- #'../../src/x64/regexp-macro-assembler-x64.cc',
- #'../../src/x64/regexp-macro-assembler-x64.h',
'../../src/x64/register-allocator-x64.cc',
'../../src/x64/stub-cache-x64.cc',
'../../src/x64/virtual-frame-x64.cc',
'../../src/x64/virtual-frame-x64.h',
],
}],
+ ['target_arch=="x64" and v8_regexp=="native"', {
+ 'sources': [
+ '../../src/x64/regexp-macro-assembler-x64.cc',
+ '../../src/x64/regexp-macro-assembler-x64.h',
+ ],
+ }],
['OS=="linux"', {
'link_settings': {
'libraries': [
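
A hedged usage note on the new v8_regexp% variable: it defaults to 'native', which pulls in the per-architecture regexp assembler sources above and pairs with the V8_NATIVE_REGEXP define. Overriding it at project-generation time should drop those sources and fall back to the Irregexp bytecode engine; the alternative value 'interpreted' is an assumption here, and note that the x64 define added above is unconditional, so a non-native x64 build would need that adjusted as well:

    gyp -Dv8_regexp=interpreted tools/gyp/v8.gyp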
diff --git a/V8Binding/v8/tools/logreader.js b/V8Binding/v8/tools/logreader.js
index 78085a4..88ab907 100644
--- a/V8Binding/v8/tools/logreader.js
+++ b/V8Binding/v8/tools/logreader.js
@@ -294,8 +294,11 @@ devtools.profiler.LogReader.prototype.processLog_ = function(lines) {
this.dispatchLogRow_(fields);
}
} catch (e) {
- this.printError('line ' + (i + 1) + ': ' + (e.message || e));
- throw e;
+    // An error on the last line is acceptable since the log file can be truncated.
+ if (i < n - 1) {
+ this.printError('line ' + (i + 1) + ': ' + (e.message || e));
+ throw e;
+ }
}
};
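
A short sketch of the tolerance this buys in practice, assuming a consumer feeds the reader a live log that may be cut mid-record (the chunk below mirrors the test added earlier in this change):

    var reader = new devtools.profiler.LogReader({});
    // 'xxxx' is a truncated trailing record. Previously the reader re-threw;
    // now only a malformed row before the final line is treated as an error.
    reader.processLogChunk('alias,a,b\nalias,c,d\nxxxx');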
diff --git a/V8Binding/v8/tools/mac-nm b/V8Binding/v8/tools/mac-nm
index 9c18177..07efb07 100755
--- a/V8Binding/v8/tools/mac-nm
+++ b/V8Binding/v8/tools/mac-nm
@@ -12,7 +12,7 @@
# needs to be parsed, which requires a lot of knowledge to be coded in.
if [ "`which c++filt`" == "" ]; then
- nm $@
+ nm "$@"
else
- nm $@ | c++filt -p -i
+ nm "$@" | c++filt -p -i
fi
diff --git a/V8Binding/v8/tools/tickprocessor.js b/V8Binding/v8/tools/tickprocessor.js
index 34c6195..72b3059 100644
--- a/V8Binding/v8/tools/tickprocessor.js
+++ b/V8Binding/v8/tools/tickprocessor.js
@@ -499,19 +499,32 @@ function WindowsCppEntriesProvider() {
inherits(WindowsCppEntriesProvider, CppEntriesProvider);
-WindowsCppEntriesProvider.FILENAME_RE = /^(.*)\.exe$/;
+WindowsCppEntriesProvider.FILENAME_RE = /^(.*)\.([^.]+)$/;
WindowsCppEntriesProvider.FUNC_RE =
- /^ 0001:[0-9a-fA-F]{8}\s+([_\?@$0-9a-zA-Z]+)\s+([0-9a-fA-F]{8}).*$/;
+ /^\s+0001:[0-9a-fA-F]{8}\s+([_\?@$0-9a-zA-Z]+)\s+([0-9a-fA-F]{8}).*$/;
+
+
+WindowsCppEntriesProvider.IMAGE_BASE_RE =
+ /^\s+0000:00000000\s+___ImageBase\s+([0-9a-fA-F]{8}).*$/;
+
+
+// The default image base for executables; effectively a constant on Windows.
+WindowsCppEntriesProvider.EXE_IMAGE_BASE = 0x00400000;
WindowsCppEntriesProvider.prototype.loadSymbols = function(libName) {
var fileNameFields = libName.match(WindowsCppEntriesProvider.FILENAME_RE);
- // Only try to load symbols for the .exe file.
if (!fileNameFields) return;
var mapFileName = fileNameFields[1] + '.map';
- this.symbols = readFile(mapFileName);
+ this.moduleType_ = fileNameFields[2].toLowerCase();
+ try {
+ this.symbols = read(mapFileName);
+ } catch (e) {
+    // If the .map file cannot be found, don't panic; just continue without symbols.
+ this.symbols = '';
+ }
};
@@ -523,6 +536,18 @@ WindowsCppEntriesProvider.prototype.parseNextLine = function() {
var line = this.symbols.substring(this.parsePos, lineEndPos);
this.parsePos = lineEndPos + 2;
+
+  // The image base entry precedes all other symbols, so on a module type
+  // mismatch we can terminate parsing right away.
+ var imageBaseFields = line.match(WindowsCppEntriesProvider.IMAGE_BASE_RE);
+ if (imageBaseFields) {
+ var imageBase = parseInt(imageBaseFields[1], 16);
+ if ((this.moduleType_ == 'exe') !=
+ (imageBase == WindowsCppEntriesProvider.EXE_IMAGE_BASE)) {
+ return false;
+ }
+ }
+
var fields = line.match(WindowsCppEntriesProvider.FUNC_RE);
return fields ?
{ name: this.unmangleName(fields[1]), start: parseInt(fields[2], 16) } :
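
A standalone replica of the consistency check above, for clarity (hypothetical helper; 0x00400000 is the conventional default image base for Windows executables, while DLLs are typically based elsewhere, as in the test values added earlier in this change):

    function mapMatchesModule(moduleType, imageBase) {
      var EXE_IMAGE_BASE = 0x00400000;  // WindowsCppEntriesProvider.EXE_IMAGE_BASE
      // An .exe map should report the conventional exe base and a .dll map
      // should not; any mismatch means the .map belongs to another module.
      return (moduleType == 'exe') == (imageBase == EXE_IMAGE_BASE);
    }
    mapMatchesModule('exe', 0x00400000);  // true:  keep parsing
    mapMatchesModule('dll', 0x01c30000);  // true:  keep parsing
    mapMatchesModule('exe', 0x01c30000);  // false: parseNextLine returns false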
diff --git a/WEBKIT_MERGE_REVISION b/WEBKIT_MERGE_REVISION
index c1ba036..edaf0cb 100644
--- a/WEBKIT_MERGE_REVISION
+++ b/WEBKIT_MERGE_REVISION
@@ -2,4 +2,4 @@ We sync with Chromium release revision, which has both webkit revision and V8 re
http://src.chromium.org/svn/branches/187/src@18043
http://svn.webkit.org/repository/webkit/trunk@44544
- http://v8.googlecode.com/svn/branches/bleeding_edge@2654
+ http://v8.googlecode.com/svn/branches/bleeding_edge@2703