diff --git a/deps/chakrashim/core/.gitignore b/deps/chakrashim/core/.gitignore
index 7c2254493f6..10193337f87 100644
--- a/deps/chakrashim/core/.gitignore
+++ b/deps/chakrashim/core/.gitignore
@@ -39,7 +39,11 @@ build_*.log
build_*.wrn
Build/ipch/
Build/swum-cache.txt
-Build/VCBuild*/
+Build/VCBuild.Lite/
+Build/VCBuild.NoJIT/
+Build/VCBuild.SWB/
+Build/VCBuild.ClangCL/
+Build/VCBuild/
buildchk.*
buildfre.*
out/
diff --git a/deps/chakrashim/core/Build/Chakra.Build.Clang.Default.props b/deps/chakrashim/core/Build/Chakra.Build.Clang.Default.props
index dfb766246c5..4ad30b8877a 100644
--- a/deps/chakrashim/core/Build/Chakra.Build.Clang.Default.props
+++ b/deps/chakrashim/core/Build/Chakra.Build.Clang.Default.props
@@ -1,7 +1,7 @@
-
+
LLVM-vs2014
diff --git a/deps/chakrashim/core/Build/Chakra.Build.Clang.props b/deps/chakrashim/core/Build/Chakra.Build.Clang.props
index 61637d60cae..2f697fdf1ad 100644
--- a/deps/chakrashim/core/Build/Chakra.Build.Clang.props
+++ b/deps/chakrashim/core/Build/Chakra.Build.Clang.props
@@ -61,7 +61,9 @@
-Wno-microsoft-extra-qualification
-Wno-microsoft-default-arg-redefinition
-Wno-microsoft-exception-spec
- -v
+ -Wno-clang-cl-pch
+ -Wno-unused-lambda-capture
+ -Wno-pragma-pack
OldStyle
diff --git a/deps/chakrashim/core/Build/Chakra.Build.Clang.targets b/deps/chakrashim/core/Build/Chakra.Build.Clang.targets
index cefd385aad2..e2d2b5d70d2 100644
--- a/deps/chakrashim/core/Build/Chakra.Build.Clang.targets
+++ b/deps/chakrashim/core/Build/Chakra.Build.Clang.targets
@@ -126,4 +126,4 @@
>
-
\ No newline at end of file
+
diff --git a/deps/chakrashim/core/Build/Common.Build.Default.props b/deps/chakrashim/core/Build/Common.Build.Default.props
index 07d1dd35f69..815121e3ab9 100644
--- a/deps/chakrashim/core/Build/Common.Build.Default.props
+++ b/deps/chakrashim/core/Build/Common.Build.Default.props
@@ -17,7 +17,6 @@
v120
v140
v141
- v142
diff --git a/deps/chakrashim/core/Build/Common.Build.props b/deps/chakrashim/core/Build/Common.Build.props
index 6bbec5610c6..85d6787d486 100644
--- a/deps/chakrashim/core/Build/Common.Build.props
+++ b/deps/chakrashim/core/Build/Common.Build.props
@@ -65,8 +65,6 @@
true
- Guard
-
true
MultiThreadedDLL
diff --git a/deps/chakrashim/core/Build/NuGet/.pack-version b/deps/chakrashim/core/Build/NuGet/.pack-version
index 97ba4c9069f..0eed1a29efd 100644
--- a/deps/chakrashim/core/Build/NuGet/.pack-version
+++ b/deps/chakrashim/core/Build/NuGet/.pack-version
@@ -1 +1 @@
-1.11.15
+1.12.0
diff --git a/deps/chakrashim/core/Build/NuGet/Microsoft.ChakraCore.vc140.targets b/deps/chakrashim/core/Build/NuGet/Microsoft.ChakraCore.vc140.targets
index 35f83799d39..86ed1028f48 100644
--- a/deps/chakrashim/core/Build/NuGet/Microsoft.ChakraCore.vc140.targets
+++ b/deps/chakrashim/core/Build/NuGet/Microsoft.ChakraCore.vc140.targets
@@ -37,4 +37,4 @@
-
\ No newline at end of file
+
diff --git a/deps/chakrashim/core/Build/NuGet/package.ps1 b/deps/chakrashim/core/Build/NuGet/package.ps1
index ac46a4120a7..39a0a9a8e3d 100644
--- a/deps/chakrashim/core/Build/NuGet/package.ps1
+++ b/deps/chakrashim/core/Build/NuGet/package.ps1
@@ -47,4 +47,4 @@ Foreach ($nuspec in $(Get-Item $packageRoot\*.nuspec))
If (Test-Path $compiledNuspec)
{
Remove-Item $compiledNuspec
-}
\ No newline at end of file
+}
diff --git a/deps/chakrashim/core/Build/NuGet/packages.config b/deps/chakrashim/core/Build/NuGet/packages.config
deleted file mode 100644
index 577f3804c47..00000000000
--- a/deps/chakrashim/core/Build/NuGet/packages.config
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
-
-
-
diff --git a/deps/chakrashim/core/README.md b/deps/chakrashim/core/README.md
index 4bd2a370869..41d1c3496a4 100644
--- a/deps/chakrashim/core/README.md
+++ b/deps/chakrashim/core/README.md
@@ -22,61 +22,61 @@ You can stay up-to-date on progress by following the [MSEdge developer blog](htt
[a] Static | [s] Shared | [n] NoJIT | * Omitted
-[x64dbgicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/x64_debug/badge/icon
-[x64dbglink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/x64_debug/
-[x64testicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/x64_test/badge/icon
-[x64testlink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/x64_test/
-[x64relicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/x64_release/badge/icon
-[x64rellink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/x64_release/
-
-[x86dbgicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/x86_debug/badge/icon
-[x86dbglink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/x86_debug/
-[x86testicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/x86_test/badge/icon
-[x86testlink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/x86_test/
-[x86relicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/x86_release/badge/icon
-[x86rellink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/x86_release/
-
-[armdbgicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/arm_debug/badge/icon
-[armdbglink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/arm_debug/
-[armtesticon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/arm_test/badge/icon
-[armtestlink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/arm_test/
-[armrelicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/arm_release/badge/icon
-[armrellink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/arm_release/
-
-[linux_a_dbgicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/static_ubuntu_linux_debug/badge/icon
-[linux_a_dbglink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/static_ubuntu_linux_debug/
-[linux_a_testicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/static_ubuntu_linux_test/badge/icon
-[linux_a_testlink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/static_ubuntu_linux_test/
-[linux_a_relicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/static_ubuntu_linux_release/badge/icon
-[linux_a_rellink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/static_ubuntu_linux_release/
-
-[linux_s_dbgicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/shared_ubuntu_linux_debug/badge/icon
-[linux_s_dbglink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/shared_ubuntu_linux_debug/
-[linux_s_testicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/shared_ubuntu_linux_test/badge/icon
-[linux_s_testlink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/shared_ubuntu_linux_test/
-[linux_s_relicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/shared_ubuntu_linux_release/badge/icon
-[linux_s_rellink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/shared_ubuntu_linux_release/
-
-[linux_sn_dbgicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_ubuntu_linux_debug/badge/icon
-[linux_sn_dbglink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_ubuntu_linux_debug/
-[linux_sn_testicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_ubuntu_linux_test/badge/icon
-[linux_sn_testlink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_ubuntu_linux_test/
-[linux_sn_relicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_ubuntu_linux_release/badge/icon
-[linux_sn_rellink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_ubuntu_linux_release/
-
-[osx_a_dbgicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/static_osx_osx_debug/badge/icon
-[osx_a_dbglink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/static_osx_osx_debug/
-[osx_a_testicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/static_osx_osx_test/badge/icon
-[osx_a_testlink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/static_osx_osx_test/
-[osx_a_relicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/static_osx_osx_release/badge/icon
-[osx_a_rellink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/static_osx_osx_release/
-
-[osx_sn_dbgicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_osx_osx_debug/badge/icon
-[osx_sn_dbglink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_osx_osx_debug/
-[osx_sn_testicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_osx_osx_test/badge/icon
-[osx_sn_testlink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_osx_osx_test/
-[osx_sn_relicon]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_osx_osx_release/badge/icon
-[osx_sn_rellink]: https://ci.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_osx_osx_release/
+[x64dbgicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/x64_debug/badge/icon
+[x64dbglink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/x64_debug/
+[x64testicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/x64_test/badge/icon
+[x64testlink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/x64_test/
+[x64relicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/x64_release/badge/icon
+[x64rellink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/x64_release/
+
+[x86dbgicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/x86_debug/badge/icon
+[x86dbglink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/x86_debug/
+[x86testicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/x86_test/badge/icon
+[x86testlink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/x86_test/
+[x86relicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/x86_release/badge/icon
+[x86rellink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/x86_release/
+
+[armdbgicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/arm_debug/badge/icon
+[armdbglink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/arm_debug/
+[armtesticon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/arm_test/badge/icon
+[armtestlink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/arm_test/
+[armrelicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/arm_release/badge/icon
+[armrellink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/arm_release/
+
+[linux_a_dbgicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/static_ubuntu_linux_debug/badge/icon
+[linux_a_dbglink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/static_ubuntu_linux_debug/
+[linux_a_testicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/static_ubuntu_linux_test/badge/icon
+[linux_a_testlink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/static_ubuntu_linux_test/
+[linux_a_relicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/static_ubuntu_linux_release/badge/icon
+[linux_a_rellink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/static_ubuntu_linux_release/
+
+[linux_s_dbgicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/shared_ubuntu_linux_debug/badge/icon
+[linux_s_dbglink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/shared_ubuntu_linux_debug/
+[linux_s_testicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/shared_ubuntu_linux_test/badge/icon
+[linux_s_testlink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/shared_ubuntu_linux_test/
+[linux_s_relicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/shared_ubuntu_linux_release/badge/icon
+[linux_s_rellink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/shared_ubuntu_linux_release/
+
+[linux_sn_dbgicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_ubuntu_linux_debug/badge/icon
+[linux_sn_dbglink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_ubuntu_linux_debug/
+[linux_sn_testicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_ubuntu_linux_test/badge/icon
+[linux_sn_testlink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_ubuntu_linux_test/
+[linux_sn_relicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_ubuntu_linux_release/badge/icon
+[linux_sn_rellink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_ubuntu_linux_release/
+
+[osx_a_dbgicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/static_osx_osx_debug/badge/icon
+[osx_a_dbglink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/static_osx_osx_debug/
+[osx_a_testicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/static_osx_osx_test/badge/icon
+[osx_a_testlink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/static_osx_osx_test/
+[osx_a_relicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/static_osx_osx_release/badge/icon
+[osx_a_rellink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/static_osx_osx_release/
+
+[osx_sn_dbgicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_osx_osx_debug/badge/icon
+[osx_sn_dbglink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_osx_osx_debug/
+[osx_sn_testicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_osx_osx_test/badge/icon
+[osx_sn_testlink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_osx_osx_test/
+[osx_sn_relicon]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_osx_osx_release/badge/icon
+[osx_sn_rellink]: https://ci2.dot.net/job/Microsoft_ChakraCore/job/master/job/_no_jit_shared_osx_osx_release/
Above is a table of our rolling build status. We run additional builds on a daily basis. See [Build Status](https://github.com/Microsoft/ChakraCore/wiki/Build-Status) for the status of all builds and additional details.
diff --git a/deps/chakrashim/core/RegenAllByteCodeNoBuild.cmd b/deps/chakrashim/core/RegenAllByteCodeNoBuild.cmd
index 9f0baeb3515..4d7915c41ea 100644
--- a/deps/chakrashim/core/RegenAllByteCodeNoBuild.cmd
+++ b/deps/chakrashim/core/RegenAllByteCodeNoBuild.cmd
@@ -32,9 +32,4 @@ setlocal
call GenByteCode.cmd
call GenByteCode.cmd -nojit
popd
-
- pushd %_reporoot%\lib\Runtime\Library\JsBuiltIn
- call GenByteCode.cmd
- call GenByteCode.cmd -nojit
- popd
endlocal
diff --git a/deps/chakrashim/core/bin/ChakraCore/ChakraCore.vcxproj b/deps/chakrashim/core/bin/ChakraCore/ChakraCore.vcxproj
index 915a4fd670b..5aef42ee598 100644
--- a/deps/chakrashim/core/bin/ChakraCore/ChakraCore.vcxproj
+++ b/deps/chakrashim/core/bin/ChakraCore/ChakraCore.vcxproj
@@ -189,7 +189,7 @@
- Microsoft400
+ Microsoft
diff --git a/deps/chakrashim/core/bin/ChakraCore/TestHooks.cpp b/deps/chakrashim/core/bin/ChakraCore/TestHooks.cpp
index 2416eec2be3..d49ac4a4b1c 100644
--- a/deps/chakrashim/core/bin/ChakraCore/TestHooks.cpp
+++ b/deps/chakrashim/core/bin/ChakraCore/TestHooks.cpp
@@ -19,6 +19,13 @@ int LogicalStringCompareImpl(const char16* p1, int p1size, const char16* p2, int
}
}
+namespace Js
+{
+ static digit_t AddDigit(digit_t a, digit_t b, digit_t * carry);
+ static digit_t SubtractDigit(digit_t a, digit_t b, digit_t * borrow);
+ static digit_t MulDigit(digit_t a, digit_t b, digit_t * high);
+}
+
#ifdef ENABLE_TEST_HOOKS
HRESULT __stdcall SetConfigFlags(__in int argc, __in_ecount(argc) LPWSTR argv[], ICustomConfigFlags* customConfigFlags)
@@ -168,6 +175,11 @@ HRESULT OnChakraCoreLoaded(OnChakraCoreLoadedPtr pfChakraCoreLoaded)
SetEnableCheckMemoryLeakOutput,
PlatformAgnostic::UnicodeText::Internal::LogicalStringCompareImpl,
+ //BigInt hooks
+ Js::JavascriptBigInt::AddDigit,
+ Js::JavascriptBigInt::SubDigit,
+ Js::JavascriptBigInt::MulDigit,
+
#define FLAG(type, name, description, defaultValue, ...) FLAG_##type##(name)
#define FLAGINCLUDE(name) \
IsEnabled##name##Flag, \
diff --git a/deps/chakrashim/core/bin/ChakraCore/TestHooks.h b/deps/chakrashim/core/bin/ChakraCore/TestHooks.h
index 02b89958ca9..42fda3fe952 100644
--- a/deps/chakrashim/core/bin/ChakraCore/TestHooks.h
+++ b/deps/chakrashim/core/bin/ChakraCore/TestHooks.h
@@ -31,6 +31,14 @@ struct TestHooks
SetEnableCheckMemoryLeakOutputPtr pfSetEnableCheckMemoryLeakOutput;
LogicalStringCompareImpl pfLogicalCompareStringImpl;
+ // JavaScript BigInt hooks
+ typedef digit_t(TESTHOOK_CALL *AddDigit)(digit_t a, digit_t b, digit_t* carry);
+ typedef digit_t(TESTHOOK_CALL *SubDigit)(digit_t a, digit_t b, digit_t* borrow);
+ typedef digit_t(TESTHOOK_CALL *MulDigit)(digit_t a, digit_t b, digit_t* high);
+ AddDigit pfAddDigit;
+ SubDigit pfSubDigit;
+ MulDigit pfMulDigit;
+
#define FLAG(type, name, description, defaultValue, ...) FLAG_##type##(name)
#define FLAG_String(name) \
bool (TESTHOOK_CALL *pfIsEnabled##name##Flag)(); \
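The three digit hooks above expose word-level BigInt arithmetic to the native tests. The real implementations live in Js::JavascriptBigInt and are not shown in this diff; a minimal sketch of the carry/borrow/high-word semantics the typedefs imply, assuming digit_t is a pointer-sized unsigned integer, could look like this (illustration only, not ChakraCore's code):

    #include <cstdint>

    typedef uintptr_t digit_t;  // assumption: a digit is a pointer-sized unsigned word

    // a + b; *carry is 1 iff the sum wrapped past 2^N (sketch, not ChakraCore's code)
    static digit_t AddDigit(digit_t a, digit_t b, digit_t* carry)
    {
        digit_t sum = a + b;
        *carry = (sum < a) ? 1 : 0;  // unsigned wraparound implies sum < a
        return sum;
    }

    // a - b; *borrow is 1 iff a < b and the difference wrapped
    static digit_t SubtractDigit(digit_t a, digit_t b, digit_t* borrow)
    {
        *borrow = (a < b) ? 1 : 0;
        return a - b;
    }

    // low word of a * b, with the high word returned through *high
    // (portable half-word long multiplication)
    static digit_t MulDigit(digit_t a, digit_t b, digit_t* high)
    {
        const unsigned s = sizeof(digit_t) * 4;       // half the digit width in bits
        const digit_t mask = ((digit_t)1 << s) - 1;
        digit_t aLo = a & mask, aHi = a >> s;
        digit_t bLo = b & mask, bHi = b >> s;
        digit_t lo   = aLo * bLo;
        digit_t mid1 = aHi * bLo + (lo >> s);         // cannot overflow a digit
        digit_t mid2 = aLo * bHi + (mid1 & mask);
        *high = aHi * bHi + (mid1 >> s) + (mid2 >> s);
        return (mid2 << s) | (lo & mask);
    }

This sketch matches the expectations in the JavascriptBigIntTests cases further down: UINTPTR_MAX + UINTPTR_MAX yields UINTPTR_MAX - 1 with carry 1, and UINTPTR_MAX * 2 yields UINTPTR_MAX - 1 with high word 1.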
diff --git a/deps/chakrashim/core/bin/NativeTests/BigUIntTest.cpp b/deps/chakrashim/core/bin/NativeTests/BigUIntTest.cpp
new file mode 100644
index 00000000000..6042dab90f8
--- /dev/null
+++ b/deps/chakrashim/core/bin/NativeTests/BigUIntTest.cpp
@@ -0,0 +1,275 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+
+#include "stdafx.h"
+#pragma warning(disable:26434) // Function definition hides non-virtual function in base class
+#pragma warning(disable:26439) // Implicit noexcept
+#pragma warning(disable:26451) // Arithmetic overflow
+#pragma warning(disable:26495) // Uninitialized member variable
+#include "catch.hpp"
+#include "BigUIntTest.h"
+
+#pragma warning(disable:4100) // unreferenced formal parameter
+#pragma warning(disable:6387) // suppressing preFAST which raises warning for passing null to the JsRT APIs
+#pragma warning(disable:6262) // CATCH is using stack variables to report errors, suppressing the preFAST warning.
+
+namespace BigUIntTest
+{
+ TEST_CASE("Init_Compare", "[BigUIntTest]")
+ {
+ uint32 digits[1];
+ int32 length = 1;
+ Js::BigUInt bi1, bi2;
+ BOOL f;
+ int result;
+
+ digits[0] = 0x00001111;
+ f = bi1.FInitFromRglu(digits, length);
+ REQUIRE(f);
+
+ SECTION("Equal number init from the same array and length")
+ {
+ f = bi2.FInitFromRglu(digits, length);
+ REQUIRE(f);
+ result = bi1.Compare(&bi2);
+ CHECK(result == 0);
+ }
+
+ SECTION("Equal number init from other big int number")
+ {
+ f = bi2.FInitFromBigint(&bi1);
+ REQUIRE(f);
+ result = bi1.Compare(&bi2);
+ CHECK(result == 0);
+ }
+
+ SECTION("Greater number")
+ {
+ digits[0] = 0x00000001;
+ f = bi2.FInitFromRglu(digits, length);
+ REQUIRE(f);
+ result = bi1.Compare(&bi2);
+ CHECK(result == 1);
+ }
+
+ SECTION("Smaller number")
+ {
+ digits[0] = 0x00000001;
+ f = bi2.FInitFromRglu(digits, length);
+ REQUIRE(f);
+ result = bi2.Compare(&bi1);
+ CHECK(result == -1);
+ }
+ }
+
+ TEST_CASE("Addition", "[BigUIntTest]")
+ {
+ uint32 digits[1], digit1s[2];
+ int32 length = 1;
+ Js::BigUInt bi1, bi2, bi3;
+ BOOL f;
+ int result;
+
+ SECTION("Check 0x33331111 + 0x33331111 = 0x66662222")
+ {
+ digits[0] = 0x33331111;
+ f = bi1.FInitFromRglu(digits, length);
+ REQUIRE(f);
+ f = bi2.FInitFromBigint(&bi1);
+ REQUIRE(f);
+ f = bi1.FAdd(&bi2);
+ REQUIRE(f);
+ digits[0] = 0x66662222;
+ f = bi3.FInitFromRglu(digits, length);
+ REQUIRE(f);
+ result = bi1.Compare(&bi3);
+ CHECK(result == 0);
+ }
+
+ SECTION("Check 0xffffffff + 0x1 = 0x100000000")
+ {
+ digits[0] = 0xffffffff;
+ f = bi1.FInitFromRglu(digits, length);
+ REQUIRE(f);
+ digits[0] = 0x00000001;
+ f = bi2.FInitFromRglu(digits, length);
+ REQUIRE(f);
+ f = bi1.FAdd(&bi2);
+ digit1s[0] = 0x0;
+ digit1s[1] = 0x1;
+ f = bi3.FInitFromRglu(digit1s, 2);
+ REQUIRE(f);
+ result = bi1.Compare(&bi3);
+ CHECK(result == 0);
+ }
+
+ SECTION("Check 0xffffffffffffffff + 0x1 = 0x10000000000000000")
+ {
+ digit1s[0] = 0xffffffff;
+ digit1s[1] = 0xffffffff;
+ f = bi1.FInitFromRglu(digit1s, 2);
+ REQUIRE(f);
+ digits[0] = 0x00000001;
+ f = bi2.FInitFromRglu(digits, 1);
+ REQUIRE(f);
+ f = bi1.FAdd(&bi2);
+ uint32 digit2s[3];
+ digit2s[0] = 0x0;
+ digit2s[1] = 0x0;
+ digit2s[2] = 0x1;
+ f = bi3.FInitFromRglu(digit2s, 3);
+ REQUIRE(f);
+ result = bi1.Compare(&bi3);
+ CHECK(result == 0);
+ }
+ }
+
+ TEST_CASE("Addition_Subtraction_Large_Number", "[BigUIntTest]")
+ {
+ const int l1 = 50, l2 = 1;
+ uint32 digit1s[l1], digit2s[l2];
+ Js::BigUInt bi1, bi2;
+ BOOL f;
+
+ SECTION("Check 0xf...0xf + 0x1 = 0x1_0x0...0x0")
+ {
+ for (int i = 0; i < l1; i++)
+ {
+ digit1s[i] = 0xffffffff;
+ }
+ f = bi1.FInitFromRglu(digit1s, l1);
+ REQUIRE(f);
+ digit2s[0] = 0x1;
+ f = bi2.FInitFromRglu(digit2s, l2);
+ REQUIRE(f);
+ f = bi1.FAdd(&bi2);
+ REQUIRE(f);
+ int32 length = bi1.Clu();
+ CHECK(length == l1 + 1);
+ uint32 digit = bi1.Lu(length - 1);
+ CHECK(digit == 1);
+ for (int i = 0; i < length - 1; i++)
+ {
+ digit = bi1.Lu(i);
+ CHECK(digit == 0);
+ }
+ }
+ }
+
+ TEST_CASE("Subtraction", "[BigUIntTest]")
+ {
+ uint32 digits[1], digit1s[2];
+ int32 length = 1;
+ Js::BigUInt bi1, bi2, bi3;
+ BOOL f;
+ int result;
+
+ SECTION("Check 0x66662222 - 0x33331111 = 0x33331111")
+ {
+ digits[0] = 0x33331111;
+ f = bi1.FInitFromRglu(digits, length);
+ REQUIRE(f);
+ f = bi2.FInitFromBigint(&bi1);
+ REQUIRE(f);
+ digits[0] = 0x66662222;
+ f = bi3.FInitFromRglu(digits, length);
+ REQUIRE(f);
+ bi3.Subtract(&bi2);
+ result = bi1.Compare(&bi3);
+ CHECK(result == 0);
+ }
+
+ SECTION("Check 0x3_0x1 - 0x1_0x0 = 0x2_0x1")
+ {
+ digit1s[0] = 0x1;
+ digit1s[1] = 0x3;
+ f = bi3.FInitFromRglu(digit1s, 2);
+ REQUIRE(f);
+ digit1s[0] = 0x0;
+ digit1s[1] = 0x1;
+ f = bi2.FInitFromRglu(digit1s, 2);
+ REQUIRE(f);
+ bi3.Subtract(&bi2);
+ int l = bi3.Clu();
+ CHECK(l == 2);
+ int digit = bi3.Lu(1);
+ CHECK(digit == 2);
+ digit = bi3.Lu(0);
+ CHECK(digit == 1);
+ }
+
+ SECTION("Check 0x2_0x0 - 0x1 = 0x1_0xfffffff")
+ {
+ digit1s[0] = 0x0;
+ digit1s[1] = 0x2;
+ f = bi3.FInitFromRglu(digit1s, 2);
+ REQUIRE(f);
+ digits[0] = 0x1;
+ f = bi2.FInitFromRglu(digits, 1);
+ REQUIRE(f);
+ bi3.Subtract(&bi2);
+ int l = bi3.Clu();
+ CHECK(l == 2);
+ int digit = bi3.Lu(1);
+ CHECK(digit == 1);
+ digit = bi3.Lu(0);
+ CHECK(digit == 0xffffffff);
+ }
+
+ SECTION("Currently 0x1_0x0 - 0x1 is overflow")
+ {
+ }
+ }
+
+ TEST_CASE("Init_From_Char_Of_Digits", "[BigUIntTest]")
+ {
+ Js::BigUInt biDec;
+ const char *charDigit;
+ bool result;
+ int charDigitLength;
+
+ SECTION("2**32-1 should have length = 1")
+ {
+ charDigit = "4294967295";
+ charDigitLength = 10;
+ result = biDec.FInitFromDigits(charDigit, charDigitLength, &charDigitLength);
+ REQUIRE(result);
+ int length = biDec.Clu();
+ CHECK(length == 1);
+ uint32 digit = biDec.Lu(0);
+ CHECK(digit == 4294967295);
+ }
+
+ SECTION("2**32+2 should have length = 2")
+ {
+ charDigit = "4294967298";
+ charDigitLength = 10;
+ result = biDec.FInitFromDigits(charDigit, charDigitLength, &charDigitLength);
+ REQUIRE(result);
+ int length = biDec.Clu();
+ CHECK(length == 2);
+ uint32 digit = biDec.Lu(0);
+ CHECK(digit == 2);
+ digit = biDec.Lu(1);
+ CHECK(digit == 1);
+ }
+
+ SECTION("2**64 should have length = 3")
+ {
+ charDigit = "18446744073709551616";
+ charDigitLength = 20;
+ result = biDec.FInitFromDigits(charDigit, charDigitLength, &charDigitLength);
+ REQUIRE(result);
+ int length = biDec.Clu();
+ CHECK(length == 3);
+ uint32 digit = biDec.Lu(0);
+ CHECK(digit == 0);
+ digit = biDec.Lu(1);
+ CHECK(digit == 0);
+ digit = biDec.Lu(2);
+ CHECK(digit == 1);
+ }
+ }
+}
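The Clu()/Lu(i) assertions above assume a little-endian base-2^32 digit layout: digits[0] is the least significant 32-bit word. A standalone sketch of that layout (hypothetical helper, limited to values that fit in 64 bits) shows why "4294967298" (2^32 + 2) decodes to the digit array {2, 1}:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Value denoted by a little-endian array of base-2^32 digits
    // (hypothetical helper mirroring the layout the tests assert on).
    static uint64_t ValueOfDigits(const std::vector<uint32_t>& digits)
    {
        uint64_t value = 0;
        for (size_t i = digits.size(); i-- > 0; )
        {
            value = (value << 32) | digits[i];  // digits[0] is least significant
        }
        return value;
    }

    int main()
    {
        assert(ValueOfDigits({ 2u, 1u }) == 4294967298ull);        // 2**32 + 2
        assert(ValueOfDigits({ 0xFFFFFFFFu }) == 4294967295ull);   // 2**32 - 1, one digit
        return 0;
    }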
diff --git a/deps/chakrashim/core/bin/NativeTests/BigUIntTest.h b/deps/chakrashim/core/bin/NativeTests/BigUIntTest.h
new file mode 100644
index 00000000000..d6105e9ab8e
--- /dev/null
+++ b/deps/chakrashim/core/bin/NativeTests/BigUIntTest.h
@@ -0,0 +1,49 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+
+// This file contains the stubs needed to make BigUIntTest compile and link successfully, as well
+// as a means to emulate the behavior of objects that interact with the BigUInt class
+
+#include "..\..\lib\Common\Warnings.h"
+#include "..\..\lib\Common\Core\Api.cpp"
+#include "..\..\lib\Common\Common\NumberUtilities.cpp"
+
+namespace Js
+{
+ void Throw::FatalInternalError(long)
+ {
+ Assert(false);
+ }
+
+ bool Throw::ReportAssert(__in char const *, unsigned int, __in char const *, __in char const *)
+ {
+ return false;
+ }
+
+ void Throw::LogAssert(void) {}
+}
+
+template <typename EncodedChar>
+double Js::NumberUtilities::StrToDbl(const EncodedChar *, const EncodedChar **, LikelyNumberType&, bool)
+{
+ Assert(false);
+ return 0.0;// don't care
+}
+
+#if defined(_M_IX86) || defined(_M_X64)
+BOOL
+AutoSystemInfo::SSE3Available() const
+{
+ Assert(false);
+ return TRUE;
+}
+
+AutoSystemInfo AutoSystemInfo::Data;
+
+void AutoSystemInfo::Initialize(void){}
+#endif
+
+#include "..\..\lib\Common\DataStructures\BigUInt.h"
+#include "..\..\lib\Common\DataStructures\BigUInt.cpp"
diff --git a/deps/chakrashim/core/bin/NativeTests/ConfigFlagsList.h b/deps/chakrashim/core/bin/NativeTests/ConfigFlagsList.h
new file mode 100644
index 00000000000..185addb2493
--- /dev/null
+++ b/deps/chakrashim/core/bin/NativeTests/ConfigFlagsList.h
@@ -0,0 +1,7 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+#pragma once
+
+// stub file for ConfigFlagsList.h
diff --git a/deps/chakrashim/core/bin/NativeTests/JavascriptBigIntTests.cpp b/deps/chakrashim/core/bin/NativeTests/JavascriptBigIntTests.cpp
new file mode 100644
index 00000000000..9c9f35c3b15
--- /dev/null
+++ b/deps/chakrashim/core/bin/NativeTests/JavascriptBigIntTests.cpp
@@ -0,0 +1,84 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+
+#include "stdafx.h"
+#pragma warning(disable:26434) // Function definition hides non-virtual function in base class
+#pragma warning(disable:26439) // Implicit noexcept
+#pragma warning(disable:26451) // Arithmetic overflow
+#pragma warning(disable:26495) // Uninitialized member variable
+#include "catch.hpp"
+
+#pragma warning(disable:4100) // unreferenced formal parameter
+#pragma warning(disable:6387) // suppressing preFAST which raises warning for passing null to the JsRT APIs
+#pragma warning(disable:6262) // CATCH is using stack variables to report errors, suppressing the preFAST warning.
+
+namespace JavascriptBigIntTests
+{
+ void Test_AddDigit(digit_t digit1, digit_t digit2, digit_t * carry, digit_t expectedResult, digit_t expectedCarry)
+ {
+ REQUIRE(g_testHooksLoaded);
+
+ digit_t res = g_testHooks.pfAddDigit(digit1, digit2, carry);
+
+ //test to check that the result from call to AddDigit is the expected value
+ REQUIRE(res == expectedResult);
+ REQUIRE(expectedCarry == *carry);
+ }
+
+ void Test_SubDigit(digit_t digit1, digit_t digit2, digit_t * borrow, digit_t expectedResult, digit_t expectedBorrow)
+ {
+ REQUIRE(g_testHooksLoaded);
+
+ digit_t res = g_testHooks.pfSubDigit(digit1, digit2, borrow);
+
+ //test to check that the result from call to SubtractDigit is the expected value
+ REQUIRE(res == expectedResult);
+ REQUIRE(*borrow == expectedBorrow);
+ }
+
+ void Test_MulDigit(digit_t digit1, digit_t digit2, digit_t * high, digit_t expectedResult, digit_t expectedHigh)
+ {
+ REQUIRE(g_testHooksLoaded);
+
+ digit_t res = g_testHooks.pfMulDigit(digit1, digit2, high);
+
+ //test to check that the result from call to MulDigit is the expected value
+ REQUIRE(res == expectedResult);
+ REQUIRE(*high == expectedHigh);
+ }
+
+ TEST_CASE("AddDigit", "[JavascriptBigIntTests]")
+ {
+ digit_t carry = 0;
+ Test_AddDigit(1, 2, &carry, 3, 0);
+
+ digit_t d1 = UINTPTR_MAX;
+ digit_t d2 = UINTPTR_MAX;
+ carry = 0;
+ Test_AddDigit(d1, d2, &carry, UINTPTR_MAX-1, 1);
+ }
+
+ TEST_CASE("SubDigit", "[JavascriptBigIntTests]")
+ {
+ digit_t borrow = 0;
+ Test_SubDigit(3, 2, &borrow, 1, 0);
+
+ digit_t d1 = 0;
+ digit_t d2 = 1;
+ borrow = 0;
+ Test_SubDigit(d1, d2, &borrow, UINTPTR_MAX, 1);
+ }
+
+ TEST_CASE("MulDigit", "[JavascriptBigIntTests]")
+ {
+ digit_t high = 0;
+ Test_MulDigit(3, 2, &high, 6, 0);
+
+ digit_t d1 = UINTPTR_MAX;
+ digit_t d2 = 2;
+ high = 0;
+ Test_MulDigit(d1, d2, &high, UINTPTR_MAX-1, 1);
+ }
+}
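The expected values in these cases are plain modular arithmetic on a pointer-sized digit. On a 64-bit build (where digit_t would be 64 bits wide and UINTPTR_MAX == 2^64 - 1) the wraparounds can be checked at compile time; a standalone illustration, independent of the test hooks:

    #include <cstdint>

    // (2^64 - 1) + (2^64 - 1) = 2^65 - 2 = 1 * 2^64 + (2^64 - 2): carry 1, low word below
    static_assert(UINT64_MAX + UINT64_MAX == UINT64_MAX - 1, "low word of the sum");
    // 0 - 1 wraps to 2^64 - 1: borrow 1
    static_assert(0ull - 1ull == UINT64_MAX, "low word of the difference");
    // (2^64 - 1) * 2 = 2^65 - 2 = 1 * 2^64 + (2^64 - 2): high word 1
    static_assert(UINT64_MAX * 2ull == UINT64_MAX - 1, "low word of the product");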
diff --git a/deps/chakrashim/core/bin/NativeTests/NativeTests.vcxproj b/deps/chakrashim/core/bin/NativeTests/NativeTests.vcxproj
index 12b0412c73b..cc28f224988 100644
--- a/deps/chakrashim/core/bin/NativeTests/NativeTests.vcxproj
+++ b/deps/chakrashim/core/bin/NativeTests/NativeTests.vcxproj
@@ -48,6 +48,8 @@
+    <ClCompile Include="BigUIntTest.cpp" />
+    <ClCompile Include="JavascriptBigIntTests.cpp" />
diff --git a/deps/chakrashim/core/bin/NativeTests/stdafx.h b/deps/chakrashim/core/bin/NativeTests/stdafx.h
index cd22fe95ada..c7a021c8c47 100644
--- a/deps/chakrashim/core/bin/NativeTests/stdafx.h
+++ b/deps/chakrashim/core/bin/NativeTests/stdafx.h
@@ -40,7 +40,7 @@ if (!(exp)) \
#define Assert(exp) AssertMsg(exp, #exp)
#define _JSRT_
-#include "chakracore.h"
+#include "ChakraCore.h"
#include "Core/CommonTypedefs.h"
#include
diff --git a/deps/chakrashim/core/bin/ch/262.js b/deps/chakrashim/core/bin/ch/262.js
index 6ff3bd31aca..d19f899155e 100644
--- a/deps/chakrashim/core/bin/ch/262.js
+++ b/deps/chakrashim/core/bin/ch/262.js
@@ -28,4 +28,4 @@ var $262 = {
getReport: function () { return WScript.GetReport(); },
},
};
-)===="
\ No newline at end of file
+)===="
diff --git a/deps/chakrashim/core/bin/ch/Debugger.cpp b/deps/chakrashim/core/bin/ch/Debugger.cpp
index 95655b57f09..b5038dbdecf 100644
--- a/deps/chakrashim/core/bin/ch/Debugger.cpp
+++ b/deps/chakrashim/core/bin/ch/Debugger.cpp
@@ -427,7 +427,14 @@ bool Debugger::DumpFunctionPosition(JsValueRef functionPosition)
bool Debugger::StartDebugging(JsRuntimeHandle runtime)
{
- IfJsrtErrorFailLogAndRetFalse(ChakraRTInterface::JsDiagStartDebugging(runtime, Debugger::DebugEventHandler, this));
+ JsErrorCode errorCode = ChakraRTInterface::JsDiagStartDebugging(runtime, Debugger::DebugEventHandler, this);
+
+ if (errorCode == JsErrorCode::JsErrorDiagAlreadyInDebugMode)
+ {
+ return false;
+ }
+
+ IfJsrtErrorFailLogAndRetFalse(errorCode);
this->m_isDetached = false;
@@ -437,7 +444,15 @@ bool Debugger::StartDebugging(JsRuntimeHandle runtime)
bool Debugger::StopDebugging(JsRuntimeHandle runtime)
{
void* callbackState = nullptr;
- IfJsrtErrorFailLogAndRetFalse(ChakraRTInterface::JsDiagStopDebugging(runtime, &callbackState));
+
+ JsErrorCode errorCode = ChakraRTInterface::JsDiagStopDebugging(runtime, &callbackState);
+
+ if (errorCode == JsErrorCode::JsErrorDiagNotInDebugMode)
+ {
+ return false;
+ }
+
+ IfJsrtErrorFailLogAndRetFalse(errorCode);
Assert(callbackState == this);
diff --git a/deps/chakrashim/core/bin/ch/Helpers.cpp b/deps/chakrashim/core/bin/ch/Helpers.cpp
index ec667f49350..3c71fd30c10 100644
--- a/deps/chakrashim/core/bin/ch/Helpers.cpp
+++ b/deps/chakrashim/core/bin/ch/Helpers.cpp
@@ -155,7 +155,7 @@ uint ConcatPath(LPCSTR filenameLeft, uint posPathSep, LPCSTR filenameRight, char
return totalLength;
}
-HRESULT Helpers::LoadScriptFromFile(LPCSTR filenameToLoad, LPCSTR& contents, UINT* lengthBytesOut /*= nullptr*/)
+HRESULT Helpers::LoadScriptFromFile(LPCSTR filenameToLoad, LPCSTR& contents, UINT* lengthBytesOut /*= nullptr*/, std::string* fullPath /*= nullptr*/, bool shouldMute /*=false */)
{
static char sHostApplicationPathBuffer[MAX_URI_LENGTH];
static uint sHostApplicationPathBufferLength = (uint) -1;
@@ -169,7 +169,7 @@ HRESULT Helpers::LoadScriptFromFile(LPCSTR filenameToLoad, LPCSTR& contents, UIN
FILE * file = NULL;
size_t bufferLength = 0;
- LPCSTR filename = filenameToLoad;
+ LPCSTR filename = fullPath == nullptr ? filenameToLoad : LPCSTR(fullPath->c_str());
if (sHostApplicationPathBufferLength == (uint)-1)
{
// consider the incoming filename as the host app and base its path for others
@@ -188,7 +188,7 @@ HRESULT Helpers::LoadScriptFromFile(LPCSTR filenameToLoad, LPCSTR& contents, UIN
}
sHostApplicationPathBuffer[sHostApplicationPathBufferLength] = char(0);
}
- else if (filename[0] != '/' && filename[0] != '\\') // make sure it's not a full path
+ else if (filename[0] != '/' && filename[0] != '\\' && fullPath == nullptr) // make sure it's not a full path
{
// concat host path and filename
uint len = ConcatPath(sHostApplicationPathBuffer, sHostApplicationPathBufferLength,
@@ -216,7 +216,7 @@ HRESULT Helpers::LoadScriptFromFile(LPCSTR filenameToLoad, LPCSTR& contents, UIN
// etc.
if (fopen_s(&file, filename, "rb") != 0)
{
- if (!HostConfigFlags::flags.MuteHostErrorMsgIsEnabled)
+ if (!HostConfigFlags::flags.MuteHostErrorMsgIsEnabled && !shouldMute)
{
#ifdef _WIN32
DWORD lastError = GetLastError();
diff --git a/deps/chakrashim/core/bin/ch/Helpers.h b/deps/chakrashim/core/bin/ch/Helpers.h
index 83c8bdff37a..fe01d35ed5f 100644
--- a/deps/chakrashim/core/bin/ch/Helpers.h
+++ b/deps/chakrashim/core/bin/ch/Helpers.h
@@ -7,7 +7,7 @@
class Helpers
{
public :
- static HRESULT LoadScriptFromFile(LPCSTR filename, LPCSTR& contents, UINT* lengthBytesOut = nullptr);
+ static HRESULT LoadScriptFromFile(LPCSTR filename, LPCSTR& contents, UINT* lengthBytesOut = nullptr, std::string* fullPath = nullptr, bool shouldMute = false);
static LPCWSTR JsErrorCodeToString(JsErrorCode jsErrorCode);
static void LogError(__in __nullterminated const char16 *msg, ...);
static HRESULT LoadBinaryFile(LPCSTR filename, LPCSTR& contents, UINT& lengthBytes, bool printFileOpenError = true);
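The two new optional parameters work together for module loading: a caller that has already resolved an absolute path passes it via fullPath, which both overrides filenameToLoad and skips the host-directory concatenation, while shouldMute defers the file-open error message so the caller can retry or report it once. A hypothetical call site (fragment; assumes ch's Helpers.h declarations, and the local names are illustrative, not from the diff):

    #include <string>

    // fullPath overrides filenameToLoad and skips the host-directory
    // concatenation; shouldMute keeps a failed fopen quiet so the caller
    // can fall back and report the error once.
    HRESULT LoadResolvedModule(const std::string& resolvedPath, LPCSTR& contents)
    {
        UINT lengthBytes = 0;
        std::string fullPath = resolvedPath;   // e.g. the key stored in moduleRecordMap
        return Helpers::LoadScriptFromFile("module.js", contents, &lengthBytes,
                                           &fullPath /*fullPath*/, true /*shouldMute*/);
    }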
diff --git a/deps/chakrashim/core/bin/ch/HostConfigFlagsList.h b/deps/chakrashim/core/bin/ch/HostConfigFlagsList.h
index ea4271eb8f6..395f0337ee8 100644
--- a/deps/chakrashim/core/bin/ch/HostConfigFlagsList.h
+++ b/deps/chakrashim/core/bin/ch/HostConfigFlagsList.h
@@ -17,6 +17,7 @@ FLAG(bool, IgnoreScriptErrorCode, "Don't return error code on script e
FLAG(bool, MuteHostErrorMsg, "Mute host error output, e.g. module load failures", false)
FLAG(bool, TraceHostCallback, "Output traces for host callbacks", false)
FLAG(bool, Test262, "load Test262 harness", false)
+FLAG(bool, Module, "load the script as a module", false)
FLAG(bool, TrackRejectedPromises, "Enable tracking of unhandled promise rejections", false)
FLAG(BSTR, CustomConfigFile, "Custom config file to be used to pass in additional flags to Chakra", NULL)
FLAG(bool, ExecuteWithBgParse, "[No-op] Load script with bgparse (note: requires bgparse to be on as well)", false)
diff --git a/deps/chakrashim/core/bin/ch/WScriptJsrt.cpp b/deps/chakrashim/core/bin/ch/WScriptJsrt.cpp
index 4b4fbb979d1..c5159f0a140 100644
--- a/deps/chakrashim/core/bin/ch/WScriptJsrt.cpp
+++ b/deps/chakrashim/core/bin/ch/WScriptJsrt.cpp
@@ -47,6 +47,7 @@
MessageQueue* WScriptJsrt::messageQueue = nullptr;
std::map<std::string, JsModuleRecord> WScriptJsrt::moduleRecordMap;
std::map<JsModuleRecord, std::string> WScriptJsrt::moduleDirMap;
+std::map<JsModuleRecord, ModuleState> WScriptJsrt::moduleErrMap;
std::map<DWORD_PTR, std::string> WScriptJsrt::scriptDirMap;
DWORD_PTR WScriptJsrt::sourceContext = 0;
@@ -223,7 +224,6 @@ JsValueRef WScriptJsrt::LoadScriptFileHelper(JsValueRef callee, JsValueRef *argu
hr = Helpers::LoadScriptFromFile(*fileName, fileContent);
if (FAILED(hr))
{
- // check if have it registered
fprintf(stderr, "Couldn't load file '%s'\n", fileName.GetString());
IfJsrtErrorSetGo(ChakraRTInterface::JsGetUndefinedValue(&returnValue));
return returnValue;
@@ -381,6 +381,15 @@ JsValueRef WScriptJsrt::LoadScriptHelper(JsValueRef callee, bool isConstructCall
Error:
if (errorCode != JsNoError)
{
+ // check and clear exception if any
+ bool hasException;
+ if (ChakraRTInterface::JsHasException(&hasException) == JsNoError && hasException)
+ {
+ JsValueRef unusedException = JS_INVALID_REFERENCE;
+ ChakraRTInterface::JsGetAndClearException(&unusedException);
+ unusedException; // unused
+ }
+
JsValueRef errorObject;
JsValueRef errorMessageString;
@@ -436,6 +445,11 @@ void WScriptJsrt::GetDir(LPCSTR fullPathNarrow, std::string *fullDirNarrow)
*fullDirNarrow = result;
}
+JsErrorCode WScriptJsrt::ModuleEntryPoint(LPCSTR fileName, LPCSTR fileContent, LPCSTR fullName)
+{
+ return LoadModuleFromString(fileName, fileContent, fullName, true);
+}
+
JsErrorCode WScriptJsrt::LoadModuleFromString(LPCSTR fileName, LPCSTR fileContent, LPCSTR fullName, bool isFile)
{
DWORD_PTR dwSourceCookie = WScriptJsrt::GetNextSourceContext();
@@ -468,6 +482,7 @@ JsErrorCode WScriptJsrt::LoadModuleFromString(LPCSTR fileName, LPCSTR fileConten
}
moduleRecordMap[std::string(moduleRecordKey)] = requestModule;
+ moduleErrMap[requestModule] = RootModule;
}
}
else
@@ -490,9 +505,10 @@ JsErrorCode WScriptJsrt::LoadModuleFromString(LPCSTR fileName, LPCSTR fileConten
errorCode = ChakraRTInterface::JsParseModuleSource(requestModule, dwSourceCookie, (LPBYTE)fileContent,
fileContentLength, JsParseModuleSourceFlags_DataIsUTF8, &errorObject);
- if ((errorCode != JsNoError) && errorObject != JS_INVALID_REFERENCE && fileContent != nullptr && !HostConfigFlags::flags.IgnoreScriptErrorCode)
+ if ((errorCode != JsNoError) && errorObject != JS_INVALID_REFERENCE && fileContent != nullptr && !HostConfigFlags::flags.IgnoreScriptErrorCode && moduleErrMap[requestModule] == RootModule)
{
ChakraRTInterface::JsSetException(errorObject);
+ moduleErrMap[requestModule] = ErroredModule;
return errorCode;
}
return JsNoError;
@@ -1131,6 +1147,7 @@ bool WScriptJsrt::Uninitialize()
// to avoid worrying about global destructor order.
moduleRecordMap.clear();
moduleDirMap.clear();
+ moduleErrMap.clear();
scriptDirMap.clear();
auto& threadData = GetRuntimeThreadLocalData().threadData;
@@ -1218,7 +1235,6 @@ JsValueRef __stdcall WScriptJsrt::LoadTextFileCallback(JsValueRef callee, bool i
if (FAILED(hr))
{
- // check if have it registered
fprintf(stderr, "Couldn't load file '%s'\n", fileName.GetString());
IfJsrtErrorSetGo(ChakraRTInterface::JsGetUndefinedValue(&returnValue));
return returnValue;
@@ -1382,7 +1398,6 @@ JsValueRef __stdcall WScriptJsrt::LoadBinaryFileCallback(JsValueRef callee,
if (FAILED(hr))
{
- // check if have it registered
fprintf(stderr, "Couldn't load file '%s'\n", fileName.GetString());
IfJsrtErrorSetGoLabel(ChakraRTInterface::JsGetUndefinedValue(&returnValue), Error);
return returnValue;
@@ -1831,12 +1846,14 @@ HRESULT WScriptJsrt::CallbackMessage::CallFunction(LPCSTR fileName)
return hr;
}
-WScriptJsrt::ModuleMessage::ModuleMessage(JsModuleRecord module, JsValueRef specifier)
+WScriptJsrt::ModuleMessage::ModuleMessage(JsModuleRecord module, JsValueRef specifier, std::string* fullPathPtr)
: MessageBase(0), moduleRecord(module), specifier(specifier)
{
+ fullPath = nullptr;
ChakraRTInterface::JsAddRef(module, nullptr);
if (specifier != nullptr)
{
+ fullPath = new std::string (*fullPathPtr);
// nullptr specifier means a Promise to execute; non-nullptr means a "fetch" operation.
ChakraRTInterface::JsAddRef(specifier, nullptr);
}
@@ -1847,21 +1864,39 @@ WScriptJsrt::ModuleMessage::~ModuleMessage()
ChakraRTInterface::JsRelease(moduleRecord, nullptr);
if (specifier != nullptr)
{
+ delete fullPath;
ChakraRTInterface::JsRelease(specifier, nullptr);
}
}
HRESULT WScriptJsrt::ModuleMessage::Call(LPCSTR fileName)
{
- JsErrorCode errorCode;
+ JsErrorCode errorCode = JsNoError;
JsValueRef result = JS_INVALID_REFERENCE;
HRESULT hr;
if (specifier == nullptr)
{
- errorCode = ChakraRTInterface::JsModuleEvaluation(moduleRecord, &result);
- if (errorCode != JsNoError)
+ if (moduleErrMap[moduleRecord] != ErroredModule)
{
- PrintException(fileName, errorCode);
+ errorCode = ChakraRTInterface::JsModuleEvaluation(moduleRecord, &result);
+ if (errorCode != JsNoError)
+ {
+ if (moduleErrMap[moduleRecord] == RootModule)
+ {
+ PrintException(fileName, errorCode);
+ }
+ else
+ {
+ bool hasException = false;
+ ChakraRTInterface::JsHasException(&hasException);
+ if (hasException)
+ {
+ JsValueRef exception;
+ ChakraRTInterface::JsGetAndClearException(&exception);
+ exception; // unused
+ }
+ }
+ }
}
}
else
@@ -1871,19 +1906,22 @@ HRESULT WScriptJsrt::ModuleMessage::Call(LPCSTR fileName)
errorCode = specifierStr.GetError();
if (errorCode == JsNoError)
{
- hr = Helpers::LoadScriptFromFile(*specifierStr, fileContent);
+ hr = Helpers::LoadScriptFromFile(*specifierStr, fileContent, nullptr, fullPath, true);
if (FAILED(hr))
{
- // check if have it registered
if (!HostConfigFlags::flags.MuteHostErrorMsgIsEnabled)
{
- fprintf(stderr, "Couldn't load file '%s'\n", specifierStr.GetString());
+ auto actualModuleRecord = moduleRecordMap.find(*fullPath);
+ if (actualModuleRecord == moduleRecordMap.end() || moduleErrMap[actualModuleRecord->second] == RootModule)
+ {
+ fprintf(stderr, "Couldn't load file '%s'\n", specifierStr.GetString());
+ }
}
- LoadScript(nullptr, *specifierStr, nullptr, "module", true, WScriptJsrt::FinalizeFree, false);
+ LoadScript(nullptr, fullPath == nullptr ? *specifierStr : fullPath->c_str(), nullptr, "module", true, WScriptJsrt::FinalizeFree, false);
goto Error;
}
- LoadScript(nullptr, *specifierStr, fileContent, "module", true, WScriptJsrt::FinalizeFree, true);
+ LoadScript(nullptr, fullPath == nullptr ? *specifierStr : fullPath->c_str(), fileContent, "module", true, WScriptJsrt::FinalizeFree, true);
}
}
Error:
@@ -1922,9 +1960,10 @@ JsErrorCode WScriptJsrt::FetchImportedModuleHelper(JsModuleRecord referencingMod
{
GetDir(fullPath, &moduleDirMap[moduleRecord]);
InitializeModuleInfo(specifier, moduleRecord);
- moduleRecordMap[std::string(fullPath)] = moduleRecord;
- ModuleMessage* moduleMessage =
- WScriptJsrt::ModuleMessage::Create(referencingModule, specifier);
+ std::string pathKey = std::string(fullPath);
+ moduleRecordMap[pathKey] = moduleRecord;
+ moduleErrMap[moduleRecord] = ImportedModule;
+ ModuleMessage* moduleMessage = WScriptJsrt::ModuleMessage::Create(referencingModule, specifier, &pathKey);
if (moduleMessage == nullptr)
{
return JsErrorOutOfMemory;
@@ -1959,14 +1998,6 @@ JsErrorCode WScriptJsrt::FetchImportedModule(_In_ JsModuleRecord referencingModu
JsErrorCode WScriptJsrt::FetchImportedModuleFromScript(_In_ JsSourceContext dwReferencingSourceContext,
_In_ JsValueRef specifier, _Outptr_result_maybenull_ JsModuleRecord* dependentModuleRecord)
{
- // ch.exe assumes all imported source files are located at .
- auto scriptDirEntry = scriptDirMap.find(dwReferencingSourceContext);
- if (scriptDirEntry != scriptDirMap.end())
- {
- std::string dir = scriptDirEntry->second;
- return FetchImportedModuleHelper(nullptr, specifier, dependentModuleRecord, dir.c_str());
- }
-
return FetchImportedModuleHelper(nullptr, specifier, dependentModuleRecord);
}
@@ -1994,7 +2025,8 @@ JsErrorCode WScriptJsrt::NotifyModuleReadyCallback(_In_opt_ JsModuleRecord refer
ChakraRTInterface::JsGetAndClearException(&exception);
exception; // unused
}
- else
+
+ if (exceptionVar != nullptr || moduleErrMap[referencingModule] != ErroredModule)
{
WScriptJsrt::ModuleMessage* moduleMessage =
WScriptJsrt::ModuleMessage::Create(referencingModule, nullptr);
diff --git a/deps/chakrashim/core/bin/ch/WScriptJsrt.h b/deps/chakrashim/core/bin/ch/WScriptJsrt.h
index 82007791a82..957adc9556b 100644
--- a/deps/chakrashim/core/bin/ch/WScriptJsrt.h
+++ b/deps/chakrashim/core/bin/ch/WScriptJsrt.h
@@ -5,11 +5,19 @@
#pragma once
#include
+enum ModuleState
+{
+ RootModule,
+ ImportedModule,
+ ErroredModule
+};
+
class WScriptJsrt
{
public:
static bool Initialize();
static bool Uninitialize();
+ static JsErrorCode ModuleEntryPoint(LPCSTR fileName, LPCSTR fileContent, LPCSTR fullName);
class CallbackMessage : public MessageBase
{
@@ -35,17 +43,18 @@ class WScriptJsrt
private:
JsModuleRecord moduleRecord;
JsValueRef specifier;
+ std::string* fullPath;
- ModuleMessage(JsModuleRecord module, JsValueRef specifier);
+ ModuleMessage(JsModuleRecord module, JsValueRef specifier, std::string* fullPathPtr);
public:
~ModuleMessage();
virtual HRESULT Call(LPCSTR fileName) override;
- static ModuleMessage* Create(JsModuleRecord module, JsValueRef specifier)
+ static ModuleMessage* Create(JsModuleRecord module, JsValueRef specifier, std::string* fullPath = nullptr)
{
- return new ModuleMessage(module, specifier);
+ return new ModuleMessage(module, specifier, fullPath);
}
};
@@ -139,5 +148,6 @@ class WScriptJsrt
static DWORD_PTR sourceContext;
static std::map<std::string, JsModuleRecord> moduleRecordMap;
static std::map<JsModuleRecord, std::string> moduleDirMap;
+ static std::map<JsModuleRecord, ModuleState> moduleErrMap;
static std::map<DWORD_PTR, std::string> scriptDirMap;
};
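The moduleErrMap added here is what gates duplicate error reporting in WScriptJsrt.cpp: a record starts as RootModule or ImportedModule, a parse or evaluation failure moves it to ErroredModule, and only root-module failures are surfaced while errors on transitively imported modules are cleared quietly. A minimal standalone sketch of that gating pattern (not ch's actual plumbing):

    #include <cstdio>
    #include <map>

    enum ModuleState { RootModule, ImportedModule, ErroredModule };  // mirrors the enum above

    typedef void* ModuleRecord;   // stand-in for JsModuleRecord in this sketch
    static std::map<ModuleRecord, ModuleState> moduleErrMap;

    // Surface an error once, and only for the module the host was asked to run.
    static void OnModuleError(ModuleRecord module, const char* message)
    {
        if (moduleErrMap[module] == RootModule)
        {
            fprintf(stderr, "%s\n", message);   // root failures are reported
        }
        moduleErrMap[module] = ErroredModule;   // later failures stay silent
    }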
diff --git a/deps/chakrashim/core/bin/ch/ch.cpp b/deps/chakrashim/core/bin/ch/ch.cpp
index d365340608e..371011fbe42 100644
--- a/deps/chakrashim/core/bin/ch/ch.cpp
+++ b/deps/chakrashim/core/bin/ch/ch.cpp
@@ -452,6 +452,10 @@ HRESULT RunScript(const char* fileName, LPCSTR fileContents, size_t fileLength,
parserStateCache,
nullptr);
}
+ else if (HostConfigFlags::flags.Module)
+ {
+ runScript = WScriptJsrt::ModuleEntryPoint(fileName, fileContents, fullPath);
+ }
else // bufferValue == nullptr && parserStateCache == nullptr
{
JsValueRef scriptSource;
diff --git a/deps/chakrashim/core/bin/ch/ch.manifest b/deps/chakrashim/core/bin/ch/ch.manifest
deleted file mode 100644
index a8729d0ec0a..00000000000
--- a/deps/chakrashim/core/bin/ch/ch.manifest
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/deps/chakrashim/core/bin/ch/ch.vcxproj b/deps/chakrashim/core/bin/ch/ch.vcxproj
index 3ea7cb4f6f4..7f432caa8a4 100644
--- a/deps/chakrashim/core/bin/ch/ch.vcxproj
+++ b/deps/chakrashim/core/bin/ch/ch.vcxproj
@@ -120,12 +120,9 @@
- Microsoft400
+ Microsoft
-
-
-
-
\ No newline at end of file
+
diff --git a/deps/chakrashim/core/build.sh b/deps/chakrashim/core/build.sh
old mode 100755
new mode 100644
diff --git a/deps/chakrashim/core/deps/Chakra.ICU/Chakra.ICU.Build.props b/deps/chakrashim/core/deps/Chakra.ICU/Chakra.ICU.Build.props
index 0994ed09a0c..14eb2a7adb2 100644
--- a/deps/chakrashim/core/deps/Chakra.ICU/Chakra.ICU.Build.props
+++ b/deps/chakrashim/core/deps/Chakra.ICU/Chakra.ICU.Build.props
@@ -27,7 +27,7 @@
UCONFIG_NO_REGULAR_EXPRESSIONS=1;
UCONFIG_NO_SERVICE=1;
%(PreprocessorDefinitions)
-
+
@@ -36,6 +36,9 @@
_CRT_SECURE_NO_DEPRECATE;
%(PreprocessorDefinitions)
+
+
+ /utf-8 %(AdditionalOptions)
diff --git a/deps/chakrashim/core/deps/Chakra.ICU/Chakra.ICU.i18n.vcxproj b/deps/chakrashim/core/deps/Chakra.ICU/Chakra.ICU.i18n.vcxproj
index 8804a55dbca..de51a2f1bd4 100644
--- a/deps/chakrashim/core/deps/Chakra.ICU/Chakra.ICU.i18n.vcxproj
+++ b/deps/chakrashim/core/deps/Chakra.ICU/Chakra.ICU.i18n.vcxproj
@@ -31,9 +31,6 @@
%(AdditionalIncludeDirectories);
$(IcuSourceDirectory)\common
-
-
- /utf-8 %(AdditionalOptions)
Console
diff --git a/deps/chakrashim/core/jenkins/check_copyright.sh b/deps/chakrashim/core/jenkins/check_copyright.sh
old mode 100755
new mode 100644
diff --git a/deps/chakrashim/core/jenkins/check_eol.sh b/deps/chakrashim/core/jenkins/check_eol.sh
old mode 100755
new mode 100644
index ee3024fe394..f92a278f1b8
--- a/deps/chakrashim/core/jenkins/check_eol.sh
+++ b/deps/chakrashim/core/jenkins/check_eol.sh
@@ -15,7 +15,7 @@ fi
ERRFILE=check_eol.sh.err
rm -f $ERRFILE
-git diff --name-only `git merge-base origin/master HEAD` HEAD | grep -v -E "(test/.*\\.js|\\.cmd|\\.baseline|\\.wasm|\\.vcxproj|\\.vcproj|\\.sln)" | xargs -I % ./jenkins/check_file_eol.sh %
+git diff --name-only `git merge-base origin/master HEAD` HEAD | grep -v -E "(test/.*\\.js|\\.cmd|\\.baseline|\\.wasm|\\.wast|\\.vcxproj|\\.vcproj|\\.sln)" | xargs -I % ./jenkins/check_file_eol.sh %
if [ -e $ERRFILE ]; then # if error file exists then there were errors
>&2 echo "--------------" # leading >&2 means echo to stderr
diff --git a/deps/chakrashim/core/jenkins/check_file_ascii.sh b/deps/chakrashim/core/jenkins/check_file_ascii.sh
new file mode 100644
index 00000000000..fe7818ee788
--- /dev/null
+++ b/deps/chakrashim/core/jenkins/check_file_ascii.sh
@@ -0,0 +1,36 @@
+#-------------------------------------------------------------------------------------------------------
+# Copyright (C) Microsoft. All rights reserved.
+# Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+#-------------------------------------------------------------------------------------------------------
+
+ERRFILE=check_ascii.sh.err
+ERRFILETEMP=$ERRFILE.0
+
+# display a helpful message for someone reading the log
+echo "Check ascii > Checking $1"
+
+if [ ! -e $1 ]; then # the file wasn't present; not necessarily an error
+ echo "WARNING: file not found: $1"
+ exit 0 # don't report an error but don't run the rest of this file
+fi
+
+# grep for non-ascii - also exclude unprintable control characters at the end of the range
+# specifically include x09 (tab) as it is used in pal sources which are not excluded
+# from this check
+LC_CTYPE=C grep -nP '[^\x09-\x7E]' $1 > $ERRFILETEMP
+if [ $? -eq 0 ]; then # grep found matches ($?==0), so we found non-ascii in the file
+ echo "ERROR: non-ascii characters were introduced in $1" >> $ERRFILE
+
+ # Display a hexdump sample of the lines with non-ascii characters in them
+ # Don't pollute the log with every single matching line, first 10 lines should be enough.
+ echo "Displaying first 10 lines of text where non-ascii characters were found:" >> $ERRFILE
+ LC_CTYPE=C grep -nP '[^\x09-\x7E]' $1 | xxd -g 1 > $ERRFILETEMP
+ head -n 10 $ERRFILETEMP >> $ERRFILE
+
+ # To help the user, display how many lines of text actually contained non-ascii characters.
+ LINECOUNT=`python -c "file=open('$ERRFILETEMP', 'r'); print len(file.readlines())"`
+ echo "Total lines containing non-ascii: $LINECOUNT" >> $ERRFILE
+ echo "--------------" >> $ERRFILE # same length as '--- ERRORS ---'
+fi
+
+rm -f $ERRFILETEMP
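For reference, the grep range above accepts bytes 0x09 (tab) through 0x7E (tilde) and flags everything else, including high-bit bytes and stray control characters. The same byte-level condition written out in C++ (illustrative only, not part of the build):

    #include <cstdio>

    // True if every byte of the file falls in the accepted range [0x09, 0x7E],
    // mirroring the pattern '[^\x09-\x7E]' used by check_file_ascii.sh.
    static bool IsAsciiClean(const char* path)
    {
        FILE* f = fopen(path, "rb");
        if (f == nullptr)
        {
            return true;              // missing file: a warning, not an error
        }
        bool clean = true;
        int c;
        while ((c = fgetc(f)) != EOF)
        {
            if (c < 0x09 || c > 0x7E)
            {
                clean = false;        // non-ascii or control byte found
                break;
            }
        }
        fclose(f);
        return clean;
    }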
diff --git a/deps/chakrashim/core/jenkins/check_file_eol.sh b/deps/chakrashim/core/jenkins/check_file_eol.sh
old mode 100755
new mode 100644
index d4e48d02653..c2f82c0d5e0
--- a/deps/chakrashim/core/jenkins/check_file_eol.sh
+++ b/deps/chakrashim/core/jenkins/check_file_eol.sh
@@ -37,4 +37,10 @@ if [ $? -eq 0 ]; then # grep found matches ($?==0), so we found CR (0x0d) in the
echo "--------------" >> $ERRFILE # same length as '--- ERRORS ---'
fi
+# Check that each file ends in a newline character
+tail -c1 $1 | od -x | grep '000a' > $ERRFILETEMP
+if [ $? -ne 0 ]; then # last character is not a newline
+ echo "ERROR: $1 does not end in a new line" >> $ERRFILE
+fi
+
rm -f $ERRFILETEMP
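The tail -c1 | od -x | grep '000a' pipeline above reads the file's final byte and checks that its hex dump contains the LF code. The same condition expressed directly in C++ (a sketch, not part of the build):

    #include <cstdio>

    // True if the file's last byte is '\n' (0x0a).
    static bool EndsWithNewline(const char* path)
    {
        FILE* f = fopen(path, "rb");
        if (f == nullptr || fseek(f, -1, SEEK_END) != 0)
        {
            if (f != nullptr) { fclose(f); }
            return false;             // unreadable or empty file
        }
        int last = fgetc(f);
        fclose(f);
        return last == '\n';
    }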
diff --git a/deps/chakrashim/core/jenkins/check_tabs.sh b/deps/chakrashim/core/jenkins/check_tabs.sh
old mode 100755
new mode 100644
diff --git a/deps/chakrashim/core/jenkins/get_system_info.sh b/deps/chakrashim/core/jenkins/get_system_info.sh
old mode 100755
new mode 100644
diff --git a/deps/chakrashim/core/lib/Backend/AsmJsJITInfo.cpp b/deps/chakrashim/core/lib/Backend/AsmJsJITInfo.cpp
index c62cb8ba7df..5a5bae12fa7 100644
--- a/deps/chakrashim/core/lib/Backend/AsmJsJITInfo.cpp
+++ b/deps/chakrashim/core/lib/Backend/AsmJsJITInfo.cpp
@@ -97,4 +97,4 @@ AsmJsJITInfo::AccessNeedsBoundCheck(uint offset) const
{
return offset >= 0x10000;
}
-#endif
\ No newline at end of file
+#endif
diff --git a/deps/chakrashim/core/lib/Backend/BackendApi.cpp b/deps/chakrashim/core/lib/Backend/BackendApi.cpp
index 134555f1a08..075f8edaa7d 100644
--- a/deps/chakrashim/core/lib/Backend/BackendApi.cpp
+++ b/deps/chakrashim/core/lib/Backend/BackendApi.cpp
@@ -142,10 +142,10 @@ void CheckIsExecutable(Js::RecyclableObject * function, Js::JavascriptMethod ent
{
Js::ScriptContext * scriptContext = function->GetScriptContext();
// it's easy to call the default entry point from RecyclableObject.
- AssertMsg((Js::JavascriptFunction::Is(function) && Js::JavascriptFunction::FromVar(function)->IsExternalFunction())
+ AssertMsg((Js::VarIs(function) && Js::VarTo(function)->IsExternalFunction())
|| Js::CrossSite::IsThunk(entrypoint)
// External object with entrypoint
- || (!Js::JavascriptFunction::Is(function)
+ || (!Js::VarIs(function)
&& function->IsExternal()
&& Js::JavascriptConversion::IsCallable(function))
|| !scriptContext->IsActuallyClosed()
@@ -160,7 +160,7 @@ void CheckIsExecutable(Js::RecyclableObject * function, Js::JavascriptMethod ent
{
return;
}
-
+
Js::TypeId typeId = Js::JavascriptOperators::GetTypeId(function);
if (typeId == Js::TypeIds_HostDispatch)
{
diff --git a/deps/chakrashim/core/lib/Backend/BackendOpCodeAttrAsmJs.cpp b/deps/chakrashim/core/lib/Backend/BackendOpCodeAttrAsmJs.cpp
index b647a421376..c52dc963a08 100644
--- a/deps/chakrashim/core/lib/Backend/BackendOpCodeAttrAsmJs.cpp
+++ b/deps/chakrashim/core/lib/Backend/BackendOpCodeAttrAsmJs.cpp
@@ -68,4 +68,4 @@ namespace OpCodeAttrAsmJs
}
}; // OpCodeAttrAsmJs
-#endif
\ No newline at end of file
+#endif
diff --git a/deps/chakrashim/core/lib/Backend/BackendOpCodeAttrAsmJs.h b/deps/chakrashim/core/lib/Backend/BackendOpCodeAttrAsmJs.h
index d267d7516f6..d43359aea99 100644
--- a/deps/chakrashim/core/lib/Backend/BackendOpCodeAttrAsmJs.h
+++ b/deps/chakrashim/core/lib/Backend/BackendOpCodeAttrAsmJs.h
@@ -12,4 +12,4 @@ namespace OpCodeAttrAsmJs
bool HasProfiledOp(Js::OpCodeAsmJs opcode);
bool IsProfiledOp(Js::OpCodeAsmJs opcode);
};
-#endif
\ No newline at end of file
+#endif
diff --git a/deps/chakrashim/core/lib/Backend/BackwardPass.cpp b/deps/chakrashim/core/lib/Backend/BackwardPass.cpp
index af1908b2bf5..09d05a17ea0 100644
--- a/deps/chakrashim/core/lib/Backend/BackwardPass.cpp
+++ b/deps/chakrashim/core/lib/Backend/BackwardPass.cpp
@@ -10,6 +10,7 @@
BackwardPass::BackwardPass(Func * func, GlobOpt * globOpt, Js::Phase tag)
: func(func), globOpt(globOpt), tag(tag), currentPrePassLoop(nullptr), tempAlloc(nullptr),
preOpBailOutInstrToProcess(nullptr),
+ considerSymAsRealUseInNoImplicitCallUses(nullptr),
isCollectionPass(false), currentRegion(nullptr),
collectionPassSubPhase(CollectionPassSubPhase::None),
isLoopPrepass(false)
@@ -309,7 +310,7 @@ BackwardPass::MarkScopeObjSymUseForStackArgOpt()
IR::Instr * instr = this->currentInstr;
if (tag == Js::DeadStorePhase)
{
- if (instr->DoStackArgsOpt(this->func) && instr->m_func->GetScopeObjSym() != nullptr && this->DoByteCodeUpwardExposedUsed())
+ if (instr->DoStackArgsOpt() && instr->m_func->GetScopeObjSym() != nullptr && this->DoByteCodeUpwardExposedUsed())
{
this->currentBlock->byteCodeUpwardExposedUsed->Set(instr->m_func->GetScopeObjSym()->m_id);
}
@@ -321,11 +322,11 @@ BackwardPass::ProcessBailOnStackArgsOutOfActualsRange()
{
IR::Instr * instr = this->currentInstr;
- if (tag == Js::DeadStorePhase &&
- (instr->m_opcode == Js::OpCode::LdElemI_A || instr->m_opcode == Js::OpCode::TypeofElem) &&
+ if (tag == Js::DeadStorePhase &&
+ (instr->m_opcode == Js::OpCode::LdElemI_A || instr->m_opcode == Js::OpCode::TypeofElem) &&
instr->HasBailOutInfo() && !IsPrePass())
{
- if (instr->DoStackArgsOpt(this->func))
+ if (instr->DoStackArgsOpt())
{
AssertMsg(instr->GetBailOutKind() & IR::BailOnStackArgsOutOfActualsRange, "Stack args bail out is not set when the optimization is turned on? ");
if (instr->GetBailOutKind() & ~IR::BailOnStackArgsOutOfActualsRange)
@@ -411,8 +412,6 @@ BackwardPass::Optimize()
candidateSymsRequiredToBeInt = &localCandidateSymsRequiredToBeInt;
BVSparse<JitArenaAllocator> localCandidateSymsRequiredToBeLossyInt(tempAlloc);
candidateSymsRequiredToBeLossyInt = &localCandidateSymsRequiredToBeLossyInt;
- BVSparse<JitArenaAllocator> localConsiderSymsAsRealUsesInNoImplicitCallUses(tempAlloc);
- considerSymsAsRealUsesInNoImplicitCallUses = &localConsiderSymsAsRealUsesInNoImplicitCallUses;
intOverflowCurrentlyMattersInRange = true;
FloatSymEquivalenceMap localFloatSymEquivalenceMap(tempAlloc);
@@ -737,7 +736,7 @@ BackwardPass::MergeSuccBlocksInfo(BasicBlock * block)
this->func->GetDebugNumberSet(debugStringBuffer),
block->GetBlockNum(), blockSucc->GetBlockNum());
- auto fixupFrom = [block, blockSucc, this](Bucket<AddPropertyCacheBucket> &bucket)
+ auto fixupFrom = [block, blockSucc, upwardExposedUses, this](Bucket<AddPropertyCacheBucket> &bucket)
{
AddPropertyCacheBucket *fromData = &bucket.element;
if (fromData->GetInitialType() == nullptr ||
@@ -746,10 +745,10 @@ BackwardPass::MergeSuccBlocksInfo(BasicBlock * block)
return;
}
- this->InsertTypeTransitionsAtPriorSuccessors(block, blockSucc, bucket.value, fromData);
+ this->InsertTypeTransitionsAtPriorSuccessors(block, blockSucc, bucket.value, fromData, upwardExposedUses);
};
- auto fixupTo = [blockSucc, this](Bucket<AddPropertyCacheBucket> &bucket)
+ auto fixupTo = [blockSucc, upwardExposedUses, this](Bucket<AddPropertyCacheBucket> &bucket)
{
AddPropertyCacheBucket *toData = &bucket.element;
if (toData->GetInitialType() == nullptr ||
@@ -758,7 +757,7 @@ BackwardPass::MergeSuccBlocksInfo(BasicBlock * block)
return;
}
- this->InsertTypeTransitionAtBlock(blockSucc, bucket.value, toData);
+ this->InsertTypeTransitionAtBlock(blockSucc, bucket.value, toData, upwardExposedUses);
};
if (blockSucc->stackSymToFinalType != nullptr)
@@ -1646,8 +1645,6 @@ BackwardPass::ProcessLoop(BasicBlock * lastBlock)
{
Assert(loop->symsAssignedToInLoop == nullptr);
loop->symsAssignedToInLoop = JitAnew(this->globOpt->alloc, BVSparse<JitArenaAllocator>, this->globOpt->alloc);
- Assert(loop->preservesNumberValue == nullptr);
- loop->preservesNumberValue = JitAnew(this->globOpt->alloc, BVSparse<JitArenaAllocator>, this->globOpt->alloc);
}
FOREACH_BLOCK_BACKWARD_IN_RANGE_DEAD_OR_ALIVE(block, lastBlock, nullptr)
@@ -1719,19 +1716,19 @@ BackwardPass::ProcessBailOutArgObj(BailOutInfo * bailOutInfo, BVSparse<JitArenaAllocator> * byteCodeUpwardExposedUsed)
            if (byteCodeUpwardExposedUsed->TestAndClear(symId))
{
- if (bailOutInfo->usedCapturedValues.argObjSyms == nullptr)
+ if (bailOutInfo->usedCapturedValues->argObjSyms == nullptr)
{
- bailOutInfo->usedCapturedValues.argObjSyms = JitAnew(this->func->m_alloc,
+ bailOutInfo->usedCapturedValues->argObjSyms = JitAnew(this->func->m_alloc,
BVSparse<JitArenaAllocator>, this->func->m_alloc);
}
- bailOutInfo->usedCapturedValues.argObjSyms->Set(symId);
+ bailOutInfo->usedCapturedValues->argObjSyms->Set(symId);
}
}
NEXT_BITSET_IN_SPARSEBV;
}
- if (bailOutInfo->usedCapturedValues.argObjSyms)
+ if (bailOutInfo->usedCapturedValues->argObjSyms)
{
- byteCodeUpwardExposedUsed->Minus(bailOutInfo->usedCapturedValues.argObjSyms);
+ byteCodeUpwardExposedUsed->Minus(bailOutInfo->usedCapturedValues->argObjSyms);
}
}
@@ -1741,7 +1738,7 @@ BackwardPass::ProcessBailOutConstants(BailOutInfo * bailOutInfo, BVSparse<JitArenaAllocator> * byteCodeUpwardExposedUsed)
    Assert(this->tag != Js::BackwardPhase);
// Remove constants that we are already going to restore
- SListBase<ConstantStackSymValue> * usedConstantValues = &bailOutInfo->usedCapturedValues.constantValues;
+ SListBase<ConstantStackSymValue> * usedConstantValues = &bailOutInfo->usedCapturedValues->constantValues;
FOREACH_SLISTBASE_ENTRY(ConstantStackSymValue, value, usedConstantValues)
{
byteCodeUpwardExposedUsed->Clear(value.Key()->m_id);
@@ -1773,7 +1770,7 @@ BackwardPass::ProcessBailOutCopyProps(BailOutInfo * bailOutInfo, BVSparse<JitArenaAllocator> * byteCodeUpwardExposedUsed)
    Assert(!this->func->GetJITFunctionBody()->IsAsmJsMode());
// Remove copy prop that we were already going to restore
- SListBase<CopyPropSyms> * usedCopyPropSyms = &bailOutInfo->usedCapturedValues.copyPropSyms;
+ SListBase<CopyPropSyms> * usedCopyPropSyms = &bailOutInfo->usedCapturedValues->copyPropSyms;
FOREACH_SLISTBASE_ENTRY(CopyPropSyms, copyPropSyms, usedCopyPropSyms)
{
byteCodeUpwardExposedUsed->Clear(copyPropSyms.Key()->m_id);
@@ -1839,7 +1836,7 @@ BackwardPass::ProcessBailOutCopyProps(BailOutInfo * bailOutInfo, BVSparse<JitArenaAllocator> * byteCodeUpwardExposedUsed)
- if (instr->GetDst() && instr->GetDst()->IsSymOpnd())
+ if (!PHASE_ON(Js::DeadStoreTypeChecksOnStoresPhase, this->func) && instr->GetDst() && instr->GetDst()->IsSymOpnd())
{
return;
}
@@ -2605,7 +2602,7 @@ BackwardPass::ProcessBailOutInfo(IR::Instr * instr, BailOutInfo * bailOutInfo)
tempBv->And(this->func->m_nonTempLocalVars, bailOutInfo->liveVarSyms);
// Remove syms that are restored in other ways than byteCodeUpwardExposedUsed.
- FOREACH_SLIST_ENTRY(ConstantStackSymValue, value, &bailOutInfo->usedCapturedValues.constantValues)
+ FOREACH_SLIST_ENTRY(ConstantStackSymValue, value, &bailOutInfo->usedCapturedValues->constantValues)
{
Assert(value.Key()->HasByteCodeRegSlot() || value.Key()->GetInstrDef()->m_opcode == Js::OpCode::BytecodeArgOutCapture);
if (value.Key()->HasByteCodeRegSlot())
@@ -2614,7 +2611,7 @@ BackwardPass::ProcessBailOutInfo(IR::Instr * instr, BailOutInfo * bailOutInfo)
}
}
NEXT_SLIST_ENTRY;
- FOREACH_SLIST_ENTRY(CopyPropSyms, value, &bailOutInfo->usedCapturedValues.copyPropSyms)
+ FOREACH_SLIST_ENTRY(CopyPropSyms, value, &bailOutInfo->usedCapturedValues->copyPropSyms)
{
Assert(value.Key()->HasByteCodeRegSlot() || value.Key()->GetInstrDef()->m_opcode == Js::OpCode::BytecodeArgOutCapture);
if (value.Key()->HasByteCodeRegSlot())
@@ -2623,9 +2620,9 @@ BackwardPass::ProcessBailOutInfo(IR::Instr * instr, BailOutInfo * bailOutInfo)
}
}
NEXT_SLIST_ENTRY;
- if (bailOutInfo->usedCapturedValues.argObjSyms)
+ if (bailOutInfo->usedCapturedValues->argObjSyms)
{
- tempBv->Minus(bailOutInfo->usedCapturedValues.argObjSyms);
+ tempBv->Minus(bailOutInfo->usedCapturedValues->argObjSyms);
}
byteCodeUpwardExposedUsed->Or(tempBv);
@@ -2804,7 +2801,7 @@ BackwardPass::ProcessBlock(BasicBlock * block)
this->currentInstr = instr;
this->currentRegion = this->currentBlock->GetFirstInstr()->AsLabelInstr()->GetRegion();
-
+
IR::Instr * insertedInstr = TryChangeInstrForStackArgOpt();
if (insertedInstr != nullptr)
{
@@ -3763,14 +3760,14 @@ BackwardPass::ProcessBlock(BasicBlock * block)
block->loop->regAlloc.liveOnBackEdgeSyms = block->upwardExposedUses->CopyNew(this->func->m_alloc);
}
- Assert(considerSymsAsRealUsesInNoImplicitCallUses->IsEmpty());
+ Assert(!considerSymAsRealUseInNoImplicitCallUses);
#if DBG_DUMP
TraceBlockUses(block, false);
#endif
}
-bool
+bool
BackwardPass::CanDeadStoreInstrForScopeObjRemoval(Sym *sym) const
{
if (tag == Js::DeadStorePhase && this->currentInstr->m_func->IsStackArgsEnabled())
@@ -3926,7 +3923,7 @@ BackwardPass::DeadStoreOrChangeInstrForScopeObjRemoval(IR::Instr ** pInstrPrev)
case Js::OpCode::GetCachedFunc:
{
// <dst> = GetCachedFunc <scopeObject>, <functionNum>
- // is converted to
+ // is converted to
// <dst> = NewScFunc <functionNum>, <env: scopeObject>
if (instr->GetSrc1()->IsScopeObjOpnd(currFunc))
@@ -3952,7 +3949,7 @@ IR::Instr *
BackwardPass::TryChangeInstrForStackArgOpt()
{
IR::Instr * instr = this->currentInstr;
- if (tag == Js::DeadStorePhase && instr->DoStackArgsOpt(this->func))
+ if (tag == Js::DeadStorePhase && instr->DoStackArgsOpt())
{
switch (instr->m_opcode)
{
@@ -3988,8 +3985,8 @@ BackwardPass::TryChangeInstrForStackArgOpt()
* -This is to facilitate Bailout to record the live Scope object Sym, whenever required.
* -Reason for doing is this because - Scope object has to be implicitly live whenever Heap Arguments object is live.
* -When we restore HeapArguments object in the bail out path, it expects the scope object also to be restored - if one was created.
- * -We do not know detailed information about Heap arguments obj syms(aliasing etc.) until we complete Forward Pass.
- * -And we want to avoid dead sym clean up (in this case, scope object though not explicitly live, it is live implicitly) during Block merging in the forward pass.
+ * -We do not know detailed information about Heap arguments obj syms(aliasing etc.) until we complete Forward Pass.
+ * -And we want to avoid dead sym clean up (in this case, scope object though not explicitly live, it is live implicitly) during Block merging in the forward pass.
* -Hence this is the optimal spot to do this.
*/
@@ -4026,7 +4023,7 @@ BackwardPass::IsFormalParamSym(Func * func, Sym * sym) const
if (sym->IsPropertySym())
{
- //If the sym is a propertySym, then see if the propertyId is within the range of the formals
+ //If the sym is a propertySym, then see if the propertyId is within the range of the formals
//We can have other properties stored in the scope object other than the formals (following the formals).
PropertySym * propSym = sym->AsPropertySym();
IntConstType value = propSym->m_propertyId;
@@ -4159,17 +4156,13 @@ BackwardPass::UpdateImplicitCallBailOutKind(IR::Instr *const instr, bool needsBa
IR::BailOutKind implicitCallBailOutKind = needsBailOutOnImplicitCall ? IR::BailOutOnImplicitCalls : IR::BailOutInvalid;
- IR::BailOutKind instrBailOutKind = instr->GetBailOutKind();
+ const IR::BailOutKind instrBailOutKind = instr->GetBailOutKind();
if (instrBailOutKind & IR::BailOutMarkTempObject)
{
+ // Don't remove the implicit call pre op bailout for mark temp object
// Remove the mark temp object bit, as we don't need it after the dead store pass
- instrBailOutKind &= ~IR::BailOutMarkTempObject;
- instr->SetBailOutKind(instrBailOutKind);
-
- if (!instr->GetBailOutInfo()->canDeadStore)
- {
- return true;
- }
+ instr->SetBailOutKind(instrBailOutKind & ~IR::BailOutMarkTempObject);
+ return true;
}
const IR::BailOutKind instrImplicitCallBailOutKind = instrBailOutKind & ~IR::BailOutKindBits;
@@ -4236,8 +4229,9 @@ BackwardPass::ProcessNoImplicitCallUses(IR::Instr *const instr)
{
IR::RegOpnd *const regSrc = src->AsRegOpnd();
sym = regSrc->m_sym;
- if(considerSymsAsRealUsesInNoImplicitCallUses->TestAndClear(sym->m_id))
+ if(considerSymAsRealUseInNoImplicitCallUses && considerSymAsRealUseInNoImplicitCallUses == sym)
{
+ considerSymAsRealUseInNoImplicitCallUses = nullptr;
ProcessStackSymUse(sym->AsStackSym(), true);
}
if(regSrc->IsArrayRegOpnd())
@@ -4329,10 +4323,7 @@ BackwardPass::ProcessNoImplicitCallDef(IR::Instr *const instr)
const bool transferArrayLengthSymUse = !!currentBlock->noImplicitCallArrayLengthSymUses->TestAndClear(dstSym->m_id);
IR::Opnd *const src = instr->GetSrc1();
-
- // Stop attempting to transfer noImplicitCallUses symbol if the instr is not a transfer instr (based on the opcode's
- // flags) or does not have the attributes to be a transfer instr (based on the existance of src and src2).
- if(!src || (instr->GetSrc2() && !OpCodeAttr::NonIntTransfer(instr->m_opcode)))
+ if(!src || instr->GetSrc2())
{
return;
}
@@ -4655,6 +4646,7 @@ BackwardPass::ProcessArrayRegOpndUse(IR::Instr *const instr, IR::ArrayRegOpnd *c
// ProcessNoImplicitCallUses automatically marks JS array reg opnds and their corresponding syms as live. A typed
// array's head segment length sym also needs to be marked as live at its use in the NoImplicitCallUses instruction,
// but it is just in a reg opnd. Flag the opnd to have the sym be marked as live when that instruction is processed.
+ Assert(!considerSymAsRealUseInNoImplicitCallUses);
IR::Opnd *const use =
FindNoImplicitCallUse(
instr,
@@ -4665,7 +4657,7 @@ BackwardPass::ProcessArrayRegOpndUse(IR::Instr *const instr, IR::ArrayRegOpnd *c
});
if(use)
{
- considerSymsAsRealUsesInNoImplicitCallUses->Set(arrayRegOpnd->HeadSegmentLengthSym()->m_id);
+ considerSymAsRealUseInNoImplicitCallUses = arrayRegOpnd->HeadSegmentLengthSym();
}
}
}
@@ -4692,9 +4684,10 @@ BackwardPass::ProcessNewScObject(IR::Instr* instr)
return;
}
- if (instr->HasBailOutInfo() && (instr->GetBailOutKind() & ~IR::BailOutKindBits) == IR::BailOutFailedCtorGuardCheck)
+ if (instr->HasBailOutInfo())
{
Assert(instr->IsProfiledInstr());
+ Assert(instr->GetBailOutKind() == IR::BailOutFailedCtorGuardCheck);
Assert(instr->GetDst()->IsRegOpnd());
BasicBlock * block = this->currentBlock;
@@ -4720,7 +4713,7 @@ BackwardPass::ProcessNewScObject(IR::Instr* instr)
Assert(pBucket->GetInitialType() == ctorCache->GetType());
if (!this->IsPrePass())
{
- this->InsertTypeTransition(instr->m_next, objSym, pBucket);
+ this->InsertTypeTransition(instr->m_next, objSym, pBucket, block->upwardExposedUses);
}
#if DBG
pBucket->deadStoreUnavailableInitialType = pBucket->GetInitialType();
@@ -5019,24 +5012,16 @@ BackwardPass::UpdateArrayBailOutKind(IR::Instr *const instr)
return;
}
- instr->GetDst()->AsIndirOpnd()->AllowConversion(true);
IR::BailOutKind includeBailOutKinds = IR::BailOutInvalid;
if (!baseValueType.IsNotNativeArray() &&
+ (!baseValueType.IsLikelyNativeArray() || instr->GetSrc1()->IsVar()) &&
!currentBlock->noImplicitCallNativeArrayUses->IsEmpty() &&
!(instr->GetBailOutKind() & IR::BailOutOnArrayAccessHelperCall))
{
// There is an upwards-exposed use of a native array. Since the array referenced by this instruction can be aliased,
// this instruction needs to bail out if it converts the native array even if this array specifically is not
// upwards-exposed.
- if (!baseValueType.IsLikelyNativeArray() || instr->GetSrc1()->IsVar())
- {
- includeBailOutKinds |= IR::BailOutConvertedNativeArray;
- }
- else
- {
- // We are assuming that array conversion is impossible here, so make sure we execute code that fails if conversion does happen.
- instr->GetDst()->AsIndirOpnd()->AllowConversion(false);
- }
+ includeBailOutKinds |= IR::BailOutConvertedNativeArray;
}
if(baseOpnd->IsArrayRegOpnd() && baseOpnd->AsArrayRegOpnd()->EliminatedUpperBoundCheck())
@@ -5306,7 +5291,7 @@ BackwardPass::ProcessPropertySymOpndUse(IR::PropertySymOpnd * opnd)
pBucket->GetFinalType() != nullptr &&
pBucket->GetFinalType() != pBucket->GetInitialType())
{
- this->InsertTypeTransition(this->currentInstr->m_next, baseSym, pBucket);
+ this->InsertTypeTransition(this->currentInstr->m_next, baseSym, pBucket, block->upwardExposedUses);
pBucket->SetFinalType(pBucket->GetInitialType());
}
}
@@ -5325,9 +5310,6 @@ BackwardPass::ProcessPropertySymOpndUse(IR::PropertySymOpnd * opnd)
void
BackwardPass::TrackObjTypeSpecProperties(IR::PropertySymOpnd *opnd, BasicBlock *block)
{
- StackSym *auxSlotPtrSym = nullptr;
- bool auxSlotPtrUpwardExposed = false;
-
Assert(tag == Js::DeadStorePhase);
Assert(opnd->IsTypeCheckSeqCandidate());
@@ -5394,7 +5376,6 @@ BackwardPass::TrackObjTypeSpecProperties(IR::PropertySymOpnd *opnd, BasicBlock *
#endif
bucket->AddToGuardedPropertyOps(opnd->GetObjTypeSpecFldId());
- auxSlotPtrUpwardExposed = PHASE_ON(Js::ReuseAuxSlotPtrPhase, this->func) && opnd->UsesAuxSlot() && !opnd->IsLoadedFromProto() && opnd->IsTypeChecked();
if (opnd->NeedsMonoCheck())
{
@@ -5429,14 +5410,7 @@ BackwardPass::TrackObjTypeSpecProperties(IR::PropertySymOpnd *opnd, BasicBlock *
// Some instr protected by this one requires a monomorphic type check. (E.g., final type opt,
// fixed field not loaded from prototype.) Note the IsTypeAvailable test above: only do this at
// the initial type check that protects this path.
- if (!opnd->SetMonoGuardType(bucket->GetMonoGuardType()))
- {
- // We can't safely check for the required type here. Clear the objtypespec info to disable optimization
- // using this inline cache, since there appears to be a mismatch, and re-jit.
- // (Dead store pass is too late to generate the bailout points we need to use this type correctly.)
- this->currentInstr->m_func->ClearObjTypeSpecFldInfo(opnd->m_inlineCacheIndex);
- throw Js::RejitException(RejitReason::FailedEquivalentTypeCheck);
- }
+ opnd->SetMonoGuardType(bucket->GetMonoGuardType());
this->currentInstr->ChangeEquivalentToMonoTypeCheckBailOut();
}
bucket->SetMonoGuardType(nullptr);
@@ -5448,12 +5422,6 @@ BackwardPass::TrackObjTypeSpecProperties(IR::PropertySymOpnd *opnd, BasicBlock *
bucket->SetGuardedPropertyOps(nullptr);
JitAdelete(this->tempAlloc, guardedPropertyOps);
block->stackSymToGuardedProperties->Clear(objSym->m_id);
- auxSlotPtrSym = opnd->GetAuxSlotPtrSym();
- if (auxSlotPtrSym)
- {
- this->currentBlock->upwardExposedUses->Clear(auxSlotPtrSym->m_id);
- }
- auxSlotPtrUpwardExposed = false;
}
}
#if DBG
@@ -5472,11 +5440,25 @@ BackwardPass::TrackObjTypeSpecProperties(IR::PropertySymOpnd *opnd, BasicBlock *
opnd->SetGuardedPropOp(opnd->GetObjTypeSpecFldId());
}
- if (auxSlotPtrUpwardExposed)
+ if (opnd->UsesAuxSlot() && opnd->IsTypeCheckSeqParticipant() && !opnd->HasTypeMismatch() && !opnd->IsLoadedFromProto())
{
- // This is an upward-exposed use of the aux slot pointer.
- auxSlotPtrSym = opnd->EnsureAuxSlotPtrSym(this->func);
- this->currentBlock->upwardExposedUses->Set(auxSlotPtrSym->m_id);
+ bool auxSlotPtrUpwardExposed = false;
+ StackSym *auxSlotPtrSym = opnd->GetAuxSlotPtrSym();
+ if (opnd->IsAuxSlotPtrSymAvailable())
+ {
+ // This is an upward-exposed use of the aux slot pointer.
+ Assert(auxSlotPtrSym);
+ auxSlotPtrUpwardExposed = this->currentBlock->upwardExposedUses->TestAndSet(auxSlotPtrSym->m_id);
+ }
+ else if (auxSlotPtrSym != nullptr)
+ {
+ // The aux slot pointer is not upward-exposed at this point.
+ auxSlotPtrUpwardExposed = this->currentBlock->upwardExposedUses->TestAndClear(auxSlotPtrSym->m_id);
+ }
+ if (!this->IsPrePass() && auxSlotPtrUpwardExposed)
+ {
+ opnd->SetProducesAuxSlotPtr(true);
+ }
}
}
@@ -5732,16 +5714,18 @@ BackwardPass::TrackAddPropertyTypes(IR::PropertySymOpnd *opnd, BasicBlock *block
}
void
-BackwardPass::InsertTypeTransition(IR::Instr *instrInsertBefore, int symId, AddPropertyCacheBucket *data)
+BackwardPass::InsertTypeTransition(IR::Instr *instrInsertBefore, int symId, AddPropertyCacheBucket *data, BVSparse<JitArenaAllocator>* upwardExposedUses)
{
StackSym *objSym = this->func->m_symTable->FindStackSym(symId);
Assert(objSym);
- this->InsertTypeTransition(instrInsertBefore, objSym, data);
+ this->InsertTypeTransition(instrInsertBefore, objSym, data, upwardExposedUses);
}
void
-BackwardPass::InsertTypeTransition(IR::Instr *instrInsertBefore, StackSym *objSym, AddPropertyCacheBucket *data)
+BackwardPass::InsertTypeTransition(IR::Instr *instrInsertBefore, StackSym *objSym, AddPropertyCacheBucket *data, BVSparse<JitArenaAllocator>* upwardExposedUses)
{
+ Assert(!this->IsPrePass());
+
IR::RegOpnd *baseOpnd = IR::RegOpnd::New(objSym, TyMachReg, this->func);
baseOpnd->SetIsJITOptimizedReg(true);
@@ -5758,7 +5742,7 @@ BackwardPass::InsertTypeTransition(IR::Instr *instrInsertBefore, StackSym *objSy
IR::Instr *adjustTypeInstr =
IR::Instr::New(Js::OpCode::AdjustObjType, finalTypeOpnd, baseOpnd, initialTypeOpnd, this->func);
- if (this->currentBlock->upwardExposedUses)
+ if (upwardExposedUses)
{
// If this type change causes a slot adjustment, the aux slot pointer (if any) will be reloaded here, so take it out of upwardExposedUses.
int oldCount;
@@ -5772,7 +5756,10 @@ BackwardPass::InsertTypeTransition(IR::Instr *instrInsertBefore, StackSym *objSy
StackSym *auxSlotPtrSym = baseOpnd->m_sym->GetAuxSlotPtrSym();
if (auxSlotPtrSym)
{
- this->currentBlock->upwardExposedUses->Clear(auxSlotPtrSym->m_id);
+ if (upwardExposedUses->Test(auxSlotPtrSym->m_id))
+ {
+ adjustTypeInstr->m_opcode = Js::OpCode::AdjustObjTypeReloadAuxSlotPtr;
+ }
}
}
}
@@ -5781,7 +5768,7 @@ BackwardPass::InsertTypeTransition(IR::Instr *instrInsertBefore, StackSym *objSy
}
void
-BackwardPass::InsertTypeTransitionAfterInstr(IR::Instr *instr, int symId, AddPropertyCacheBucket *data)
+BackwardPass::InsertTypeTransitionAfterInstr(IR::Instr *instr, int symId, AddPropertyCacheBucket *data, BVSparse<JitArenaAllocator>* upwardExposedUses)
{
if (!this->IsPrePass())
{
@@ -5790,11 +5777,11 @@ BackwardPass::InsertTypeTransitionAfterInstr(IR::Instr *instr, int symId, AddPro
{
// The instr with the bailout is something like a branch that may not fall through.
// Insert the transitions instead at the beginning of each successor block.
- this->InsertTypeTransitionsAtPriorSuccessors(this->currentBlock, nullptr, symId, data);
+ this->InsertTypeTransitionsAtPriorSuccessors(this->currentBlock, nullptr, symId, data, upwardExposedUses);
}
else
{
- this->InsertTypeTransition(instr->m_next, symId, data);
+ this->InsertTypeTransition(instr->m_next, symId, data, upwardExposedUses);
}
}
// Note: we could probably clear this entry out of the table, but I don't know
@@ -5803,7 +5790,7 @@ BackwardPass::InsertTypeTransitionAfterInstr(IR::Instr *instr, int symId, AddPro
}
void
-BackwardPass::InsertTypeTransitionAtBlock(BasicBlock *block, int symId, AddPropertyCacheBucket *data)
+BackwardPass::InsertTypeTransitionAtBlock(BasicBlock *block, int symId, AddPropertyCacheBucket *data, BVSparse<JitArenaAllocator>* upwardExposedUses)
{
bool inserted = false;
FOREACH_INSTR_IN_BLOCK(instr, block)
@@ -5826,7 +5813,7 @@ BackwardPass::InsertTypeTransitionAtBlock(BasicBlock *block, int symId, AddPrope
}
else
{
- this->InsertTypeTransition(instr, symId, data);
+ this->InsertTypeTransition(instr, symId, data, upwardExposedUses);
inserted = true;
break;
}
@@ -5836,7 +5823,7 @@ BackwardPass::InsertTypeTransitionAtBlock(BasicBlock *block, int symId, AddPrope
if (!inserted)
{
Assert(block->GetLastInstr()->m_next);
- this->InsertTypeTransition(block->GetLastInstr()->m_next, symId, data);
+ this->InsertTypeTransition(block->GetLastInstr()->m_next, symId, data, upwardExposedUses);
}
}
@@ -5845,7 +5832,8 @@ BackwardPass::InsertTypeTransitionsAtPriorSuccessors(
BasicBlock *block,
BasicBlock *blockSucc,
int symId,
- AddPropertyCacheBucket *data)
+ AddPropertyCacheBucket *data,
+ BVSparse<JitArenaAllocator>* upwardExposedUses)
{
// For each successor of block prior to blockSucc, adjust the type.
FOREACH_SUCCESSOR_BLOCK(blockFix, block)
@@ -5855,7 +5843,7 @@ BackwardPass::InsertTypeTransitionsAtPriorSuccessors(
return;
}
- this->InsertTypeTransitionAtBlock(blockFix, symId, data);
+ this->InsertTypeTransitionAtBlock(blockFix, symId, data, upwardExposedUses);
}
NEXT_SUCCESSOR_BLOCK;
}
@@ -5873,7 +5861,7 @@ BackwardPass::InsertTypeTransitionsAtPotentialKills()
// Also do this for ctor cache updates, to avoid putting a type in the ctor cache that extends past
// the end of the ctor that the cache covers.
this->ForEachAddPropertyCacheBucket([&](int symId, AddPropertyCacheBucket *data)->bool {
- this->InsertTypeTransitionAfterInstr(instr, symId, data);
+ this->InsertTypeTransitionAfterInstr(instr, symId, data, this->currentBlock->upwardExposedUses);
return false;
});
}
@@ -5899,7 +5887,7 @@ BackwardPass::InsertTypeTransitionsAtPotentialKills()
if (this->TransitionUndoesObjectHeaderInlining(data))
{
// We're transitioning from inlined to non-inlined, so we can't push it up any farther.
- this->InsertTypeTransitionAfterInstr(instr, symId, data);
+ this->InsertTypeTransitionAfterInstr(instr, symId, data, this->currentBlock->upwardExposedUses);
}
return false;
});
@@ -6568,7 +6556,6 @@ BackwardPass::TrackIntUsage(IR::Instr *const instr)
case Js::OpCode::Coerce_Regex:
case Js::OpCode::Coerce_StrOrRegex:
case Js::OpCode::Conv_PrimStr:
- case Js::OpCode::Conv_Prop:
// These instructions don't generate -0, and their behavior is the same for any src that is -0 or +0
SetNegativeZeroDoesNotMatterIfLastUse(instr->GetSrc1());
SetNegativeZeroDoesNotMatterIfLastUse(instr->GetSrc2());
@@ -7441,52 +7428,6 @@ BackwardPass::TrackFloatSymEquivalence(IR::Instr *const instr)
}
}
-bool
-BackwardPass::SymIsIntconstOrSelf(Sym *sym, IR::Opnd *opnd)
-{
- Assert(sym->IsStackSym());
- if (!opnd->IsRegOpnd())
- {
- return false;
- }
- StackSym *opndSym = opnd->AsRegOpnd()->m_sym;
-
- if (sym == opndSym)
- {
- return true;
- }
-
- if (!opndSym->IsSingleDef())
- {
- return false;
- }
-
- if (opndSym->GetInstrDef()->m_opcode == Js::OpCode::LdC_A_I4)
- {
- return true;
- }
-
- return false;
-}
-
-bool
-BackwardPass::InstrPreservesNumberValues(IR::Instr *instr, Sym *defSym)
-{
- if (instr->m_opcode == Js::OpCode::Ld_A)
- {
- if (instr->GetSrc1()->IsRegOpnd())
- {
- IR::RegOpnd *src1 = instr->GetSrc1()->AsRegOpnd();
- if (src1->m_sym->IsSingleDef())
- {
- instr = src1->m_sym->GetInstrDef();
- }
- }
- }
- return (OpCodeAttr::ProducesNumber(instr->m_opcode) ||
- (instr->m_opcode == Js::OpCode::Add_A && this->SymIsIntconstOrSelf(defSym, instr->GetSrc1()) && this->SymIsIntconstOrSelf(defSym, instr->GetSrc2())));
-}
-
bool
BackwardPass::ProcessDef(IR::Opnd * opnd)
{
@@ -7501,19 +7442,7 @@ BackwardPass::ProcessDef(IR::Opnd * opnd)
this->InvalidateCloneStrCandidate(opnd);
if ((tag == Js::BackwardPhase) && IsPrePass())
{
- bool firstDef = !this->currentPrePassLoop->symsAssignedToInLoop->TestAndSet(sym->m_id);
-
- if (firstDef)
- {
- if (this->InstrPreservesNumberValues(this->currentInstr, sym))
- {
- this->currentPrePassLoop->preservesNumberValue->Set(sym->m_id);
- }
- }
- else if (!this->InstrPreservesNumberValues(this->currentInstr, sym))
- {
- this->currentPrePassLoop->preservesNumberValue->Clear(sym->m_id);
- }
+ this->currentPrePassLoop->symsAssignedToInLoop->Set(sym->m_id);
}
}
}
@@ -8013,10 +7942,12 @@ BackwardPass::ProcessInlineeEnd(IR::Instr* instr)
}
if (this->tag == Js::BackwardPhase)
{
- if (!GlobOpt::DoInlineArgsOpt(instr->m_func))
+ // Commenting out to allow for argument length and argument[constant] optimization
+ // Will revisit in phase two
+ /*if (!GlobOpt::DoInlineArgsOpt(instr->m_func))
{
return;
- }
+ }*/
// This adds a use for function sym as part of InlineeStart & all the syms referenced by the args.
// It ensure they do not get cleared from the copy prop sym map.
@@ -8395,7 +8326,7 @@ BackwardPass::ReverseCopyProp(IR::Instr *instr)
FOREACH_SLISTBASE_ENTRY(
CopyPropSyms,
usedCopyPropSym,
- &instrPrev->GetBailOutInfo()->usedCapturedValues.copyPropSyms)
+ &instrPrev->GetBailOutInfo()->usedCapturedValues->copyPropSyms)
{
if(dstSym == usedCopyPropSym.Value())
{
@@ -8756,16 +8687,8 @@ BackwardPass::RestoreInductionVariableValuesAfterMemOp(Loop *loop)
StackSym *sym = localFunc->m_symTable->FindStackSym(symId)->GetInt32EquivSym(localFunc);
IR::Opnd *inductionVariableOpnd = IR::RegOpnd::New(sym, IRType::TyInt32, localFunc);
- IR::Opnd *sizeOpnd = globOpt->GenerateInductionVariableChangeForMemOp(loop, inductionVariableChangeInfo.unroll, loop->memOpInfo->instr);
- IR::Instr* restoreInductionVarInstr = IR::Instr::New(opCode, inductionVariableOpnd, inductionVariableOpnd, sizeOpnd, loop->GetFunc());
-
- // The IR that restores the induction variable's value is placed before the MemOp. Since this IR can
- // bailout to the loop's landing pad, placing this IR before the MemOp avoids performing the MemOp,
- // bailing out because of this IR, and then performing the effects of the loop again.
- loop->landingPad->InsertInstrBefore(restoreInductionVarInstr, loop->memOpInfo->instr);
-
- // If restoring an induction variable results in an overflow, bailout to the loop's landing pad.
- restoreInductionVarInstr->ConvertToBailOutInstr(loop->bailOutInfo, IR::BailOutOnOverflow);
+ IR::Opnd *sizeOpnd = globOpt->GenerateInductionVariableChangeForMemOp(loop, inductionVariableChangeInfo.unroll);
+ loop->landingPad->InsertAfter(IR::Instr::New(opCode, inductionVariableOpnd, inductionVariableOpnd, sizeOpnd, loop->GetFunc()));
};
for (auto it = loop->memOpInfo->inductionVariableChangeInfoMap->GetIterator(); it.IsValid(); it.MoveNext())
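A note on the two recurring changes in the BackwardPass.cpp hunks above: DoStackArgsOpt() drops its Func argument, and the considerSymsAsRealUsesInNoImplicitCallUses bit-vector collapses into a single StackSym pointer. The new Asserts encode the invariant that makes the collapse safe: at most one head-segment-length sym is ever pending between ProcessArrayRegOpndUse and the NoImplicitCallUses instruction that consumes it. A minimal sketch of that single-slot pattern (illustrative names, not ChakraCore code):

#include <cassert>

struct StackSym { int m_id; };

// Was: BVSparse<JitArenaAllocator> with Set()/TestAndClear(); now a
// one-element "set", which is cheaper and documents the invariant.
struct PendingRealUse
{
    StackSym *pending = nullptr;

    void Mark(StackSym *sym)
    {
        assert(pending == nullptr);   // mirrors Assert(!considerSymAsRealUseInNoImplicitCallUses)
        pending = sym;
    }

    bool ConsumeIf(StackSym *sym)
    {
        if (pending != sym) { return false; }
        pending = nullptr;            // mirrors the old TestAndClear
        return true;
    }
};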
diff --git a/deps/chakrashim/core/lib/Backend/BackwardPass.h b/deps/chakrashim/core/lib/Backend/BackwardPass.h
index 68a53439c43..711b38483fe 100644
--- a/deps/chakrashim/core/lib/Backend/BackwardPass.h
+++ b/deps/chakrashim/core/lib/Backend/BackwardPass.h
@@ -36,9 +36,6 @@ class BackwardPass
bool ProcessDef(IR::Opnd * opnd);
void ProcessTransfers(IR::Instr * instr);
void ProcessFieldKills(IR::Instr * instr);
- bool SymIsIntconstOrSelf(Sym *sym, IR::Opnd *opnd);
- bool InstrPreservesNumberValues(IR::Instr *instr, Sym *defSym);
-
template <typename T> void ClearBucketsOnFieldKill(IR::Instr *instr, HashTable<T> *table);
StackSym* ProcessByteCodeUsesDst(IR::ByteCodeUsesInstr * byteCodeUsesInstr);
const BVSparse<JitArenaAllocator>* ProcessByteCodeUsesSrcs(IR::ByteCodeUsesInstr * byteCodeUsesInstr);
@@ -141,11 +138,11 @@ class BackwardPass
void TrackObjTypeSpecProperties(IR::PropertySymOpnd *opnd, BasicBlock *block);
void TrackObjTypeSpecWriteGuards(IR::PropertySymOpnd *opnd, BasicBlock *block);
void TrackAddPropertyTypes(IR::PropertySymOpnd *opnd, BasicBlock *block);
- void InsertTypeTransition(IR::Instr *instrInsertBefore, int symId, AddPropertyCacheBucket *data);
- void InsertTypeTransition(IR::Instr *instrInsertBefore, StackSym *objSym, AddPropertyCacheBucket *data);
- void InsertTypeTransitionAtBlock(BasicBlock *block, int symId, AddPropertyCacheBucket *data);
- void InsertTypeTransitionsAtPriorSuccessors(BasicBlock *block, BasicBlock *blockSucc, int symId, AddPropertyCacheBucket *data);
- void InsertTypeTransitionAfterInstr(IR::Instr *instr, int symId, AddPropertyCacheBucket *data);
+ void InsertTypeTransition(IR::Instr *instrInsertBefore, int symId, AddPropertyCacheBucket *data, BVSparse<JitArenaAllocator>* upwardExposedUses);
+ void InsertTypeTransition(IR::Instr *instrInsertBefore, StackSym *objSym, AddPropertyCacheBucket *data, BVSparse<JitArenaAllocator>* upwardExposedUses);
+ void InsertTypeTransitionAtBlock(BasicBlock *block, int symId, AddPropertyCacheBucket *data, BVSparse<JitArenaAllocator>* upwardExposedUses);
+ void InsertTypeTransitionsAtPriorSuccessors(BasicBlock *block, BasicBlock *blockSucc, int symId, AddPropertyCacheBucket *data, BVSparse<JitArenaAllocator>* upwardExposedUses);
+ void InsertTypeTransitionAfterInstr(IR::Instr *instr, int symId, AddPropertyCacheBucket *data, BVSparse<JitArenaAllocator>* upwardExposedUses);
void InsertTypeTransitionsAtPotentialKills();
bool TransitionUndoesObjectHeaderInlining(AddPropertyCacheBucket *data) const;
@@ -191,7 +188,7 @@ class BackwardPass
BVSparse<JitArenaAllocator> * intOverflowDoesNotMatterInRangeBySymId;
BVSparse<JitArenaAllocator> * candidateSymsRequiredToBeInt;
BVSparse<JitArenaAllocator> * candidateSymsRequiredToBeLossyInt;
- BVSparse<JitArenaAllocator> * considerSymsAsRealUsesInNoImplicitCallUses;
+ StackSym * considerSymAsRealUseInNoImplicitCallUses;
bool intOverflowCurrentlyMattersInRange;
bool isCollectionPass;
enum class CollectionPassSubPhase
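The header changes above thread a BVSparse<JitArenaAllocator>* through the whole InsertTypeTransition family instead of letting the helpers read this->currentBlock->upwardExposedUses implicitly, so a caller fixing up a successor block can pass that block's set. That liveness test then selects the opcode variant, per the BackwardPass.cpp hunk that introduces AdjustObjTypeReloadAuxSlotPtr. A compilable sketch of the selection, with std::set standing in for BVSparse and illustrative names:

#include <set>

using BitVector = std::set<int>;   // stand-in for BVSparse<JitArenaAllocator>

enum class OpCode { AdjustObjType, AdjustObjTypeReloadAuxSlotPtr };

// If the aux slot pointer sym is upward-exposed past the inserted type
// transition, reload it rather than clearing its liveness, as the old code did.
OpCode ChooseAdjustOpcode(const BitVector *upwardExposedUses, int auxSlotPtrSymId)
{
    if (upwardExposedUses != nullptr && upwardExposedUses->count(auxSlotPtrSymId) != 0)
    {
        return OpCode::AdjustObjTypeReloadAuxSlotPtr;
    }
    return OpCode::AdjustObjType;
}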
diff --git a/deps/chakrashim/core/lib/Backend/BailOut.cpp b/deps/chakrashim/core/lib/Backend/BailOut.cpp
index 84097adb46d..9d082669db3 100644
--- a/deps/chakrashim/core/lib/Backend/BailOut.cpp
+++ b/deps/chakrashim/core/lib/Backend/BailOut.cpp
@@ -22,10 +22,29 @@ BailOutInfo::Clear(JitArenaAllocator * allocator)
{
this->capturedValues->constantValues.Clear(allocator);
this->capturedValues->copyPropSyms.Clear(allocator);
+
+ if (this->capturedValues->argObjSyms)
+ {
+ JitAdelete(allocator, this->capturedValues->argObjSyms);
+ }
+
JitAdelete(allocator, this->capturedValues);
}
- this->usedCapturedValues.constantValues.Clear(allocator);
- this->usedCapturedValues.copyPropSyms.Clear(allocator);
+
+ if (this->usedCapturedValues)
+ {
+ Assert(this->usedCapturedValues->refCount == 0);
+ this->usedCapturedValues->constantValues.Clear(allocator);
+ this->usedCapturedValues->copyPropSyms.Clear(allocator);
+
+ if (this->usedCapturedValues->argObjSyms)
+ {
+ JitAdelete(allocator, this->usedCapturedValues->argObjSyms);
+ }
+
+ JitAdelete(allocator, this->usedCapturedValues);
+ }
+
if (byteCodeUpwardExposedUsed)
{
JitAdelete(allocator, byteCodeUpwardExposedUsed);
@@ -576,10 +595,10 @@ BailOutRecord::RestoreValues(IR::BailOutKind bailOutKind, Js::JavascriptCallStac
Assert(RegTypes[LinearScanMD::GetRegisterFromSaveIndex(offset)] != TyFloat64);
value = registerSaveSpace[offset - 1];
}
- Assert(Js::DynamicObject::Is(value));
+ Assert(Js::DynamicObject::IsBaseDynamicObject(value));
Assert(ThreadContext::IsOnStack(value));
- Js::DynamicObject * obj = Js::DynamicObject::FromVar(value);
+ Js::DynamicObject * obj = Js::VarTo<Js::DynamicObject>(value);
uint propertyCount = obj->GetPropertyCount();
for (uint j = record.initFldCount; j < propertyCount; j++)
{
@@ -656,7 +675,7 @@ BailOutRecord::RestoreValues(IR::BailOutKind bailOutKind, Js::JavascriptCallStac
if (branchValueRegSlot != Js::Constants::NoRegister)
{
// Used when a t1 = CmCC is optimized to BrCC, and the branch bails out. T1 needs to be restored
- Assert(branchValue && Js::JavascriptBoolean::Is(branchValue));
+ Assert(branchValue && Js::VarIs<Js::JavascriptBoolean>(branchValue));
Assert(branchValueRegSlot < newInstance->GetJavascriptFunction()->GetFunctionBody()->GetLocalsCount());
newInstance->m_localSlots[branchValueRegSlot] = branchValue;
}
@@ -1004,7 +1023,7 @@ BailOutRecord::BailOutCommonNoCodeGen(Js::JavascriptCallStackLayout * layout, Ba
BailOutReturnValue * bailOutReturnValue, void * argoutRestoreAddress)
{
Assert(bailOutRecord->parent == nullptr);
- Assert(Js::ScriptFunction::Is(layout->functionObject));
+ Assert(Js::VarIs<Js::ScriptFunction>(layout->functionObject));
Js::ScriptFunction ** functionRef = (Js::ScriptFunction **)&layout->functionObject;
Js::ArgumentReader args(&layout->callInfo, layout->args);
Js::Var result = BailOutHelper(layout, functionRef, args, false, bailOutRecord, bailOutOffset, returnAddress, bailOutKind, registerSaves, bailOutReturnValue, layout->GetArgumentsObjectLocation(), branchValue, argoutRestoreAddress);
@@ -1031,7 +1050,7 @@ uint32 bailOutOffset, void * returnAddress, IR::BailOutKind bailOutKind, Js::Imp
sizeof(registerSaves));
Js::Var result = BailOutCommonNoCodeGen(layout, bailOutRecord, bailOutOffset, returnAddress, bailOutKind, branchValue, registerSaves, bailOutReturnValue, argoutRestoreAddress);
- ScheduleFunctionCodeGen(Js::ScriptFunction::FromVar(layout->functionObject), nullptr, bailOutRecord, bailOutKind, bailOutOffset, savedImplicitCallFlags, returnAddress);
+ ScheduleFunctionCodeGen(Js::VarTo<Js::ScriptFunction>(layout->functionObject), nullptr, bailOutRecord, bailOutKind, bailOutOffset, savedImplicitCallFlags, returnAddress);
return result;
}
@@ -1060,7 +1079,7 @@ BailOutRecord::BailOutInlinedCommon(Js::JavascriptCallStackLayout * layout, Bail
}
Js::Var result = BailOutCommonNoCodeGen(layout, currentBailOutRecord, currentBailOutRecord->bailOutOffset, returnAddress, bailOutKind, branchValue,
registerSaves, &bailOutReturnValue);
- ScheduleFunctionCodeGen(Js::ScriptFunction::FromVar(layout->functionObject), innerMostInlinee, currentBailOutRecord, bailOutKind, bailOutOffset, savedImplicitCallFlags, returnAddress);
+ ScheduleFunctionCodeGen(Js::VarTo<Js::ScriptFunction>(layout->functionObject), innerMostInlinee, currentBailOutRecord, bailOutKind, bailOutOffset, savedImplicitCallFlags, returnAddress);
return result;
}
@@ -1076,7 +1095,7 @@ BailOutRecord::BailOutFromLoopBodyCommon(Js::JavascriptCallStackLayout * layout,
js_memcpy_s(registerSaves, sizeof(registerSaves), (Js::Var *)layout->functionObject->GetScriptContext()->GetThreadContext()->GetBailOutRegisterSaveSpace(),
sizeof(registerSaves));
uint32 result = BailOutFromLoopBodyHelper(layout, bailOutRecord, bailOutOffset, bailOutKind, branchValue, registerSaves);
- ScheduleLoopBodyCodeGen(Js::ScriptFunction::FromVar(layout->functionObject), nullptr, bailOutRecord, bailOutKind);
+ ScheduleLoopBodyCodeGen(Js::VarTo<Js::ScriptFunction>(layout->functionObject), nullptr, bailOutRecord, bailOutKind);
return result;
}
@@ -1106,7 +1125,7 @@ BailOutRecord::BailOutFromLoopBodyInlinedCommon(Js::JavascriptCallStackLayout *
uint32 result = BailOutFromLoopBodyHelper(layout, currentBailOutRecord, currentBailOutRecord->bailOutOffset,
bailOutKind, nullptr, registerSaves, &bailOutReturnValue);
- ScheduleLoopBodyCodeGen(Js::ScriptFunction::FromVar(layout->functionObject), innerMostInlinee, currentBailOutRecord, bailOutKind);
+ ScheduleLoopBodyCodeGen(Js::VarTo<Js::ScriptFunction>(layout->functionObject), innerMostInlinee, currentBailOutRecord, bailOutKind);
return result;
}
@@ -1118,7 +1137,7 @@ BailOutRecord::BailOutInlinedHelper(Js::JavascriptCallStackLayout * layout, Bail
BailOutReturnValue * lastBailOutReturnValue = nullptr;
*innerMostInlinee = nullptr;
- Js::FunctionBody* functionBody = Js::ScriptFunction::FromVar(layout->functionObject)->GetFunctionBody();
+ Js::FunctionBody* functionBody = Js::VarTo<Js::ScriptFunction>(layout->functionObject)->GetFunctionBody();
Js::EntryPointInfo *entryPointInfo;
if(isInLoopBody)
@@ -1162,7 +1181,7 @@ BailOutRecord::BailOutInlinedHelper(Js::JavascriptCallStackLayout * layout, Bail
Js::ScriptFunction ** functionRef = (Js::ScriptFunction **)&(inlinedFrame->function);
AnalysisAssert(*functionRef);
- Assert(Js::ScriptFunction::Is(inlinedFrame->function));
+ Assert(Js::VarIs<Js::ScriptFunction>(inlinedFrame->function));
if (*innerMostInlinee == nullptr)
{
@@ -1381,7 +1400,7 @@ BailOutRecord::BailOutHelper(Js::JavascriptCallStackLayout * layout, Js::ScriptF
// when resuming a generator and not needed when yielding from a generator, as is occurring
// here.
AssertMsg(args.Info.Count == 2, "Generator ScriptFunctions should only be invoked by generator APIs with the pair of arguments they pass in -- the generator object and a ResumeYieldData pointer");
- Js::JavascriptGenerator* generator = Js::JavascriptGenerator::FromVar(args[0]);
+ Js::JavascriptGenerator* generator = Js::VarTo<Js::JavascriptGenerator>(args[0]);
newInstance = generator->GetFrame();
if (newInstance != nullptr)
@@ -1403,7 +1422,6 @@ BailOutRecord::BailOutHelper(Js::JavascriptCallStackLayout * layout, Js::ScriptF
//
Js::Arguments generatorArgs = generator->GetArguments();
Js::InterpreterStackFrame::Setup setup(function, generatorArgs, true, isInlinee);
- Assert(setup.GetStackAllocationVarCount() == 0);
size_t varAllocCount = setup.GetAllocationVarCount();
size_t varSizeInBytes = varAllocCount * sizeof(Js::Var);
DWORD_PTR stackAddr = reinterpret_cast<DWORD_PTR>(&generator); // as mentioned above, use any stack address from this frame to ensure correct debugging functionality
@@ -1416,14 +1434,11 @@ BailOutRecord::BailOutHelper(Js::JavascriptCallStackLayout * layout, Js::ScriptF
// Allocate invalidVar on GC instead of stack since this InterpreterStackFrame will out live the current real frame
Js::Var invalidVar = (Js::RecyclableObject*)RecyclerNewPlusLeaf(functionScriptContext->GetRecycler(), sizeof(Js::RecyclableObject), Js::Var);
memset(invalidVar, 0xFE, sizeof(Js::RecyclableObject));
+ newInstance = setup.InitializeAllocation(allocation, false, false, loopHeaderArray, stackAddr, invalidVar);
+#else
+ newInstance = setup.InitializeAllocation(allocation, false, false, loopHeaderArray, stackAddr);
#endif
- newInstance = setup.InitializeAllocation(allocation, nullptr, false, false, loopHeaderArray, stackAddr
-#if DBG
- , invalidVar
-#endif
- );
-
newInstance->m_reader.Create(executeFunction);
generator->SetFrame(newInstance, varSizeInBytes);
@@ -1433,28 +1448,18 @@ BailOutRecord::BailOutHelper(Js::JavascriptCallStackLayout * layout, Js::ScriptF
{
Js::InterpreterStackFrame::Setup setup(function, args, true, isInlinee);
size_t varAllocCount = setup.GetAllocationVarCount();
- size_t stackVarAllocCount = setup.GetStackAllocationVarCount();
- size_t varSizeInBytes;
- Js::Var *stackAllocation = nullptr;
+ size_t varSizeInBytes = varAllocCount * sizeof(Js::Var);
// If the locals area exceeds a certain limit, allocate it from a private arena rather than
// this frame. The current limit is based on an old assert on the number of locals we would allow here.
- if ((varAllocCount + stackVarAllocCount) > Js::InterpreterStackFrame::LocalsThreshold)
+ if (varAllocCount > Js::InterpreterStackFrame::LocalsThreshold)
{
ArenaAllocator *tmpAlloc = nullptr;
fReleaseAlloc = functionScriptContext->EnsureInterpreterArena(&tmpAlloc);
- varSizeInBytes = varAllocCount * sizeof(Js::Var);
allocation = (Js::Var*)tmpAlloc->Alloc(varSizeInBytes);
- if (stackVarAllocCount != 0)
- {
- size_t stackVarSizeInBytes = stackVarAllocCount * sizeof(Js::Var);
- PROBE_STACK_PARTIAL_INITIALIZED_BAILOUT_FRAME(functionScriptContext, Js::Constants::MinStackInterpreter + stackVarSizeInBytes, returnAddress);
- stackAllocation = (Js::Var*)_alloca(stackVarSizeInBytes);
- }
}
else
{
- varSizeInBytes = (varAllocCount + stackVarAllocCount) * sizeof(Js::Var);
PROBE_STACK_PARTIAL_INITIALIZED_BAILOUT_FRAME(functionScriptContext, Js::Constants::MinStackInterpreter + varSizeInBytes, returnAddress);
allocation = (Js::Var*)_alloca(varSizeInBytes);
}
@@ -1479,14 +1484,11 @@ BailOutRecord::BailOutHelper(Js::JavascriptCallStackLayout * layout, Js::ScriptF
#if DBG
Js::Var invalidStackVar = (Js::RecyclableObject*)_alloca(sizeof(Js::RecyclableObject));
memset(invalidStackVar, 0xFE, sizeof(Js::RecyclableObject));
+ newInstance = setup.InitializeAllocation(allocation, false, false, loopHeaderArray, frameStackAddr, invalidStackVar);
+#else
+ newInstance = setup.InitializeAllocation(allocation, false, false, loopHeaderArray, frameStackAddr);
#endif
- newInstance = setup.InitializeAllocation(allocation, stackAllocation, false, false, loopHeaderArray, frameStackAddr
-#if DBG
- , invalidStackVar
-#endif
- );
-
newInstance->m_reader.Create(executeFunction);
}
diff --git a/deps/chakrashim/core/lib/Backend/BailOut.h b/deps/chakrashim/core/lib/Backend/BailOut.h
index 208d940552f..5bac8e98863 100644
--- a/deps/chakrashim/core/lib/Backend/BailOut.h
+++ b/deps/chakrashim/core/lib/Backend/BailOut.h
@@ -27,7 +27,7 @@ class BailOutInfo
BailOutInfo(uint32 bailOutOffset, Func* bailOutFunc) :
bailOutOffset(bailOutOffset), bailOutFunc(bailOutFunc),
byteCodeUpwardExposedUsed(nullptr), polymorphicCacheIndex((uint)-1), startCallCount(0), startCallInfo(nullptr), bailOutInstr(nullptr),
- totalOutParamCount(0), argOutSyms(nullptr), bailOutRecord(nullptr), wasCloned(false), isInvertedBranch(false), sharedBailOutKind(true), isLoopTopBailOutInfo(false), canDeadStore(true),
+ totalOutParamCount(0), argOutSyms(nullptr), bailOutRecord(nullptr), wasCloned(false), isInvertedBranch(false), sharedBailOutKind(true), isLoopTopBailOutInfo(false),
outParamInlinedArgSlot(nullptr), liveVarSyms(nullptr), liveLosslessInt32Syms(nullptr), liveFloat64Syms(nullptr),
branchConditionOpnd(nullptr),
stackLiteralBailOutInfoCount(0), stackLiteralBailOutInfo(nullptr)
@@ -41,7 +41,9 @@ class BailOutInfo
#endif
this->capturedValues = JitAnew(bailOutFunc->m_alloc, CapturedValues);
this->capturedValues->refCount = 1;
- this->usedCapturedValues.argObjSyms = nullptr;
+
+ this->usedCapturedValues = JitAnew(bailOutFunc->m_alloc, CapturedValues);
+ this->usedCapturedValues->argObjSyms = nullptr;
}
void Clear(JitArenaAllocator * allocator);
@@ -69,7 +71,6 @@ class BailOutInfo
#endif
bool wasCloned;
bool isInvertedBranch;
- bool canDeadStore;
bool sharedBailOutKind;
bool isLoopTopBailOutInfo;
@@ -78,9 +79,9 @@ class BailOutInfo
#endif
uint32 bailOutOffset;
BailOutRecord * bailOutRecord;
- CapturedValues* capturedValues; // Values we know about after forward pass
- CapturedValues usedCapturedValues; // Values that need to be restored in the bail out
- BVSparse<JitArenaAllocator> * byteCodeUpwardExposedUsed; // Non-constant stack syms that need to be restored in the bail out
+ CapturedValues * capturedValues; // Values we know about after forward pass
+ CapturedValues * usedCapturedValues; // Values that need to be restored in the bail out
+ BVSparse<JitArenaAllocator> * byteCodeUpwardExposedUsed; // Non-constant stack syms that need to be restored in the bail out
uint polymorphicCacheIndex;
uint startCallCount;
uint totalOutParamCount;
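With this change usedCapturedValues is held by pointer, symmetric with capturedValues, and BailOutInfo::Clear (in the BailOut.cpp hunk above) becomes responsible for tearing both down, including the lazily allocated argObjSyms bit-vector. A rough sketch of the resulting ownership contract, using plain new/delete where the real code uses JitAnew/JitAdelete on the func's arena:

#include <cassert>

struct SparseBitVector { };          // stand-in for BVSparse<JitArenaAllocator>

struct CapturedValues
{
    unsigned refCount = 0;
    SparseBitVector *argObjSyms = nullptr;   // allocated lazily, owned here
};

struct BailOutInfo
{
    CapturedValues *usedCapturedValues = new CapturedValues();

    void Clear()
    {
        if (usedCapturedValues != nullptr)
        {
            assert(usedCapturedValues->refCount == 0);   // mirrors the diff's Assert
            delete usedCapturedValues->argObjSyms;       // JitAdelete in the real code
            delete usedCapturedValues;
            usedCapturedValues = nullptr;
        }
    }
};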
diff --git a/deps/chakrashim/core/lib/Backend/Chakra.Backend.vcxproj.filters b/deps/chakrashim/core/lib/Backend/Chakra.Backend.vcxproj.filters
index dc7bd3e8aa0..b2ed7496d8a 100644
--- a/deps/chakrashim/core/lib/Backend/Chakra.Backend.vcxproj.filters
+++ b/deps/chakrashim/core/lib/Backend/Chakra.Backend.vcxproj.filters
@@ -1,434 +1,434 @@
[This hunk rewrites all 434 lines of Chakra.Backend.vcxproj.filters in place; the XML markup did not survive extraction. What remains on both the removed and added sides is identical content: the per-architecture filter assignments (amd64, arm, arm64, i386) and the four filter GUIDs {ddab7816-e35a-4dd5-9512-db82e2153113}, {4d6939f5-cbe4-4baa-a646-7aa81e51ffff}, {3844196d-292e-423d-b261-cdb6b14c2134}, {125f23d6-b617-4591-a9f1-930443613748}, i.e., a formatting-only rewrite of the file.]
\ No newline at end of file
diff --git a/deps/chakrashim/core/lib/Backend/CodeGenNumberAllocator.cpp b/deps/chakrashim/core/lib/Backend/CodeGenNumberAllocator.cpp
index 19ec6b2b200..ddf424c2dd2 100644
--- a/deps/chakrashim/core/lib/Backend/CodeGenNumberAllocator.cpp
+++ b/deps/chakrashim/core/lib/Backend/CodeGenNumberAllocator.cpp
@@ -571,4 +571,4 @@ XProcNumberPageSegmentManager::~XProcNumberPageSegmentManager()
temp = (XProcNumberPageSegmentImpl*)next;
}
}
-#endif
\ No newline at end of file
+#endif
diff --git a/deps/chakrashim/core/lib/Backend/CodeGenWorkItem.cpp b/deps/chakrashim/core/lib/Backend/CodeGenWorkItem.cpp
index f5979e6032f..44b74803c3c 100644
--- a/deps/chakrashim/core/lib/Backend/CodeGenWorkItem.cpp
+++ b/deps/chakrashim/core/lib/Backend/CodeGenWorkItem.cpp
@@ -210,7 +210,19 @@ void CodeGenWorkItem::OnWorkItemProcessFail(NativeCodeGenerator* codeGen)
#if PDATA_ENABLED && defined(_WIN32)
if (this->entryPointInfo)
{
- this->entryPointInfo->GetNativeEntryPointData()->CleanupXDataInfo();
+ XDataAllocation * xdataAllocation = this->entryPointInfo->GetNativeEntryPointData()->GetXDataInfo();
+ if (xdataAllocation)
+ {
+ void* functionTable = xdataAllocation->functionTable;
+ if (functionTable)
+ {
+ if (!DelayDeletingFunctionTable::AddEntry(functionTable))
+ {
+ PHASE_PRINT_TESTTRACE1(Js::XDataPhase, _u("[%d]OnWorkItemProcessFail: Failed to add to slist, table: %llx\n"), GetCurrentThreadId(), functionTable);
+ DelayDeletingFunctionTable::DeleteFunctionTable(functionTable);
+ }
+ }
+ }
}
#endif
codeGen->FreeNativeCodeGenAllocation(this->allocation->allocation->address);
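The replacement for the old CleanupXDataInfo call above follows a try-defer-else-delete shape: hand the function table to the delay-deleting list, and only delete synchronously when enqueueing fails, so nothing leaks on the failure path. A sketch of that fallback pattern; the two helpers are stand-ins for DelayDeletingFunctionTable::AddEntry and DelayDeletingFunctionTable::DeleteFunctionTable:

// Stubs for the sketch only; the real entry points live on DelayDeletingFunctionTable.
static bool TryEnqueueForDelayedDelete(void *) { return false; }
static void DeleteFunctionTableNow(void *) { }

void ReleaseFunctionTable(void *functionTable)
{
    if (functionTable == nullptr)
    {
        return;
    }
    if (!TryEnqueueForDelayedDelete(functionTable))
    {
        // Enqueue failed: fall back to deleting inline, matching the diff.
        DeleteFunctionTableNow(functionTable);
    }
}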
diff --git a/deps/chakrashim/core/lib/Backend/EmitBuffer.cpp b/deps/chakrashim/core/lib/Backend/EmitBuffer.cpp
index 107e9429f14..2a4c407f201 100644
--- a/deps/chakrashim/core/lib/Backend/EmitBuffer.cpp
+++ b/deps/chakrashim/core/lib/Backend/EmitBuffer.cpp
@@ -414,7 +414,7 @@ bool EmitBufferManager::ProtectBufferWith
template <typename TAlloc, typename TPreReservedAlloc, class SyncObject>
bool EmitBufferManager<TAlloc, TPreReservedAlloc, SyncObject>::CommitBufferForInterpreter(TEmitBufferAllocation* allocation, _In_reads_bytes_(bufferSize) BYTE* pBuffer, _In_ size_t bufferSize)
{
- AutoRealOrFakeCriticalSection autoCs(&this->criticalSection);
+ Assert(this->criticalSection.IsLocked());
Assert(allocation != nullptr);
allocation->bytesUsed += bufferSize;
diff --git a/deps/chakrashim/core/lib/Backend/Encoder.cpp b/deps/chakrashim/core/lib/Backend/Encoder.cpp
index 493f112f746..e6ce1d43f3c 100644
--- a/deps/chakrashim/core/lib/Backend/Encoder.cpp
+++ b/deps/chakrashim/core/lib/Backend/Encoder.cpp
@@ -320,6 +320,165 @@ Encoder::Encode()
}
}
+ // Assembly Dump Phase
+ // This phase exists to assist tooling that expects "assemblable" output - that is,
+ // output that, with minimal manual handling, could theoretically be fed to another
+ // assembler to make a valid function for the target platform. We don't guarantee a
+ // dump from this will _actually_ be assemblable, but it is significantly closer to
+ // that than our normal, annotated output
+#if DBG_DUMP
+ if (PHASE_DUMP(Js::AssemblyPhase, m_func))
+ {
+ FOREACH_INSTR_IN_FUNC(instr, m_func)
+ {
+ bool hasPrintedForOpnds = false;
+ Func* localScopeFuncForLambda = m_func;
+ auto printOpnd = [&hasPrintedForOpnds, localScopeFuncForLambda](IR::Opnd* opnd)
+ {
+ if (hasPrintedForOpnds)
+ {
+ Output::Print(_u(", "));
+ }
+ switch (opnd->m_kind)
+ {
+ case IR::OpndKindInvalid:
+ AssertMsg(false, "Should be unreachable");
+ break;
+ case IR::OpndKindIntConst:
+ Output::Print(_u("%lli"), (long long int)opnd->AsIntConstOpnd()->GetValue());
+ break;
+ case IR::OpndKindInt64Const:
+ case IR::OpndKindFloatConst:
+ case IR::OpndKindFloat32Const:
+ case IR::OpndKindSimd128Const:
+ AssertMsg(false, "Not Yet Implemented");
+ break;
+ case IR::OpndKindHelperCall:
+ Output::Print(_u("%s"), IR::GetMethodName(opnd->AsHelperCallOpnd()->m_fnHelper));
+ break;
+ case IR::OpndKindSym:
+ Output::Print(_u("SYM("));
+ opnd->Dump(IRDumpFlags_SimpleForm, localScopeFuncForLambda);
+ Output::Print(_u(")"));
+ break;
+ case IR::OpndKindReg:
+ Output::Print(_u("%S"), RegNames[opnd->AsRegOpnd()->GetReg()]);
+ break;
+ case IR::OpndKindAddr:
+ Output::Print(_u("0x%p"), opnd->AsAddrOpnd()->m_address);
+ break;
+ case IR::OpndKindIndir:
+ {
+ IR::IndirOpnd* indirOpnd = opnd->AsIndirOpnd();
+ IR::RegOpnd* baseOpnd = indirOpnd->GetBaseOpnd();
+ IR::RegOpnd* indexOpnd = indirOpnd->GetIndexOpnd();
+ Output::Print(_u("["));
+ bool hasPrintedComponent = false;
+ if (baseOpnd != nullptr)
+ {
+ Output::Print(_u("%S"), RegNames[baseOpnd->GetReg()]);
+ hasPrintedComponent = true;
+ }
+ if (indexOpnd != nullptr)
+ {
+ if (hasPrintedComponent)
+ {
+ Output::Print(_u(" + "));
+ }
+ Output::Print(_u("%S * %u"), RegNames[indexOpnd->GetReg()], indirOpnd->GetScale());
+ hasPrintedComponent = true;
+ }
+ if (hasPrintedComponent)
+ {
+ Output::Print(_u(" + "));
+ }
+ Output::Print(_u("(%i)]"), indirOpnd->GetOffset());
+ break;
+ }
+ case IR::OpndKindLabel:
+ opnd->Dump(IRDumpFlags_SimpleForm, localScopeFuncForLambda);
+ break;
+ case IR::OpndKindMemRef:
+ opnd->DumpOpndKindMemRef(true, localScopeFuncForLambda);
+ break;
+ case IR::OpndKindRegBV:
+ AssertMsg(false, "Should be unreachable");
+ break;
+ case IR::OpndKindList:
+ AssertMsg(false, "Should be unreachable");
+ break;
+ default:
+ AssertMsg(false, "Missing operand type");
+ }
+ hasPrintedForOpnds = true;
+ };
+ switch(instr->GetKind())
+ {
+ case IR::InstrKindInvalid:
+ Assert(false);
+ break;
+ case IR::InstrKindJitProfiling:
+ case IR::InstrKindProfiled:
+ case IR::InstrKindInstr:
+ {
+ Output::SkipToColumn(4);
+ Output::Print(_u("%s "), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode));
+ Output::SkipToColumn(18);
+ IR::Opnd* dst = instr->GetDst();
+ IR::Opnd* src1 = instr->GetSrc1();
+ IR::Opnd* src2 = instr->GetSrc2();
+ if (dst != nullptr && (src1 == nullptr || !dst->IsRegOpnd() || !src1->IsRegOpnd() || dst->AsRegOpnd()->GetReg() != src1->AsRegOpnd()->GetReg())) // Print dst if it's there and it isn't the same reg as src1 (which usually indicates a srcdest-style instr)
+ {
+ printOpnd(dst);
+ }
+ if (src1 != nullptr)
+ {
+ printOpnd(src1);
+ }
+ if (src2 != nullptr)
+ {
+ printOpnd(src2);
+ }
+ break;
+ }
+ case IR::InstrKindBranch:
+ Output::SkipToColumn(4);
+ Output::Print(_u("%s "), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode));
+ Output::SkipToColumn(18);
+ if (instr->AsBranchInstr()->IsMultiBranch())
+ {
+ Assert(instr->GetSrc1() != nullptr);
+ printOpnd(instr->GetSrc1());
+ }
+ else
+ {
+ Output::Print(_u("L%u"), instr->AsBranchInstr()->GetTarget()->m_id);
+ }
+ break;
+ case IR::InstrKindProfiledLabel:
+ case IR::InstrKindLabel:
+ Output::Print(_u("L%u:"), instr->AsLabelInstr()->m_id);
+ break;
+ case IR::InstrKindEntry:
+ case IR::InstrKindExit:
+ case IR::InstrKindPragma:
+ // No output
+ break;
+ case IR::InstrKindByteCodeUses:
+ AssertMsg(false, "Instruction kind shouldn't be present here");
+ break;
+ default:
+ Assert(false);
+ break;
+ }
+ Output::SetAlignAndPrefix(60, _u("; "));
+ instr->Dump();
+ Output::ResetAlignAndPrefix();
+ } NEXT_INSTR_IN_FUNC;
+ }
+#endif
+ // End Assembly Dump Phase
+
BEGIN_CODEGEN_PHASE(m_func, Js::EmitterPhase);
// Copy to permanent buffer.
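One subtle rule in the dump loop added above: dst is suppressed when it is the same register as src1, so two-operand "srcdest" encodings print once, the way an assembler expects them. The condition restated as a standalone predicate over simplified operand types (not the IR classes):

struct Operand
{
    bool isReg;
    int reg;
};

// Print dst only when it adds information beyond src1: it exists, and it is
// not the same physical register as a register src1 (the srcdest case).
bool ShouldPrintDst(const Operand *dst, const Operand *src1)
{
    if (dst == nullptr) { return false; }
    if (src1 == nullptr) { return true; }
    if (!dst->isReg || !src1->isReg) { return true; }
    return dst->reg != src1->reg;
}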
diff --git a/deps/chakrashim/core/lib/Backend/EquivalentTypeSet.cpp b/deps/chakrashim/core/lib/Backend/EquivalentTypeSet.cpp
index d345708d79c..53e892e1789 100644
--- a/deps/chakrashim/core/lib/Backend/EquivalentTypeSet.cpp
+++ b/deps/chakrashim/core/lib/Backend/EquivalentTypeSet.cpp
@@ -162,4 +162,4 @@ void EquivalentTypeSet::SortAndRemoveDuplicates()
this->sortedAndDuplicatesRemoved = true;
}
}
-#endif
\ No newline at end of file
+#endif
diff --git a/deps/chakrashim/core/lib/Backend/FixedFieldInfo.cpp b/deps/chakrashim/core/lib/Backend/FixedFieldInfo.cpp
index b3f04c2f2de..947fbb226d8 100644
--- a/deps/chakrashim/core/lib/Backend/FixedFieldInfo.cpp
+++ b/deps/chakrashim/core/lib/Backend/FixedFieldInfo.cpp
@@ -14,16 +14,16 @@ FixedFieldInfo::PopulateFixedField(_In_opt_ Js::Type * type, _In_opt_ Js::Var va
FixedFieldIDL * rawFF = fixed->GetRaw();
rawFF->fieldValue = var;
rawFF->nextHasSameFixedField = false;
- if (var != nullptr && Js::JavascriptFunction::Is(var))
+ if (var != nullptr && Js::VarIs<Js::JavascriptFunction>(var))
{
- Js::JavascriptFunction * funcObj = Js::JavascriptFunction::FromVar(var);
+ Js::JavascriptFunction * funcObj = Js::VarTo<Js::JavascriptFunction>(var);
rawFF->valueType = ValueType::FromObject(funcObj).GetRawData();
rawFF->funcInfoAddr = (void*)funcObj->GetFunctionInfo();
rawFF->isClassCtor = funcObj->GetFunctionInfo()->IsClassConstructor();
rawFF->localFuncId = funcObj->GetFunctionInfo()->GetLocalFunctionId();
- if (Js::ScriptFunction::Is(var))
+ if (Js::VarIs<Js::ScriptFunction>(var))
{
- rawFF->environmentAddr = (void*)Js::ScriptFunction::FromVar(funcObj)->GetEnvironment();
+ rawFF->environmentAddr = (void*)Js::VarTo<Js::ScriptFunction>(funcObj)->GetEnvironment();
}
}
if (type != nullptr)
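FixedFieldInfo.cpp shows the cast migration that recurs throughout this diff: the per-class Is()/FromVar() static pairs give way to the shared Js::VarIs<T>/Js::VarTo<T> templates. An illustrative reimplementation of the shape of those helpers; ChakraCore's real definitions dispatch through type ids and virtual-table checks rather than this toy typeId field:

#include <cassert>

struct RecyclableObject { int typeId; };

struct JavascriptFunction : RecyclableObject { static const int TypeId = 1; };

// VarIs/VarTo centralize the checked-cast boilerplate the old FromVar
// methods repeated on every class.
template <typename T>
bool VarIs(RecyclableObject *var) { return var != nullptr && var->typeId == T::TypeId; }

template <typename T>
T *VarTo(RecyclableObject *var)
{
    assert(VarIs<T>(var));   // FromVar asserted (or fail-fasted) the same way
    return static_cast<T *>(var);
}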
diff --git a/deps/chakrashim/core/lib/Backend/FlowGraph.cpp b/deps/chakrashim/core/lib/Backend/FlowGraph.cpp
index 645b58e7601..2fce12dcc68 100644
--- a/deps/chakrashim/core/lib/Backend/FlowGraph.cpp
+++ b/deps/chakrashim/core/lib/Backend/FlowGraph.cpp
@@ -3424,7 +3424,7 @@ FlowGraph::RemoveInstr(IR::Instr *instr, GlobOpt * globOpt)
* - When we restore HeapArguments object in the bail out path, it expects the scope object also to be restored - if one was created.
*/
Js::OpCode opcode = instr->m_opcode;
- if (opcode == Js::OpCode::LdElemI_A && instr->DoStackArgsOpt(this->func) &&
+ if (opcode == Js::OpCode::LdElemI_A && instr->DoStackArgsOpt() &&
globOpt->CurrentBlockData()->IsArgumentsOpnd(instr->GetSrc1()) && instr->m_func->GetScopeObjSym())
{
IR::ByteCodeUsesInstr * byteCodeUsesInstr = IR::ByteCodeUsesInstr::New(instr);
@@ -5266,7 +5266,7 @@ BasicBlock::MergePredBlocksValueMaps(GlobOpt* globOpt)
}
if(symsRequiringCompensationToMergedValueInfoMap.Count() != 0)
{
- globOpt->InsertValueCompensation(pred, this, &symsRequiringCompensationToMergedValueInfoMap);
+ globOpt->InsertValueCompensation(pred, symsRequiringCompensationToMergedValueInfoMap);
}
}
} NEXT_PREDECESSOR_EDGE_EDITING;
@@ -5325,12 +5325,6 @@ BasicBlock::MergePredBlocksValueMaps(GlobOpt* globOpt)
loop->liveFieldsOnEntry = JitAnew(globOpt->alloc, BVSparse<JitArenaAllocator>, globOpt->alloc);
loop->liveFieldsOnEntry->Copy(this->globOptData.liveFields);
- if (symsRequiringCompensationToMergedValueInfoMap.Count() != 0)
- {
- loop->symsRequiringCompensationToMergedValueInfoMap = JitAnew(globOpt->alloc, SymToValueInfoMap, globOpt->alloc);
- loop->symsRequiringCompensationToMergedValueInfoMap->Copy(&symsRequiringCompensationToMergedValueInfoMap);
- }
-
if(globOpt->DoBoundCheckHoist() && loop->inductionVariables)
{
globOpt->FinalizeInductionVariables(loop, &blockData);
diff --git a/deps/chakrashim/core/lib/Backend/FlowGraph.h b/deps/chakrashim/core/lib/Backend/FlowGraph.h
index 0d516222075..24c8e61798a 100644
--- a/deps/chakrashim/core/lib/Backend/FlowGraph.h
+++ b/deps/chakrashim/core/lib/Backend/FlowGraph.h
@@ -575,7 +575,6 @@ class Loop
BVSparse<JitArenaAllocator> *lossyInt32SymsOnEntry; // see GlobOptData::liveLossyInt32Syms
BVSparse<JitArenaAllocator> *float64SymsOnEntry;
BVSparse<JitArenaAllocator> *liveFieldsOnEntry;
- SymToValueInfoMap *symsRequiringCompensationToMergedValueInfoMap;
BVSparse<JitArenaAllocator> *symsUsedBeforeDefined; // stack syms that are live in the landing pad, and used before they are defined in the loop
BVSparse<JitArenaAllocator> *likelyIntSymsUsedBeforeDefined; // stack syms that are live in the landing pad with a likely-int value, and used before they are defined in the loop
@@ -588,7 +587,6 @@ class Loop
// cleanup in PreOptPeep in the pre-pass of a loop. For aggressively transferring
// values in prepass, we need to know if a source sym was ever assigned to in a loop.
BVSparse<JitArenaAllocator> *symsAssignedToInLoop;
- BVSparse<JitArenaAllocator> *preservesNumberValue;
BailOutInfo * bailOutInfo;
IR::BailOutInstr * toPrimitiveSideEffectCheck;
@@ -696,7 +694,6 @@ class Loop
// Temporary map to reuse existing startIndexOpnd while emitting
// 0 = !increment & !alreadyChanged, 1 = !increment & alreadyChanged, 2 = increment & !alreadyChanged, 3 = increment & alreadyChanged
IR::RegOpnd* startIndexOpndCache[4];
- IR::Instr* instr;
} MemOpInfo;
bool doMemOp : 1;
@@ -734,7 +731,6 @@ class Loop
symsAssignedToInLoop(nullptr),
needImplicitCallBailoutChecksForJsArrayCheckHoist(false),
inductionVariables(nullptr),
- preservesNumberValue(nullptr),
dominatingLoopCountableBlock(nullptr),
loopCount(nullptr),
loopCountBasedBoundBaseSyms(nullptr),
@@ -745,8 +741,7 @@ class Loop
allFieldsKilled(false),
isLeaf(true),
isProcessed(false),
- initialValueFieldMap(alloc),
- symsRequiringCompensationToMergedValueInfoMap(nullptr)
+ initialValueFieldMap(alloc)
{
this->loopNumber = ++func->loopCount;
}
diff --git a/deps/chakrashim/core/lib/Backend/Func.cpp b/deps/chakrashim/core/lib/Backend/Func.cpp
index 898d054fa7f..c95c6bfed27 100644
--- a/deps/chakrashim/core/lib/Backend/Func.cpp
+++ b/deps/chakrashim/core/lib/Backend/Func.cpp
@@ -58,6 +58,7 @@ Func::Func(JitArenaAllocator *alloc, JITTimeWorkItem * workItem,
m_bailoutReturnValueSym(nullptr),
m_hasBailedOutSym(nullptr),
m_inlineeFrameStartSym(nullptr),
+ inlineeStart(nullptr),
m_regsUsed(0),
m_fg(nullptr),
m_labelCount(0),
@@ -65,6 +66,7 @@ Func::Func(JitArenaAllocator *alloc, JITTimeWorkItem * workItem,
m_hasCalls(false),
m_hasInlineArgsOpt(false),
m_canDoInlineArgsOpt(true),
+ unoptimizableArgumentsObjReference(0),
m_doFastPaths(false),
hasBailout(false),
firstIRTemp(0),
@@ -92,6 +94,7 @@ Func::Func(JitArenaAllocator *alloc, JITTimeWorkItem * workItem,
hasInlinee(false),
thisOrParentInlinerHasArguments(false),
hasStackArgs(false),
+ hasArgLenAndConstOpt(false),
hasImplicitParamLoad(false),
hasThrow(false),
hasNonSimpleParams(false),
@@ -106,6 +109,7 @@ Func::Func(JitArenaAllocator *alloc, JITTimeWorkItem * workItem,
loopCount(0),
callSiteIdInParentFunc(callSiteIdInParentFunc),
isGetterSetter(isGetterSetter),
+ cachedInlineeFrameInfo(nullptr),
frameInfo(nullptr),
isTJLoopBody(false),
m_nativeCodeDataSym(nullptr),
@@ -301,8 +305,10 @@ Func::Codegen(JitArenaAllocator *alloc, JITTimeWorkItem * workItem,
Js::ScriptContextProfiler *const codeGenProfiler, const bool isBackgroundJIT)
{
bool rejit;
+ int rejitCounter = 0;
do
{
+ Assert(rejitCounter < 25);
Func func(alloc, workItem, threadContextInfo,
scriptContextInfo, outputData, epInfo, runtimeInfo,
polymorphicInlineCacheInfo, codeGenAllocators,
@@ -334,6 +340,8 @@ Func::Codegen(JitArenaAllocator *alloc, JITTimeWorkItem * workItem,
case RejitReason::DisableStackArgOpt:
outputData->disableStackArgOpt = TRUE;
break;
+ case RejitReason::DisableStackArgLenAndConstOpt:
+ break;
case RejitReason::DisableSwitchOptExpectingInteger:
case RejitReason::DisableSwitchOptExpectingString:
outputData->disableSwitchOpt = TRUE;
@@ -345,12 +353,6 @@ Func::Codegen(JitArenaAllocator *alloc, JITTimeWorkItem * workItem,
case RejitReason::TrackIntOverflowDisabled:
outputData->disableTrackCompoundedIntOverflow = TRUE;
break;
- case RejitReason::MemOpDisabled:
- outputData->disableMemOp = TRUE;
- break;
- case RejitReason::FailedEquivalentTypeCheck:
- // No disable flag. The thrower of the re-jit exception must guarantee that objtypespec is disabled where appropriate.
- break;
default:
Assume(UNREACHED);
}
@@ -366,6 +368,7 @@ Func::Codegen(JitArenaAllocator *alloc, JITTimeWorkItem * workItem,
}
rejit = true;
+ rejitCounter++;
}
// Either the entry point has a reference to the number now, or we failed to code gen and we
// don't need the numbers; we can flush the completed page now.
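
A note on the rejit change above: `Func::Codegen` now counts re-JIT attempts and asserts the loop stays below 25 iterations, so a rejit reason that never clears a disable flag (such as the removed `MemOpDisabled` case) cannot spin forever. A minimal sketch of that bounded-retry shape, with `TryCodegenOnce` as a hypothetical stand-in for one compilation attempt:

```cpp
#include <cassert>

// Hypothetical stand-in for a single codegen attempt; returns true on success,
// false when a bailout condition requests a re-JIT with some opt disabled.
static bool TryCodegenOnce(int attempt)
{
    return attempt >= 1; // pretend the second attempt succeeds
}

int main()
{
    bool rejit;
    int rejitCounter = 0;
    do
    {
        // Mirrors the Assert(rejitCounter < 25) above: every retry must make
        // progress by disabling an optimization, so the loop must terminate.
        assert(rejitCounter < 25);
        rejit = !TryCodegenOnce(rejitCounter);
        if (rejit)
        {
            rejitCounter++;
        }
    } while (rejit);
    return 0;
}
```
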
@@ -1130,12 +1133,6 @@ Func::IsTrackCompoundedIntOverflowDisabled() const
return (HasProfileInfo() && GetReadOnlyProfileInfo()->IsTrackCompoundedIntOverflowDisabled()) || m_output.IsTrackCompoundedIntOverflowDisabled();
}
-bool
-Func::IsMemOpDisabled() const
-{
- return (HasProfileInfo() && GetReadOnlyProfileInfo()->IsMemOpDisabled()) || m_output.IsMemOpDisabled();
-}
-
bool
Func::IsArrayCheckHoistDisabled() const
{
@@ -1524,12 +1521,6 @@ Func::GetObjTypeSpecFldInfo(const uint index) const
return GetWorkItem()->GetJITTimeInfo()->GetObjTypeSpecFldInfo(index);
}
-void
-Func::ClearObjTypeSpecFldInfo(const uint index)
-{
- GetWorkItem()->GetJITTimeInfo()->ClearObjTypeSpecFldInfo(index);
-}
-
ObjTypeSpecFldInfo*
Func::GetGlobalObjTypeSpecFldInfo(uint propertyInfoId) const
{
@@ -1691,14 +1682,14 @@ Func::LinkCtorCacheToPropertyId(Js::PropertyId propertyId, JITTimeConstructorCac
JITTimeConstructorCache* Func::GetConstructorCache(const Js::ProfileId profiledCallSiteId)
{
- AssertOrFailFast(profiledCallSiteId < GetJITFunctionBody()->GetProfiledCallSiteCount());
+ Assert(profiledCallSiteId < GetJITFunctionBody()->GetProfiledCallSiteCount());
Assert(this->constructorCaches != nullptr);
return this->constructorCaches[profiledCallSiteId];
}
void Func::SetConstructorCache(const Js::ProfileId profiledCallSiteId, JITTimeConstructorCache* constructorCache)
{
- AssertOrFailFast(profiledCallSiteId < GetJITFunctionBody()->GetProfiledCallSiteCount());
+ Assert(profiledCallSiteId < GetJITFunctionBody()->GetProfiledCallSiteCount());
Assert(constructorCache != nullptr);
Assert(this->constructorCaches != nullptr);
Assert(this->constructorCaches[profiledCallSiteId] == nullptr);
diff --git a/deps/chakrashim/core/lib/Backend/Func.h b/deps/chakrashim/core/lib/Backend/Func.h
index 03eedb5f448..a964d32044b 100644
--- a/deps/chakrashim/core/lib/Backend/Func.h
+++ b/deps/chakrashim/core/lib/Backend/Func.h
@@ -119,11 +119,11 @@ class Func
Js::RegSlot returnValueRegSlot = Js::Constants::NoRegister, const bool isInlinedConstructor = false,
Js::ProfileId callSiteIdInParentFunc = UINT16_MAX, bool isGetterSetter = false);
public:
- void * const GetCodeGenAllocators()
+ void * GetCodeGenAllocators()
{
return this->GetTopFunc()->m_codeGenAllocators;
}
- InProcCodeGenAllocators * const GetInProcCodeGenAllocators()
+ InProcCodeGenAllocators * GetInProcCodeGenAllocators()
{
Assert(!JITManager::GetJITManager()->IsJITServer());
return reinterpret_cast<InProcCodeGenAllocators *>(this->GetTopFunc()->m_codeGenAllocators);
@@ -274,7 +274,7 @@ class Func
return &m_output;
}
- const JITTimeFunctionBody * const GetJITFunctionBody() const
+ const JITTimeFunctionBody * GetJITFunctionBody() const
{
return m_workItem->GetJITFunctionBody();
}
@@ -517,6 +517,17 @@ static const unsigned __int64 c_debugFillPattern8 = 0xcececececececece;
m_inlineeFrameStartSym = sym;
}
+ void SetInlineeStart(IR::Instr *inlineeStartInstr)
+ {
+ Assert(inlineeStart == nullptr);
+ inlineeStart = inlineeStartInstr;
+ }
+
+ IR::Instr* GetInlineeStart()
+ {
+ return inlineeStart;
+ }
+
IR::SymOpnd *GetInlineeArgCountSlotOpnd()
{
return GetInlineeOpndAtOffset(Js::Constants::InlineeMetaArgIndex_Argc * MachPtr);
@@ -579,7 +590,6 @@ static const unsigned __int64 c_debugFillPattern8 = 0xcececececececece;
Js::Var AllocateNumber(double value);
ObjTypeSpecFldInfo* GetObjTypeSpecFldInfo(const uint index) const;
- void ClearObjTypeSpecFldInfo(const uint index);
ObjTypeSpecFldInfo* GetGlobalObjTypeSpecFldInfo(uint propertyInfoId) const;
// Gets an inline cache pointer to use in jitted code. Cached data may not be stable while jitting. Does not return null.
@@ -715,13 +725,16 @@ static const unsigned __int64 c_debugFillPattern8 = 0xcececececececece;
StackSym * tempSymDouble;
StackSym * tempSymBool;
uint32 loopCount;
+ uint32 unoptimizableArgumentsObjReference;
Js::ProfileId callSiteIdInParentFunc;
+ InlineeFrameInfo* cachedInlineeFrameInfo;
bool m_hasCalls: 1; // This is more accurate compared to m_isLeaf
bool m_hasInlineArgsOpt : 1;
bool m_doFastPaths : 1;
bool hasBailout: 1;
bool hasBailoutInEHRegion : 1;
bool hasStackArgs: 1;
+ bool hasArgLenAndConstOpt : 1;
bool hasImplicitParamLoad : 1; // True if there is a load of CallInfo, FunctionObject
bool hasThrow : 1;
bool hasUnoptimizedArgumentsAccess : 1; // True if there are any arguments access beyond the simple case of this.apply pattern
@@ -996,7 +1009,6 @@ static const unsigned __int64 c_debugFillPattern8 = 0xcececececececece;
void SetScopeObjSym(StackSym * sym);
StackSym * GetScopeObjSym();
bool IsTrackCompoundedIntOverflowDisabled() const;
- bool IsMemOpDisabled() const;
bool IsArrayCheckHoistDisabled() const;
bool IsStackArgOptDisabled() const;
bool IsSwitchOptDisabled() const;
@@ -1041,6 +1053,7 @@ static const unsigned __int64 c_debugFillPattern8 = 0xcececececececece;
Func * const topFunc;
Func * const parentFunc;
StackSym * m_inlineeFrameStartSym;
+ IR::Instr * inlineeStart;
uint maxInlineeArgOutSize;
const bool m_isBackgroundJIT;
bool hasInstrNumber;
diff --git a/deps/chakrashim/core/lib/Backend/FunctionJITTimeInfo.cpp b/deps/chakrashim/core/lib/Backend/FunctionJITTimeInfo.cpp
index a6c0fb897f6..a6b3b409b11 100644
--- a/deps/chakrashim/core/lib/Backend/FunctionJITTimeInfo.cpp
+++ b/deps/chakrashim/core/lib/Backend/FunctionJITTimeInfo.cpp
@@ -302,27 +302,15 @@ FunctionJITTimeInfo::GetRuntimeInfo() const
ObjTypeSpecFldInfo *
FunctionJITTimeInfo::GetObjTypeSpecFldInfo(uint index) const
{
+ AssertOrFailFast(index < GetBody()->GetInlineCacheCount());
if (m_data.objTypeSpecFldInfoArray == nullptr)
{
return nullptr;
}
- AssertOrFailFast(index < m_data.objTypeSpecFldInfoCount);
return reinterpret_cast<ObjTypeSpecFldInfo *>(m_data.objTypeSpecFldInfoArray[index]);
}
-void
-FunctionJITTimeInfo::ClearObjTypeSpecFldInfo(uint index)
-{
- if (m_data.objTypeSpecFldInfoArray == nullptr)
- {
- return;
- }
- AssertOrFailFast(index < m_data.objTypeSpecFldInfoCount);
-
- m_data.objTypeSpecFldInfoArray[index] = nullptr;
-}
-
ObjTypeSpecFldInfo *
FunctionJITTimeInfo::GetGlobalObjTypeSpecFldInfo(uint index) const
{
diff --git a/deps/chakrashim/core/lib/Backend/FunctionJITTimeInfo.h b/deps/chakrashim/core/lib/Backend/FunctionJITTimeInfo.h
index e204c1395a9..f52a3dea421 100644
--- a/deps/chakrashim/core/lib/Backend/FunctionJITTimeInfo.h
+++ b/deps/chakrashim/core/lib/Backend/FunctionJITTimeInfo.h
@@ -38,7 +38,6 @@ class FunctionJITTimeInfo
const BVFixed * GetInlineesBV() const;
const FunctionJITTimeInfo * GetJitTimeDataFromFunctionInfoAddr(intptr_t polyFuncInfo) const;
ObjTypeSpecFldInfo * GetObjTypeSpecFldInfo(uint index) const;
- void ClearObjTypeSpecFldInfo(uint index);
ObjTypeSpecFldInfo * GetGlobalObjTypeSpecFldInfo(uint index) const;
uint GetGlobalObjTypeSpecFldInfoCount() const;
const FunctionJITRuntimeInfo * GetInlineeForTargetInlineeRuntimeData(const Js::ProfileId profiledCallSiteId, intptr_t inlineeFuncBodyAddr) const;
diff --git a/deps/chakrashim/core/lib/Backend/GlobOpt.cpp b/deps/chakrashim/core/lib/Backend/GlobOpt.cpp
index 33bff2865ef..b292f3cd3c4 100644
--- a/deps/chakrashim/core/lib/Backend/GlobOpt.cpp
+++ b/deps/chakrashim/core/lib/Backend/GlobOpt.cpp
@@ -87,6 +87,7 @@ GlobOpt::GlobOpt(Func * func)
updateInductionVariableValueNumber(false),
isPerformingLoopBackEdgeCompensation(false),
currentRegion(nullptr),
+ auxSlotPtrSyms(nullptr),
changedSymsAfterIncBailoutCandidate(nullptr),
doTypeSpec(
!IsTypeSpecPhaseOff(func)),
@@ -174,7 +175,7 @@ GlobOpt::Optimize()
// Still need to run the dead store phase to calculate the live reg on back edge
this->BackwardPass(Js::DeadStorePhase);
- CannotAllocateArgumentsObjectOnStack();
+ CannotAllocateArgumentsObjectOnStack(nullptr);
return;
}
@@ -353,6 +354,8 @@ GlobOpt::ForwardPass()
// changedSymsAfterIncBailoutCandidate helps track building incremental bailout in ForwardPass
this->changedSymsAfterIncBailoutCandidate = JitAnew(alloc, BVSparse<JitArenaAllocator>, alloc);
+ this->auxSlotPtrSyms = JitAnew(alloc, BVSparse<JitArenaAllocator>, alloc);
+
#if DBG
this->byteCodeUsesBeforeOpt = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldCopyPropPhase) && this->DoFunctionFieldCopyProp())
@@ -434,6 +437,7 @@ GlobOpt::ForwardPass()
// this->alloc will be freed right after return, no need to free it here
this->changedSymsAfterIncBailoutCandidate = nullptr;
+ this->auxSlotPtrSyms = nullptr;
END_CODEGEN_PHASE(this->func, Js::ForwardPhase);
}
@@ -457,7 +461,7 @@ GlobOpt::OptBlock(BasicBlock *block)
{
loop->fieldPRESymStores->Or(loop->parent->fieldPRESymStores);
}
-
+
if (!this->IsLoopPrePass() && DoFieldPRE(loop))
{
// Note: !IsLoopPrePass means this was a root loop pre-pass. FieldPre() is called once per loop.
@@ -486,7 +490,7 @@ GlobOpt::OptBlock(BasicBlock *block)
{
this->KillAllFields(CurrentBlockData()->liveFields);
}
-
+
this->tempAlloc->Reset();
if(loop && block->isLoopHeader)
@@ -599,11 +603,6 @@ GlobOpt::OptBlock(BasicBlock *block)
this->tempBv->And(liveOnBackEdge);
this->ToFloat64(this->tempBv, block->loop->landingPad);
- if (block->loop->symsRequiringCompensationToMergedValueInfoMap)
- {
- InsertValueCompensation(block, succ, block->loop->symsRequiringCompensationToMergedValueInfoMap);
- }
-
// Now that we're done with the liveFields within this loop, trim the set to those syms
// that the backward pass told us were live out of the loop.
// This assumes we have no further need of the liveFields within the loop.
@@ -891,7 +890,7 @@ GlobOpt::ToTypeSpec(BVSparse<JitArenaAllocator> *bv, BasicBlock *block, IRType t
// instruction itself should disable arguments object optimization.
if(block->globOptData.argObjSyms && block->globOptData.IsArgumentsSymID(id))
{
- CannotAllocateArgumentsObjectOnStack();
+ CannotAllocateArgumentsObjectOnStack(nullptr);
}
if (block->globOptData.liveVarSyms->Test(id))
@@ -972,7 +971,7 @@ BOOL GlobOpt::PRE::PreloadPRECandidate(Loop *loop, GlobHashBucket* candidate)
// We'll have to add a def instruction for the object sym in the landing pad, and then we can continue
// pre-loading the current PRE candidate.
// Case in point:
- // $L1
+ // $L1
// value|symStore
// t1 = o.x (v1|t3)
// t2 = t1.y (v2|t4) <-- t1 is not live in the loop landing pad
@@ -1032,15 +1031,15 @@ BOOL GlobOpt::PRE::PreloadPRECandidate(Loop *loop, GlobHashBucket* candidate)
ldInstr->SetDst(IR::RegOpnd::New(symStore->AsStackSym(), TyVar, this->globOpt->func));
loop->fieldPRESymStores->Set(symStore->m_id);
landingPad->globOptData.liveVarSyms->Set(symStore->m_id);
-
+
Value * objPtrValue = landingPad->globOptData.FindValue(objPtrSym);
objPtrCopyPropSym = objPtrCopyPropSym ? objPtrCopyPropSym : objPtrValue ? landingPad->globOptData.GetCopyPropSym(objPtrSym, objPtrValue) : nullptr;
if (objPtrCopyPropSym)
{
- // If we inserted T4 = T1.y, and T3 is the copy prop sym for T1 in the landing pad, we need T3.y
- // to be live on back edges to have the merge produce a value for T3.y. Having a value for T1.y
- // produced from the merge is not enough as the T1.y in the loop will get obj-ptr-copy-propped to
+ // If we inserted T4 = T1.y, and T3 is the copy prop sym for T1 in the landing pad, we need T3.y
+ // to be live on back edges to have the merge produce a value for T3.y. Having a value for T1.y
+ // produced from the merge is not enough as the T1.y in the loop will get obj-ptr-copy-propped to
// T3.y
// T3.y
@@ -1156,21 +1155,14 @@ void GlobOpt::FieldPRE(Loop *loop)
void GlobOpt::InsertValueCompensation(
BasicBlock *const predecessor,
- BasicBlock *const successor,
- const SymToValueInfoMap *symsRequiringCompensationToMergedValueInfoMap)
+ const SymToValueInfoMap &symsRequiringCompensationToMergedValueInfoMap)
{
Assert(predecessor);
- Assert(successor);
- AssertOrFailFast(predecessor != successor);
- Assert(symsRequiringCompensationToMergedValueInfoMap->Count() != 0);
+ Assert(symsRequiringCompensationToMergedValueInfoMap.Count() != 0);
IR::Instr *insertBeforeInstr = predecessor->GetLastInstr();
Func *const func = insertBeforeInstr->m_func;
bool setLastInstrInPredecessor;
- // If this is a loop back edge, and the successor has been completed, don't attempt to update its block data.
- // The update is unnecessary, and the data has likely been freed.
- bool updateSuccessorBlockData = !this->isPerformingLoopBackEdgeCompensation || successor->GetDataUseCount() > 0;
-
if(insertBeforeInstr->IsBranchInstr() || insertBeforeInstr->m_opcode == Js::OpCode::BailTarget)
{
// Don't insert code between the branch and the corresponding ByteCodeUses instructions
@@ -1189,7 +1181,7 @@ void GlobOpt::InsertValueCompensation(
}
GlobOptBlockData &predecessorBlockData = predecessor->globOptData;
- GlobOptBlockData &successorBlockData = successor->globOptData;
+ GlobOptBlockData &successorBlockData = *CurrentBlockData();
struct DelayChangeValueInfo
{
Value* predecessorValue;
@@ -1205,7 +1197,7 @@ void GlobOpt::InsertValueCompensation(
}
};
JsUtil::List<DelayChangeValueInfo, JitArenaAllocator> delayChangeValueInfo(alloc);
- for(auto it = symsRequiringCompensationToMergedValueInfoMap->GetIterator(); it.IsValid(); it.MoveNext())
+ for(auto it = symsRequiringCompensationToMergedValueInfoMap.GetIterator(); it.IsValid(); it.MoveNext())
{
const auto &entry = it.Current();
Sym *const sym = entry.Key();
@@ -1248,7 +1240,7 @@ void GlobOpt::InsertValueCompensation(
{
IR::Instr *const newInstr =
IR::Instr::New(
- Js::OpCode::Ld_A,
+ Js::OpCode::Ld_I4,
IR::RegOpnd::New(mergedHeadSegmentLengthSym, mergedHeadSegmentLengthSym->GetType(), func),
IR::RegOpnd::New(predecessorHeadSegmentLengthSym, predecessorHeadSegmentLengthSym->GetType(), func),
func);
@@ -1261,34 +1253,30 @@ void GlobOpt::InsertValueCompensation(
// Merge the head segment length value
Assert(predecessorBlockData.liveVarSyms->Test(predecessorHeadSegmentLengthSym->m_id));
predecessorBlockData.liveVarSyms->Set(mergedHeadSegmentLengthSym->m_id);
+ successorBlockData.liveVarSyms->Set(mergedHeadSegmentLengthSym->m_id);
Value *const predecessorHeadSegmentLengthValue =
predecessorBlockData.FindValue(predecessorHeadSegmentLengthSym);
Assert(predecessorHeadSegmentLengthValue);
predecessorBlockData.SetValue(predecessorHeadSegmentLengthValue, mergedHeadSegmentLengthSym);
-
- if (updateSuccessorBlockData)
+ Value *const mergedHeadSegmentLengthValue = successorBlockData.FindValue(mergedHeadSegmentLengthSym);
+ if(mergedHeadSegmentLengthValue)
{
- successorBlockData.liveVarSyms->Set(mergedHeadSegmentLengthSym->m_id);
- Value *const mergedHeadSegmentLengthValue = successorBlockData.FindValue(mergedHeadSegmentLengthSym);
- if(mergedHeadSegmentLengthValue)
- {
- Assert(mergedHeadSegmentLengthValue->GetValueNumber() != predecessorHeadSegmentLengthValue->GetValueNumber());
- if(predecessorHeadSegmentLengthValue->GetValueInfo() != mergedHeadSegmentLengthValue->GetValueInfo())
- {
- mergedHeadSegmentLengthValue->SetValueInfo(
- ValueInfo::MergeLikelyIntValueInfo(
- this->alloc,
- mergedHeadSegmentLengthValue,
- predecessorHeadSegmentLengthValue,
- mergedHeadSegmentLengthValue->GetValueInfo()->Type()
- .Merge(predecessorHeadSegmentLengthValue->GetValueInfo()->Type())));
- }
- }
- else
+ Assert(mergedHeadSegmentLengthValue->GetValueNumber() != predecessorHeadSegmentLengthValue->GetValueNumber());
+ if(predecessorHeadSegmentLengthValue->GetValueInfo() != mergedHeadSegmentLengthValue->GetValueInfo())
{
- successorBlockData.SetValue(CopyValue(predecessorHeadSegmentLengthValue), mergedHeadSegmentLengthSym);
+ mergedHeadSegmentLengthValue->SetValueInfo(
+ ValueInfo::MergeLikelyIntValueInfo(
+ this->alloc,
+ mergedHeadSegmentLengthValue,
+ predecessorHeadSegmentLengthValue,
+ mergedHeadSegmentLengthValue->GetValueInfo()->Type()
+ .Merge(predecessorHeadSegmentLengthValue->GetValueInfo()->Type())));
}
}
+ else
+ {
+ successorBlockData.SetValue(CopyValue(predecessorHeadSegmentLengthValue), mergedHeadSegmentLengthSym);
+ }
}
if(mergedLengthSym && predecessorLengthSym != mergedLengthSym)
@@ -1308,32 +1296,28 @@ void GlobOpt::InsertValueCompensation(
// Merge the length value
Assert(predecessorBlockData.liveVarSyms->Test(predecessorLengthSym->m_id));
predecessorBlockData.liveVarSyms->Set(mergedLengthSym->m_id);
+ successorBlockData.liveVarSyms->Set(mergedLengthSym->m_id);
Value *const predecessorLengthValue = predecessorBlockData.FindValue(predecessorLengthSym);
Assert(predecessorLengthValue);
predecessorBlockData.SetValue(predecessorLengthValue, mergedLengthSym);
-
- if (updateSuccessorBlockData)
+ Value *const mergedLengthValue = successorBlockData.FindValue(mergedLengthSym);
+ if(mergedLengthValue)
{
- successorBlockData.liveVarSyms->Set(mergedLengthSym->m_id);
- Value *const mergedLengthValue = successorBlockData.FindValue(mergedLengthSym);
- if(mergedLengthValue)
- {
- Assert(mergedLengthValue->GetValueNumber() != predecessorLengthValue->GetValueNumber());
- if(predecessorLengthValue->GetValueInfo() != mergedLengthValue->GetValueInfo())
- {
- mergedLengthValue->SetValueInfo(
- ValueInfo::MergeLikelyIntValueInfo(
- this->alloc,
- mergedLengthValue,
- predecessorLengthValue,
- mergedLengthValue->GetValueInfo()->Type().Merge(predecessorLengthValue->GetValueInfo()->Type())));
- }
- }
- else
+ Assert(mergedLengthValue->GetValueNumber() != predecessorLengthValue->GetValueNumber());
+ if(predecessorLengthValue->GetValueInfo() != mergedLengthValue->GetValueInfo())
{
- successorBlockData.SetValue(CopyValue(predecessorLengthValue), mergedLengthSym);
+ mergedLengthValue->SetValueInfo(
+ ValueInfo::MergeLikelyIntValueInfo(
+ this->alloc,
+ mergedLengthValue,
+ predecessorLengthValue,
+ mergedLengthValue->GetValueInfo()->Type().Merge(predecessorLengthValue->GetValueInfo()->Type())));
}
}
+ else
+ {
+ successorBlockData.SetValue(CopyValue(predecessorLengthValue), mergedLengthSym);
+ }
}
if(compensated)
@@ -1521,7 +1505,7 @@ GlobOpt::OptArguments(IR::Instr *instr)
if (instr->m_func->GetJITFunctionBody()->GetInParamsCount() != 1 && !instr->m_func->IsStackArgsEnabled())
{
- CannotAllocateArgumentsObjectOnStack();
+ CannotAllocateArgumentsObjectOnStack(instr->m_func);
}
else
{
@@ -1536,7 +1520,7 @@ GlobOpt::OptArguments(IR::Instr *instr)
// In debug mode, we don't want to optimize away the aliases, since we may have to show them during inspection.
if (((!AreFromSameBytecodeFunc(src1->AsRegOpnd(), dst->AsRegOpnd()) || this->currentBlock->loop) && instr->m_opcode != Js::OpCode::BytecodeArgOutCapture) || this->func->IsJitInDebugMode())
{
- CannotAllocateArgumentsObjectOnStack();
+ CannotAllocateArgumentsObjectOnStack(instr->m_func);
return;
}
if(!dst->AsRegOpnd()->GetStackSym()->m_nonEscapingArgObjAlias)
@@ -1559,7 +1543,7 @@ GlobOpt::OptArguments(IR::Instr *instr)
}
SymID id = 0;
-
+
switch(instr->m_opcode)
{
case Js::OpCode::LdElemI_A:
@@ -1570,7 +1554,7 @@ GlobOpt::OptArguments(IR::Instr *instr)
if (indexOpnd && CurrentBlockData()->IsArgumentsSymID(indexOpnd->m_sym->m_id))
{
// Pathological test cases such as a[arguments]
- CannotAllocateArgumentsObjectOnStack();
+ CannotAllocateArgumentsObjectOnStack(instr->m_func);
return;
}
@@ -1597,6 +1581,7 @@ GlobOpt::OptArguments(IR::Instr *instr)
if (CurrentBlockData()->IsArgumentsOpnd(src1))
{
instr->usesStackArgumentsObject = true;
+ instr->m_func->unoptimizableArgumentsObjReference++;
}
if (CurrentBlockData()->IsArgumentsOpnd(src1) &&
@@ -1616,6 +1601,7 @@ GlobOpt::OptArguments(IR::Instr *instr)
if (builtinFunction == Js::BuiltinFunction::JavascriptFunction_Apply)
{
CurrentBlockData()->ClearArgumentsSym(src1->AsRegOpnd());
+ instr->m_func->unoptimizableArgumentsObjReference--;
}
}
else if (builtinOpnd->IsRegOpnd())
@@ -1623,6 +1609,7 @@ GlobOpt::OptArguments(IR::Instr *instr)
if (builtinOpnd->AsRegOpnd()->m_sym->m_builtInIndex == Js::BuiltinFunction::JavascriptFunction_Apply)
{
CurrentBlockData()->ClearArgumentsSym(src1->AsRegOpnd());
+ instr->m_func->unoptimizableArgumentsObjReference--;
}
}
}
@@ -1659,7 +1646,7 @@ GlobOpt::OptArguments(IR::Instr *instr)
WritePerfHint(PerfHints::HeapArgumentsCreated, instr->m_func, instr->GetByteCodeOffset());
}
#endif
- CannotAllocateArgumentsObjectOnStack();
+ CannotAllocateArgumentsObjectOnStack(instr->m_func);
return;
}
}
@@ -1677,7 +1664,7 @@ GlobOpt::OptArguments(IR::Instr *instr)
WritePerfHint(PerfHints::HeapArgumentsCreated, instr->m_func, instr->GetByteCodeOffset());
}
#endif
- CannotAllocateArgumentsObjectOnStack();
+ CannotAllocateArgumentsObjectOnStack(instr->m_func);
return;
}
}
@@ -1696,7 +1683,7 @@ GlobOpt::OptArguments(IR::Instr *instr)
WritePerfHint(PerfHints::HeapArgumentsModification, instr->m_func, instr->GetByteCodeOffset());
}
#endif
- CannotAllocateArgumentsObjectOnStack();
+ CannotAllocateArgumentsObjectOnStack(instr->m_func);
return;
}
}
@@ -1710,7 +1697,7 @@ GlobOpt::OptArguments(IR::Instr *instr)
WritePerfHint(PerfHints::HeapArgumentsModification, instr->m_func, instr->GetByteCodeOffset());
}
#endif
- CannotAllocateArgumentsObjectOnStack();
+ CannotAllocateArgumentsObjectOnStack(instr->m_func);
return;
}
CurrentBlockData()->ClearArgumentsSym(dst->AsRegOpnd());
@@ -1860,10 +1847,6 @@ GlobOpt::IsAllowedForMemOpt(IR::Instr* instr, bool isMemset, IR::RegOpnd *baseOp
return false;
}
}
- else
- {
- return false;
- }
if (!baseValueType.IsTypedArray())
{
@@ -2099,7 +2082,6 @@ bool GlobOpt::CollectMemcopyStElementI(IR::Instr *instr, Loop *loop)
// Consider: Can we remove the count field?
memcopyInfo->count++;
- AssertOrFailFast(memcopyInfo->count <= 1);
memcopyInfo->base = baseSymID;
return true;
@@ -2161,46 +2143,27 @@ GlobOpt::CollectMemOpInfo(IR::Instr *instrBegin, IR::Instr *instr, Value *src1Va
return false;
}
break;
+ case Js::OpCode::Decr_A:
+ isIncr = false;
+ case Js::OpCode::Incr_A:
+ isChangedByOne = true;
+ goto MemOpCheckInductionVariable;
case Js::OpCode::Sub_I4:
+ case Js::OpCode::Sub_A:
isIncr = false;
+ case Js::OpCode::Add_A:
case Js::OpCode::Add_I4:
{
- // The only case in which these OpCodes can contribute to an inductionVariableChangeInfo
- // is when the induction variable is being modified and overwritten aswell (ex: j = j + 1)
- // and not when the induction variable is modified but not overwritten (ex: k = j + 1).
- // This can either be detected in IR as
- // s1 = Add_I4 s1 1 // Case #1, can be seen with "j++".
- // or as
- // s4(s2) = Add_I4 s3(s1) 1 // Case #2, can be see with "j = j + 1".
- // s1 = Ld_A s2
- bool isInductionVar = false;
- IR::Instr* nextInstr = instr->m_next;
- if (
- // Checks for Case #1 and Case #2
- instr->GetDst()->GetStackSym() != nullptr &&
- instr->GetDst()->IsRegOpnd() &&
- (
- // Checks for Case #1
- (instr->GetDst()->GetStackSym() == instr->GetSrc1()->GetStackSym()) ||
-
- // Checks for Case #2
- (nextInstr&& nextInstr->m_opcode == Js::OpCode::Ld_A &&
- nextInstr->GetSrc1()->IsRegOpnd() &&
- nextInstr->GetDst()->IsRegOpnd() &&
- GetVarSymID(instr->GetDst()->GetStackSym()) == nextInstr->GetSrc1()->GetStackSym()->m_id &&
- GetVarSymID(instr->GetSrc1()->GetStackSym()) == nextInstr->GetDst()->GetStackSym()->m_id)
- )
- )
+MemOpCheckInductionVariable:
+ StackSym *sym = instr->GetSrc1()->GetStackSym();
+ if (!sym)
{
- isInductionVar = true;
+ sym = instr->GetSrc2()->GetStackSym();
}
-
- // Even if dstIsInductionVar then dst == src1 so it's safe to use src1 as the induction sym always.
- StackSym* sym = instr->GetSrc1()->GetStackSym();
SymID inductionSymID = GetVarSymID(sym);
- if (isInductionVar && IsSymIDInductionVariable(inductionSymID, this->currentBlock->loop))
+ if (IsSymIDInductionVariable(inductionSymID, this->currentBlock->loop))
{
if (!isChangedByOne)
{
@@ -2258,13 +2221,7 @@ GlobOpt::CollectMemOpInfo(IR::Instr *instrBegin, IR::Instr *instr, Value *src1Va
{
Loop::InductionVariableChangeInfo inductionVariableChangeInfo = { 0, 0 };
inductionVariableChangeInfo = loop->memOpInfo->inductionVariableChangeInfoMap->Lookup(inductionSymID, inductionVariableChangeInfo);
-
- // If inductionVariableChangeInfo.unroll has been invalidated, do
- // not modify the Js::Constants::InvalidLoopUnrollFactor value
- if (inductionVariableChangeInfo.unroll != Js::Constants::InvalidLoopUnrollFactor)
- {
- inductionVariableChangeInfo.unroll++;
- }
+ inductionVariableChangeInfo.unroll++;
inductionVariableChangeInfo.isIncremental = isIncr;
loop->memOpInfo->inductionVariableChangeInfoMap->Item(inductionSymID, inductionVariableChangeInfo);
}
@@ -2302,27 +2259,6 @@ GlobOpt::CollectMemOpInfo(IR::Instr *instrBegin, IR::Instr *instr, Value *src1Va
}
}
NEXT_INSTR_IN_RANGE;
- IR::Instr* prevInstr = instr->m_prev;
-
- // If an instr where the dst is an induction variable (and thus is being written to) is not caught by a case in the above
- // switch statement (which implies that this instr does not contributes to a inductionVariableChangeInfo) and in the default
- // case does not set doMemOp to false (which implies that this instr does not invalidate this MemOp), then FailFast as we
- // should not be performing a MemOp under these conditions.
- AssertOrFailFast(!instr->GetDst() || instr->m_opcode == Js::OpCode::IncrLoopBodyCount || !loop->memOpInfo ||
-
- // Refer to "Case #2" described above in this function. For the following IR:
- // Line #1: s4(s2) = Add_I4 s3(s1) 1
- // Line #2: s3(s1) = Ld_A s4(s2)
- // do not consider line #2 as a violating instr
- (instr->m_opcode == Js::OpCode::Ld_I4 &&
- prevInstr && (prevInstr->m_opcode == Js::OpCode::Add_I4 || prevInstr->m_opcode == Js::OpCode::Sub_I4) &&
- instr->GetSrc1()->IsRegOpnd() &&
- instr->GetDst()->IsRegOpnd() &&
- prevInstr->GetDst()->IsRegOpnd() &&
- instr->GetDst()->GetStackSym() == prevInstr->GetSrc1()->GetStackSym() &&
- instr->GetSrc1()->GetStackSym() == prevInstr->GetDst()->GetStackSym()) ||
-
- !loop->memOpInfo->inductionVariableChangeInfoMap->ContainsKey(GetVarSymID(instr->GetDst()->GetStackSym())));
}
return true;
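
For context on the widened switch above: the rewrite folds `Incr_A`/`Decr_A` and the var-typed `Add_A`/`Sub_A` into the same induction-variable path as `Add_I4`/`Sub_I4`, using case fall-through to share the direction and step-size flags. A small sketch of that classification, with a hypothetical enum standing in for the opcodes:

```cpp
#include <cstdio>

// Hypothetical opcode subset; fall-through mirrors the diff's case ordering.
enum class Op { IncrA, DecrA, AddI4, SubI4, AddA, SubA };

struct StepInfo { bool isIncr; bool isChangedByOne; };

static StepInfo ClassifyInductionStep(Op op)
{
    StepInfo info = { true, false };
    switch (op)
    {
    case Op::DecrA:
        info.isIncr = false;
        // fall through: Incr_A/Decr_A always change the sym by exactly one
    case Op::IncrA:
        info.isChangedByOne = true;
        break;
    case Op::SubI4:
    case Op::SubA:
        info.isIncr = false;
        break;
    case Op::AddA:
    case Op::AddI4:
        break; // step size unknown here; checked against the const operand later
    }
    return info;
}

int main()
{
    StepInfo s = ClassifyInductionStep(Op::DecrA);
    std::printf("isIncr=%d isChangedByOne=%d\n", s.isIncr, s.isChangedByOne);
    return 0;
}
```
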
@@ -2467,7 +2403,7 @@ GlobOpt::OptInstr(IR::Instr *&instr, bool* isInstrRemoved)
//StackArguments Optimization - We bail out if the index is out of range of actuals.
if ((instr->m_opcode == Js::OpCode::LdElemI_A || instr->m_opcode == Js::OpCode::TypeofElem) &&
- instr->DoStackArgsOpt(this->func) && !this->IsLoopPrePass())
+ instr->DoStackArgsOpt() && !this->IsLoopPrePass())
{
GenerateBailAtOperation(&instr, IR::BailOnStackArgsOutOfActualsRange);
}
@@ -2509,6 +2445,7 @@ GlobOpt::OptInstr(IR::Instr *&instr, bool* isInstrRemoved)
OptimizeChecks(instr);
OptArraySrc(&instr, &src1Val, &src2Val);
OptNewScObject(&instr, src1Val);
+ OptStackArgLenAndConst(instr, &src1Val);
instr = this->OptPeep(instr, src1Val, src2Val);
@@ -2683,7 +2620,7 @@ GlobOpt::OptInstr(IR::Instr *&instr, bool* isInstrRemoved)
!(instr->IsJitProfilingInstr()) &&
this->currentBlock->loop && !IsLoopPrePass() &&
!func->IsJitInDebugMode() &&
- !func->IsMemOpDisabled() &&
+ (func->HasProfileInfo() && !func->GetReadOnlyProfileInfo()->IsMemOpDisabled()) &&
this->currentBlock->loop->doMemOp)
{
CollectMemOpInfo(instrPrev, instr, src1Val, src2Val);
@@ -2753,54 +2690,6 @@ GlobOpt::OptInstr(IR::Instr *&instr, bool* isInstrRemoved)
return instrNext;
}
-bool
-GlobOpt::IsNonNumericRegOpnd(IR::RegOpnd *opnd, bool inGlobOpt, bool *isSafeToTransferInPrepass /*=nullptr*/) const
-{
- if (opnd == nullptr)
- {
- return false;
- }
-
- if (opnd->m_sym->m_isNotNumber)
- {
- return true;
- }
-
- if (!inGlobOpt)
- {
- return false;
- }
-
- if (opnd->GetValueType().IsNumber() || currentBlock->globOptData.IsTypeSpecialized(opnd->m_sym))
- {
- if (!this->IsLoopPrePass())
- {
- return false;
- }
-
- Value * opndValue = this->currentBlock->globOptData.FindValue(opnd->m_sym);
- ValueInfo * opndValueInfo = opndValue ? opndValue->GetValueInfo() : nullptr;
- if (!opndValueInfo)
- {
- return true;
- }
-
- bool isSafeToTransfer = this->IsSafeToTransferInPrepass(opnd->m_sym, opndValueInfo);
- if (isSafeToTransferInPrepass != nullptr)
- {
- *isSafeToTransferInPrepass = isSafeToTransfer;
- }
- if (this->prePassLoop->preservesNumberValue->Test(opnd->m_sym->m_id))
- {
- return false;
- }
-
- return !isSafeToTransfer;
- }
-
- return true;
-}
-
bool
GlobOpt::OptTagChecks(IR::Instr *instr)
{
@@ -2858,14 +2747,11 @@ GlobOpt::OptTagChecks(IR::Instr *instr)
ChangeValueType(nullptr, value, valueType.SetCanBeTaggedValue(false), true /*preserveSubClassInfo*/);
return false;
}
- if (!this->IsLoopPrePass())
+ if (this->byteCodeUses)
{
- if (this->byteCodeUses)
- {
- this->InsertByteCodeUses(instr);
- }
- this->currentBlock->RemoveInstr(instr);
+ this->InsertByteCodeUses(instr);
}
+ this->currentBlock->RemoveInstr(instr);
return true;
}
@@ -2965,6 +2851,12 @@ GlobOpt::OptDst(
{
this->FinishOptPropOp(instr, opnd->AsPropertySymOpnd());
}
+ else if (instr->m_opcode == Js::OpCode::StElemI_A ||
+ instr->m_opcode == Js::OpCode::StElemI_A_Strict ||
+ instr->m_opcode == Js::OpCode::InitComputedProperty)
+ {
+ this->KillObjectHeaderInlinedTypeSyms(this->currentBlock, false);
+ }
if (opnd->IsIndirOpnd() && !this->IsLoopPrePass())
{
@@ -3171,7 +3063,7 @@ GlobOpt::SetLoopFieldInitialValue(Loop *loop, IR::Instr *instr, PropertySym *pro
Value *landingPadObjPtrVal, *currentObjPtrVal;
landingPadObjPtrVal = loop->landingPad->globOptData.FindValue(objectSym);
currentObjPtrVal = CurrentBlockData()->FindValue(objectSym);
-
+
auto CanSetInitialValue = [&]() -> bool {
if (!currentObjPtrVal)
{
@@ -3365,14 +3257,10 @@ GlobOpt::OptSrc(IR::Opnd *opnd, IR::Instr * *pInstr, Value **indirIndexValRef, I
}
originalPropertySym = sym->AsPropertySym();
- // Don't give a value to 'arguments' property sym to prevent field copy prop of 'arguments'
+ // Don't give a value to 'arguments' property sym to prevent field copy prop of 'arguments'
if (originalPropertySym->AsPropertySym()->m_propertyId == Js::PropertyIds::arguments &&
originalPropertySym->AsPropertySym()->m_fieldKind == PropertyKindData)
{
- if (opnd->AsSymOpnd()->IsPropertySymOpnd())
- {
- this->FinishOptPropOp(instr, opnd->AsPropertySymOpnd());
- }
return nullptr;
}
@@ -3380,7 +3268,7 @@ GlobOpt::OptSrc(IR::Opnd *opnd, IR::Instr * *pInstr, Value **indirIndexValRef, I
opnd->AsSymOpnd()->SetPropertyOwnerValueType(
objectValue ? objectValue->GetValueInfo()->Type() : ValueType::Uninitialized);
-
+
sym = this->CopyPropPropertySymObj(opnd->AsSymOpnd(), instr);
if (!DoFieldCopyProp())
@@ -3430,7 +3318,7 @@ GlobOpt::OptSrc(IR::Opnd *opnd, IR::Instr * *pInstr, Value **indirIndexValRef, I
}
}
}
- break;
+ break;
}
case IR::OpndKindReg:
// Clear the opnd's value type up-front, so that this code cannot accidentally use the value type set from a previous
@@ -3566,7 +3454,7 @@ GlobOpt::OptSrc(IR::Opnd *opnd, IR::Instr * *pInstr, Value **indirIndexValRef, I
if (profiledArrayType.IsLikelyObject())
{
// Ideally we want to use the most specialized type seen by this path, but when that causes bailouts use the least specialized type instead.
- if (useAggressiveSpecialization &&
+ if (useAggressiveSpecialization &&
profiledArrayType.GetObjectType() == valueType.GetObjectType() &&
!valueType.IsLikelyNativeIntArray() &&
(
@@ -3577,7 +3465,7 @@ GlobOpt::OptSrc(IR::Opnd *opnd, IR::Instr * *pInstr, Value **indirIndexValRef, I
valueType = profiledArrayType.SetHasNoMissingValues(valueType.HasNoMissingValues());
ChangeValueType(this->currentBlock, CurrentBlockData()->FindValue(opnd->AsRegOpnd()->m_sym), valueType, false);
}
- else if (!useAggressiveSpecialization &&
+ else if (!useAggressiveSpecialization &&
(profiledArrayType.GetObjectType() != valueType.GetObjectType() ||
(
valueType.IsLikelyNativeArray() &&
@@ -3603,7 +3491,7 @@ GlobOpt::OptSrc(IR::Opnd *opnd, IR::Instr * *pInstr, Value **indirIndexValRef, I
opnd->SetValueType(valueType);
- if(!IsLoopPrePass() && opnd->IsSymOpnd() && (valueType.IsDefinite() || valueType.IsNotTaggedValue()))
+ if(!IsLoopPrePass() && opnd->IsSymOpnd() && valueType.IsDefinite())
{
if (opnd->AsSymOpnd()->m_sym->IsPropertySym())
{
@@ -4924,7 +4812,7 @@ GlobOpt::ValueNumberDst(IR::Instr **pInstr, Value *src1Val, Value *src2Val)
}
else
{
- return NewGenericValue(src1ValueInfo->Type().ToDefiniteAnyNumber().SetCanBeTaggedValue(true), dst);
+ return NewGenericValue(src1ValueInfo->Type().ToDefiniteAnyNumber(), dst);
}
break;
@@ -4985,7 +4873,7 @@ GlobOpt::ValueNumberDst(IR::Instr **pInstr, Value *src1Val, Value *src2Val)
{
valueType = ValueType::Number;
}
- return CreateDstUntransferredValue(valueType.SetCanBeTaggedValue(true), instr, src1Val, src2Val);
+ return CreateDstUntransferredValue(valueType, instr, src1Val, src2Val);
}
case Js::OpCode::Add_A:
@@ -5019,12 +4907,12 @@ GlobOpt::ValueNumberDst(IR::Instr **pInstr, Value *src1Val, Value *src2Val)
{
// If one of them is a float, the result probably is a float instead of just int
// but should always be a number.
- valueType = ValueType::Float.SetCanBeTaggedValue(true);
+ valueType = ValueType::Float;
}
else
{
// Could be int, could be number
- valueType = ValueType::Number.SetCanBeTaggedValue(true);
+ valueType = ValueType::Number;
}
}
else if (src1ValueInfo->IsLikelyFloat() || src2ValueInfo->IsLikelyFloat())
@@ -5048,7 +4936,7 @@ GlobOpt::ValueNumberDst(IR::Instr **pInstr, Value *src1Val, Value *src2Val)
&& (src2Val && src2ValueInfo->IsNotString() && src2ValueInfo->IsPrimitive()))
{
// If src1 and src2 are not strings and primitive, add should yield a number.
- valueType = ValueType::Number.SetCanBeTaggedValue(true);
+ valueType = ValueType::Number;
}
else if((src1Val && src1ValueInfo->IsLikelyString()) || (src2Val && src2ValueInfo->IsLikelyString()))
{
@@ -5069,7 +4957,7 @@ GlobOpt::ValueNumberDst(IR::Instr **pInstr, Value *src1Val, Value *src2Val)
ValueType divValueType = GetDivValueType(instr, src1Val, src2Val, false);
if (divValueType.IsLikelyInt() || divValueType.IsFloat())
{
- return CreateDstUntransferredValue(divValueType.SetCanBeTaggedValue(true), instr, src1Val, src2Val);
+ return CreateDstUntransferredValue(divValueType, instr, src1Val, src2Val);
}
}
// fall-through
@@ -5101,11 +4989,11 @@ GlobOpt::ValueNumberDst(IR::Instr **pInstr, Value *src1Val, Value *src2Val)
// This should ideally be NewNumberAndLikelyFloatValue since we know the result is a number but not sure if it will
// be a float value. However, that Number/LikelyFloat value type doesn't exist currently and all the necessary
// checks are done for float values (tagged int checks, etc.) so it's sufficient to just create a float value here.
- valueType = ValueType::Float.SetCanBeTaggedValue(true);
+ valueType = ValueType::Float;
}
else
{
- valueType = ValueType::Number.SetCanBeTaggedValue(true);
+ valueType = ValueType::Number;
}
return CreateDstUntransferredValue(valueType, instr, src1Val, src2Val);
@@ -5213,6 +5101,18 @@ GlobOpt::ValueNumberDst(IR::Instr **pInstr, Value *src1Val, Value *src2Val)
case Js::OpCode::IsInst:
case Js::OpCode::LdTrue:
case Js::OpCode::LdFalse:
+ case Js::OpCode::CmEq_A:
+ case Js::OpCode::CmSrEq_A:
+ case Js::OpCode::CmNeq_A:
+ case Js::OpCode::CmSrNeq_A:
+ case Js::OpCode::CmLe_A:
+ case Js::OpCode::CmUnLe_A:
+ case Js::OpCode::CmLt_A:
+ case Js::OpCode::CmUnLt_A:
+ case Js::OpCode::CmGe_A:
+ case Js::OpCode::CmUnGe_A:
+ case Js::OpCode::CmGt_A:
+ case Js::OpCode::CmUnGt_A:
return this->NewGenericValue(ValueType::Boolean, dst);
case Js::OpCode::LdUndef:
@@ -5285,10 +5185,10 @@ GlobOpt::ValueNumberLdElemDst(IR::Instr **pInstr, Value *srcVal)
}
}
}
-
+
IR::IndirOpnd *src = instr->GetSrc1()->AsIndirOpnd();
const ValueType baseValueType(src->GetBaseOpnd()->GetValueType());
- if (instr->DoStackArgsOpt(this->func) ||
+ if (instr->DoStackArgsOpt() ||
!(
baseValueType.IsLikelyOptimizedTypedArray() ||
(baseValueType.IsLikelyNativeArray() && instr->IsProfiledInstr()) // Specialized native array lowering for LdElem requires that it is profiled.
@@ -5312,7 +5212,7 @@ GlobOpt::ValueNumberLdElemDst(IR::Instr **pInstr, Value *srcVal)
this->func->GetDebugNumberSet(debugStringBuffer),
Js::OpCodeUtil::GetOpCodeName(instr->m_opcode),
baseValueTypeStr,
- instr->DoStackArgsOpt(this->func) ? _u("instruction uses the arguments object") :
+ instr->DoStackArgsOpt() ? _u("instruction uses the arguments object") :
baseValueType.IsLikelyOptimizedTypedArray() ? _u("index is negative or likely not int") : _u("of array type"));
Output::Flush();
}
@@ -5552,8 +5452,7 @@ GlobOpt::GetPrepassValueTypeForDst(
IR::Instr *const instr,
Value *const src1Value,
Value *const src2Value,
- bool const isValueInfoPrecise,
- bool const isSafeToTransferInPrepass) const
+ bool const isValueInfoPrecise) const
{
// Values with definite types can be created in the loop prepass only when it is guaranteed that the value type will be the
// same on any iteration of the loop. The heuristics currently used are:
@@ -5570,13 +5469,13 @@ GlobOpt::GetPrepassValueTypeForDst(
Assert(IsLoopPrePass());
Assert(instr);
- if(!isValueInfoPrecise)
+ if(!desiredValueType.IsDefinite())
{
- if(!desiredValueType.IsDefinite())
- {
- return isSafeToTransferInPrepass ? desiredValueType : desiredValueType.SetCanBeTaggedValue(true);
- }
+ return desiredValueType;
+ }
+ if(!isValueInfoPrecise)
+ {
// If the desired value type is not precise, the value type of the destination is derived from the value types of the
// sources. Since the value type of a source sym is not definite, the destination value type also cannot be definite.
if(desiredValueType.IsInt() && OpCodeAttr::IsInt32(instr->m_opcode))
@@ -5589,7 +5488,6 @@ GlobOpt::GetPrepassValueTypeForDst(
// The op always produces a number, but not always an int
return desiredValueType.ToDefiniteAnyNumber();
}
- // Note: ToLikely() also sets CanBeTaggedValue
return desiredValueType.ToLikely();
}
@@ -5667,7 +5565,7 @@ GlobOpt::SafeToCopyPropInPrepass(StackSym * const originalSym, StackSym * const
Assert(this->currentBlock->globOptData.GetCopyPropSym(originalSym, value) == copySym);
// In the following example, to copy-prop s2 into s1, it is not enough to check if s1 and s2 are safe to transfer.
- // In fact, both s1 and s2 are safe to transfer, but it is not legal to copy prop s2 into s1.
+ // In fact, both s1 and s2 are safe to transfer, but it is not legal to copy prop s2 into s1.
//
// s1 = s2
// $Loop:
@@ -5678,7 +5576,7 @@ GlobOpt::SafeToCopyPropInPrepass(StackSym * const originalSym, StackSym * const
// In general, requirements for copy-propping in prepass are more restricted than those for transferring values.
// For copy prop in prepass, if the original sym is live on back-edge, then the copy-prop sym should not be written to
// in the loop (or its parents)
-
+
ValueInfo* const valueInfo = value->GetValueInfo();
return IsSafeToTransferInPrepass(originalSym, valueInfo) &&
IsSafeToTransferInPrepass(copySym, valueInfo) &&
@@ -5865,8 +5763,8 @@ GlobOpt::ValueNumberTransferDstInPrepass(IR::Instr *const instr, Value *const sr
// for aggressive int type spec.
bool isSafeToTransferInPrepass = false;
isValueInfoPrecise = IsPrepassSrcValueInfoPrecise(instr, src1Val, nullptr, &isSafeToTransferInPrepass);
-
- const ValueType valueType(GetPrepassValueTypeForDst(src1ValueInfo->Type(), instr, src1Val, nullptr, isValueInfoPrecise, isSafeToTransferInPrepass));
+
+ const ValueType valueType(GetPrepassValueTypeForDst(src1ValueInfo->Type(), instr, src1Val, nullptr, isValueInfoPrecise));
if(isValueInfoPrecise || isSafeToTransferInPrepass)
{
Assert(valueType == src1ValueInfo->Type());
@@ -6583,6 +6481,12 @@ GlobOpt::GetConstantVar(IR::Opnd *opnd, Value *val)
return Js::TaggedInt::ToVarUnchecked(opnd->AsIntConstOpnd()->AsInt32());
}
}
+#if FLOATVAR
+ else if (opnd->IsFloatConstOpnd())
+ {
+ return Js::JavascriptNumber::ToVar(opnd->AsFloatConstOpnd()->m_value);
+ }
+#endif
else if (opnd->IsRegOpnd() && opnd->AsRegOpnd()->m_sym->IsSingleDef())
{
if (valueInfo->IsBoolean())
@@ -6604,19 +6508,110 @@ GlobOpt::GetConstantVar(IR::Opnd *opnd, Value *val)
{
return (Js::Var)this->func->GetScriptContextInfo()->GetNullAddr();
}
+#if FLOATVAR
+ else if (valueInfo->IsFloat())
+ {
+ IR::Instr * defInstr = opnd->AsRegOpnd()->m_sym->GetInstrDef();
+ if (defInstr->m_opcode == Js::OpCode::LdC_F8_R8 && defInstr->GetSrc1()->IsFloatConstOpnd())
+ {
+ return Js::JavascriptNumber::ToVar(defInstr->GetSrc1()->AsFloatConstOpnd()->m_value);
+ }
+ }
+#endif
}
return nullptr;
}
-bool BoolAndIntStaticAndTypeMismatch(Value* src1Val, Value* src2Val, Js::Var src1Var, Js::Var src2Var)
+namespace
{
- ValueInfo *src1ValInfo = src1Val->GetValueInfo();
- ValueInfo *src2ValInfo = src2Val->GetValueInfo();
- return (src1ValInfo->IsNumber() && src1Var && src2ValInfo->IsBoolean() && src1Var != Js::TaggedInt::ToVarUnchecked(0) && src1Var != Js::TaggedInt::ToVarUnchecked(1)) ||
- (src2ValInfo->IsNumber() && src2Var && src1ValInfo->IsBoolean() && src2Var != Js::TaggedInt::ToVarUnchecked(0) && src2Var != Js::TaggedInt::ToVarUnchecked(1));
-}
+ bool TryCompIntAndFloat(bool * result, Js::Var left, Js::Var right)
+ {
+ if (Js::TaggedInt::Is(left))
+ {
+ // If both are tagged ints we should not get here.
+ Assert(!Js::TaggedInt::Is(right));
+ if (Js::JavascriptNumber::Is_NoTaggedIntCheck(right))
+ {
+ double value = Js::JavascriptNumber::GetValue(right);
+ *result = (Js::TaggedInt::ToInt32(left) == value);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool Op_JitEq(bool * result, Value * src1Val, Value * src2Val, Js::Var src1Var, Js::Var src2Var, Func * func, bool isStrict)
+ {
+ Assert(src1Val != nullptr && src2Val != nullptr);
+ Assert(src1Var != nullptr && src2Var != nullptr);
+ if (src1Var == src2Var)
+ {
+ if (Js::TaggedInt::Is(src1Var))
+ {
+ *result = true;
+ return true;
+ }
+
+ if (!isStrict && src1Val->GetValueInfo()->IsNotFloat())
+ {
+ // If the vars are equal and they are not NaN, non-strict equal returns true. Not float guarantees not NaN.
+ *result = true;
+ return true;
+ }
+
+#if FLOATVAR
+ if (Js::JavascriptNumber::Is_NoTaggedIntCheck(src1Var))
+ {
+ *result = !Js::JavascriptNumber::IsNan(Js::JavascriptNumber::GetValue(src1Var));
+ return true;
+ }
+#endif
+
+ if (src1Var == reinterpret_cast<Js::Var>(func->GetScriptContextInfo()->GetTrueAddr()) ||
+ src1Var == reinterpret_cast<Js::Var>(func->GetScriptContextInfo()->GetFalseAddr()) ||
+ src1Var == reinterpret_cast<Js::Var>(func->GetScriptContextInfo()->GetNullAddr()) ||
+ src1Var == reinterpret_cast<Js::Var>(func->GetScriptContextInfo()->GetUndefinedAddr()))
+ {
+ *result = true;
+ return true;
+ }
+
+ // Other var comparisons require the runtime to prove.
+ return false;
+ }
+
+#if FLOATVAR
+ if (TryCompIntAndFloat(result, src1Var, src2Var) || TryCompIntAndFloat(result, src2Var, src1Var))
+ {
+ return true;
+ }
+
+#endif
+
+ return false;
+ }
+
+ bool Op_JitNeq(bool * result, Value * src1Val, Value * src2Val, Js::Var src1Var, Js::Var src2Var, Func * func, bool isStrict)
+ {
+ if (Op_JitEq(result, src1Val, src2Val, src1Var, src2Var, func, isStrict))
+ {
+ *result = !*result;
+ return true;
+ }
+
+ return false;
+ }
+
+ bool BoolAndIntStaticAndTypeMismatch(Value* src1Val, Value* src2Val, Js::Var src1Var, Js::Var src2Var)
+ {
+ ValueInfo *src1ValInfo = src1Val->GetValueInfo();
+ ValueInfo *src2ValInfo = src2Val->GetValueInfo();
+ return (src1ValInfo->IsNumber() && src1Var && src2ValInfo->IsBoolean() && src1Var != Js::TaggedInt::ToVarUnchecked(0) && src1Var != Js::TaggedInt::ToVarUnchecked(1)) ||
+ (src2ValInfo->IsNumber() && src2Var && src1ValInfo->IsBoolean() && src2Var != Js::TaggedInt::ToVarUnchecked(0) && src2Var != Js::TaggedInt::ToVarUnchecked(1));
+ }
+}
bool
GlobOpt::CanProveConditionalBranch(IR::Instr *instr, Value *src1Val, Value *src2Val, Js::Var src1Var, Js::Var src2Var, bool *result)
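
The `Op_JitEq`/`Op_JitNeq` helpers above let the JIT fold branches on known vars without calling back into the script context (which the OOP JIT cannot do). The essential subtlety is NaN: a float var compared against itself is still unequal when it holds NaN. A simplified sketch of that rule, using pointers to doubles in place of `Js::Var`:

```cpp
#include <cmath>
#include <cstdio>

// Simplified model: "vars" are pointers to doubles; identical vars compare
// equal unless the shared value can be NaN (NaN is unequal even to itself).
static bool TryJitEq(const double *left, const double *right,
                     bool knownNotFloat, bool *result)
{
    if (left == right)
    {
        if (knownNotFloat)
        {
            *result = true;           // same var, provably not NaN
        }
        else
        {
            *result = !std::isnan(*left);
        }
        return true;                  // proven at JIT time
    }
    return false;                     // cannot prove; leave the branch alone
}

int main()
{
    double x = 1.0, nan = std::nan("");
    bool r;
    if (TryJitEq(&x, &x, false, &r))     std::printf("x == x: %d\n", r);
    if (TryJitEq(&nan, &nan, false, &r)) std::printf("nan == nan: %d\n", r);
    return 0;
}
```
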
@@ -6744,12 +6739,10 @@ GlobOpt::CanProveConditionalBranch(IR::Instr *instr, Value *src1Val, Value *src2
}
else
{
- if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts))
+ if (!Op_JitEq(result, src1Val, src2Val, src1Var, src2Var, this->func, false /* isStrict */))
{
- // TODO: OOP JIT, const folding
return false;
}
- *result = Js::JavascriptOperators::Equal(src1Var, src2Var, this->func->GetScriptContext());
}
break;
case Js::OpCode::BrNeq_A:
@@ -6776,12 +6769,10 @@ GlobOpt::CanProveConditionalBranch(IR::Instr *instr, Value *src1Val, Value *src2
}
else
{
- if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts))
+ if (!Op_JitNeq(result, src1Val, src2Val, src1Var, src2Var, this->func, false /* isStrict */))
{
- // TODO: OOP JIT, const folding
return false;
}
- *result = Js::JavascriptOperators::NotEqual(src1Var, src2Var, this->func->GetScriptContext());
}
break;
case Js::OpCode::BrSrEq_A:
@@ -6817,12 +6808,10 @@ GlobOpt::CanProveConditionalBranch(IR::Instr *instr, Value *src1Val, Value *src2
}
else
{
- if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts))
+ if (!Op_JitEq(result, src1Val, src2Val, src1Var, src2Var, this->func, true /* isStrict */))
{
- // TODO: OOP JIT, const folding
return false;
}
- *result = Js::JavascriptOperators::StrictEqual(src1Var, src2Var, this->func->GetScriptContext());
}
break;
@@ -6859,12 +6848,10 @@ GlobOpt::CanProveConditionalBranch(IR::Instr *instr, Value *src1Val, Value *src2
}
else
{
- if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts))
+ if (!Op_JitNeq(result, src1Val, src2Val, src1Var, src2Var, this->func, true /* isStrict */))
{
- // TODO: OOP JIT, const folding
return false;
}
- *result = Js::JavascriptOperators::NotStrictEqual(src1Var, src2Var, this->func->GetScriptContext());
}
break;
@@ -6884,16 +6871,36 @@ GlobOpt::CanProveConditionalBranch(IR::Instr *instr, Value *src1Val, Value *src2
break;
}
- if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts))
+ if (!src1Var)
{
- // TODO: OOP JIT, const folding
return false;
}
- if (!src1Var)
+
+ // Set *result = (evaluates true) and negate it later for BrFalse
+ if (src1Var == reinterpret_cast<Js::Var>(this->func->GetScriptContextInfo()->GetTrueAddr()))
+ {
+ *result = true;
+ }
+ else if (src1Var == reinterpret_cast<Js::Var>(this->func->GetScriptContextInfo()->GetFalseAddr()))
+ {
+ *result = false;
+ }
+ else if (Js::TaggedInt::Is(src1Var))
+ {
+ *result = (src1Var != reinterpret_cast<Js::Var>(Js::AtomTag_IntPtr));
+ }
+#if FLOATVAR
+ else if (Js::JavascriptNumber::Is_NoTaggedIntCheck(src1Var))
+ {
+ double value = Js::JavascriptNumber::GetValue(src1Var);
+ *result = (!Js::JavascriptNumber::IsNan(value)) && (!Js::JavascriptNumber::IsZero(value));
+ }
+#endif
+ else
{
return false;
}
- *result = Js::JavascriptConversion::ToBoolean(src1Var, this->func->GetScriptContext());
+
if (instr->m_opcode == Js::OpCode::BrFalse_A)
{
*result = !(*result);
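
The `BrTrue`/`BrFalse` folding above replaces the `Js::JavascriptConversion::ToBoolean` runtime call with checks the JIT can make on the var bits alone: the true/false singletons, tagged ints (zero is falsy), and, under FLOATVAR, inline floats (NaN and zero are falsy); anything else stays unproven. A hedged sketch of that truthiness table over a toy tagged value:

```cpp
#include <cmath>
#include <cstdio>

// Toy tagged value standing in for Js::Var; only the cases the fold handles.
struct Var
{
    enum class Kind { True, False, TaggedInt, Float, Other } kind;
    int    i;
    double d;
};

// Returns true if truthiness is provable at JIT time, writing it to *result.
static bool TryProveToBoolean(const Var &v, bool *result)
{
    switch (v.kind)
    {
    case Var::Kind::True:      *result = true;  return true;
    case Var::Kind::False:     *result = false; return true;
    case Var::Kind::TaggedInt: *result = (v.i != 0); return true; // 0 is falsy
    case Var::Kind::Float:     // FLOATVAR case: NaN and +/-0 are falsy
        *result = !std::isnan(v.d) && v.d != 0.0;
        return true;
    default:                   return false; // strings, objects: runtime only
    }
}

int main()
{
    Var v{ Var::Kind::Float, 0, std::nan("") };
    bool r;
    if (TryProveToBoolean(v, &r)) std::printf("NaN is truthy? %d\n", r);
    return 0;
}
```
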
@@ -9342,7 +9349,7 @@ GlobOpt::TypeSpecializeBinary(IR::Instr **pInstr, Value **pSrc1Val, Value **pSrc
bool isConservativeMulInt = !DoAggressiveMulIntTypeSpec() || !DoAggressiveIntTypeSpec();
// Be conservative about predicting Mul overflow in prepass.
- // Operands that are live on back edge may be denied lossless-conversion to int32 and
+ // Operands that are live on back edge may be denied lossless-conversion to int32 and
// trigger rejit with AggressiveIntTypeSpec off.
// Besides multiplying a variable in a loop can overflow in just a few iterations even in simple cases like v *= 2
// So, make sure we definitely know the source max/min values, otherwise assume the full range.
@@ -10578,6 +10585,7 @@ GlobOpt::TypeSpecializeFloatBinary(IR::Instr *instr, Value *src1Val, Value *src2
bool skipSrc1 = false;
bool skipSrc2 = false;
bool skipDst = false;
+ bool convertDstToBool = false;
if (!this->DoFloatTypeSpec())
{
@@ -10649,6 +10657,36 @@ GlobOpt::TypeSpecializeFloatBinary(IR::Instr *instr, Value *src1Val, Value *src2
skipDst = true;
break;
+ case Js::OpCode::CmEq_A:
+ case Js::OpCode::CmSrEq_A:
+ case Js::OpCode::CmNeq_A:
+ case Js::OpCode::CmSrNeq_A:
+ {
+ if (src1Val->GetValueInfo()->IsNotNumber() || src2Val->GetValueInfo()->IsNotNumber())
+ {
+ return false;
+ }
+
+ allowUndefinedOrNullSrc1 = false;
+ allowUndefinedOrNullSrc2 = false;
+ convertDstToBool = true;
+ break;
+ }
+
+ case Js::OpCode::CmLe_A:
+ case Js::OpCode::CmLt_A:
+ case Js::OpCode::CmGe_A:
+ case Js::OpCode::CmGt_A:
+ {
+ if (src1Val->GetValueInfo()->IsNotNumber() || src2Val->GetValueInfo()->IsNotNumber())
+ {
+ return false;
+ }
+
+ convertDstToBool = true;
+ break;
+ }
+
default:
return false;
}
@@ -10694,13 +10732,19 @@ GlobOpt::TypeSpecializeFloatBinary(IR::Instr *instr, Value *src1Val, Value *src2
if (!skipDst)
{
dst = instr->GetDst();
-
if (dst)
{
- *pDstVal = CreateDstUntransferredValue(ValueType::Float, instr, src1Val, src2Val);
-
- AssertMsg(dst->IsRegOpnd(), "What else?");
- this->ToFloat64Dst(instr, dst->AsRegOpnd(), this->currentBlock);
+ if (convertDstToBool)
+ {
+ *pDstVal = CreateDstUntransferredValue(ValueType::Boolean, instr, src1Val, src2Val);
+ ToVarRegOpnd(dst->AsRegOpnd(), currentBlock);
+ }
+ else
+ {
+ *pDstVal = CreateDstUntransferredValue(ValueType::Float, instr, src1Val, src2Val);
+ AssertMsg(dst->IsRegOpnd(), "What else?");
+ this->ToFloat64Dst(instr, dst->AsRegOpnd(), this->currentBlock);
+ }
}
}
@@ -10724,7 +10768,7 @@ GlobOpt::TypeSpecializeStElem(IR::Instr ** pInstr, Value *src1Val, Value **pDstV
IR::RegOpnd *baseOpnd = instr->GetDst()->AsIndirOpnd()->GetBaseOpnd();
ValueType baseValueType(baseOpnd->GetValueType());
- if (instr->DoStackArgsOpt(this->func) ||
+ if (instr->DoStackArgsOpt() ||
(!this->DoTypedArrayTypeSpec() && baseValueType.IsLikelyOptimizedTypedArray()) ||
(!this->DoNativeArrayTypeSpec() && baseValueType.IsLikelyNativeArray()) ||
!(baseValueType.IsLikelyOptimizedTypedArray() || baseValueType.IsLikelyNativeArray()))
@@ -10740,7 +10784,7 @@ GlobOpt::TypeSpecializeStElem(IR::Instr ** pInstr, Value *src1Val, Value **pDstV
this->func->GetDebugNumberSet(debugStringBuffer),
Js::OpCodeUtil::GetOpCodeName(instr->m_opcode),
baseValueTypeStr,
- instr->DoStackArgsOpt(this->func) ?
+ instr->DoStackArgsOpt() ?
_u("instruction uses the arguments object") :
_u("typed array type specialization is disabled, or base is not an optimized typed array"));
Output::Flush();
@@ -11065,7 +11109,7 @@ GlobOpt::ToVarUses(IR::Instr *instr, IR::Opnd *opnd, bool isDst, Value *val)
return instr;
}
-IR::Instr *
+IR::Instr *
GlobOpt::ToTypeSpecIndex(IR::Instr * instr, IR::RegOpnd * indexOpnd, IR::IndirOpnd * indirOpnd)
{
Assert(indirOpnd != nullptr || indexOpnd == instr->GetSrc1());
@@ -12934,26 +12978,6 @@ GlobOpt::ProcessValueKills(IR::Instr *const instr)
it.RemoveCurrent();
}
}
- else if(kills.KillsObjectArraysWithNoMissingValues())
- {
- // Some operations may kill objects with arrays-with-no-missing-values in unlikely circumstances. Convert their value types to likely
- // versions so that the checks have to be redone.
- for(auto it = valuesToKillOnCalls->GetIteratorWithRemovalSupport(); it.IsValid(); it.MoveNext())
- {
- Value *const value = it.CurrentValue();
- ValueInfo *const valueInfo = value->GetValueInfo();
- Assert(
- valueInfo->IsArrayOrObjectWithArray() ||
- valueInfo->IsOptimizedVirtualTypedArray() ||
- valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym());
- if(!valueInfo->IsArrayOrObjectWithArray() || valueInfo->IsArray() || !valueInfo->HasNoMissingValues())
- {
- continue;
- }
- ChangeValueType(nullptr, value, valueInfo->Type().ToLikely(), false);
- it.RemoveCurrent();
- }
- }
if(kills.KillsNativeArrays())
{
@@ -13209,33 +13233,86 @@ GlobOpt::OptArraySrc(IR::Instr ** const instrRef, Value ** src1Val, Value ** src
}
void
-GlobOpt::ProcessNoImplicitCallArrayUses(IR::RegOpnd * baseOpnd, IR::ArrayRegOpnd * baseArrayOpnd, IR::Instr * instr, bool isLikelyJsArray, bool useNoMissingValues)
+GlobOpt::OptStackArgLenAndConst(IR::Instr* instr, Value** src1Val)
{
- if (isLikelyJsArray)
- {
- // Insert an instruction to indicate to the dead-store pass that implicit calls need to be kept disabled until this
- // instruction. Operations other than LdElem, StElem and IsIn don't benefit much from arrays having no missing values,
- // so no need to ensure that the array still has no missing values. For a particular array, if none of the accesses
- // benefit much from the no-missing-values information, it may be beneficial to avoid checking for no missing
- // values, especially in the case for a single array access, where the cost of the check could be relatively
- // significant. An StElem has to do additional checks in the common path if the array may have missing values, and
- // a StElem that operates on an array that has no missing values is more likely to keep the no-missing-values info
- // on the array more precise, so it still benefits a little from the no-missing-values info.
- this->CaptureNoImplicitCallUses(baseOpnd, isLikelyJsArray);
- }
- else if (baseArrayOpnd && baseArrayOpnd->HeadSegmentLengthSym())
+ if (!PHASE_OFF(Js::StackArgLenConstOptPhase, instr->m_func) && instr->m_func->IsStackArgsEnabled() && instr->usesStackArgumentsObject && instr->IsInlined())
{
- // A typed array's array buffer may be transferred to a web worker as part of an implicit call, in which case the typed
- // array's length is set to zero. Insert an instruction to indicate to the dead-store pass that implicit calls need to
- // be disabled until this instruction.
- IR::RegOpnd *const headSegmentLengthOpnd =
- IR::RegOpnd::New(
- baseArrayOpnd->HeadSegmentLengthSym(),
- baseArrayOpnd->HeadSegmentLengthSym()->GetType(),
- instr->m_func);
+ IR::Opnd* src1 = instr->GetSrc1();
+ auto replaceInstr = [&](IR::Opnd* newopnd, Js::OpCode opcode)
+ {
+ if (PHASE_TESTTRACE(Js::StackArgLenConstOptPhase, instr->m_func))
+ {
+ Output::Print(_u("Inlined function %s have replaced opcode %s with opcode %s for stack arg optimization. \n"), instr->m_func->GetJITFunctionBody()->GetDisplayName(),
+ Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), Js::OpCodeUtil::GetOpCodeName(opcode));
+ Output::Flush();
+ }
+ this->CaptureByteCodeSymUses(instr);
+ instr->m_opcode = opcode;
+ instr->ReplaceSrc1(newopnd);
+ if (instr->HasBailOutInfo())
+ {
+ instr->ClearBailOutInfo();
+ }
+ *src1Val = this->OptSrc(instr->GetSrc1(), &instr);
+ instr->m_func->hasArgLenAndConstOpt = true;
+ };
+ Assert(CurrentBlockData()->IsArgumentsOpnd(src1));
+ switch(instr->m_opcode)
+ {
+ case Js::OpCode::LdLen_A:
+ {
+ IR::AddrOpnd* newopnd = IR::AddrOpnd::New(Js::TaggedInt::ToVarUnchecked(instr->m_func->actualCount - 1), IR::AddrOpndKindConstantVar, instr->m_func);
+ replaceInstr(newopnd, Js::OpCode::Ld_A);
+ break;
+ }
+ case Js::OpCode::LdElemI_A:
+ case Js::OpCode::TypeofElem:
+ {
+ IR::IndirOpnd* indirOpndSrc1 = src1->AsIndirOpnd();
+ if (!indirOpndSrc1->GetIndexOpnd())
+ {
+ int argIndex = indirOpndSrc1->GetOffset() + 1;
+ IR::Instr* defInstr = nullptr;
+ IR::Instr* inlineeStart = instr->m_func->GetInlineeStart();
+ inlineeStart->IterateArgInstrs([&](IR::Instr* argInstr) {
+ StackSym *argSym = argInstr->GetDst()->AsSymOpnd()->m_sym->AsStackSym();
+ if (argSym->GetArgSlotNum() - 1 == argIndex)
+ {
+ defInstr = argInstr;
+ return true;
+ }
+ return false;
+ });
- const IR::AutoReuseOpnd autoReuseHeadSegmentLengthOpnd(headSegmentLengthOpnd, instr->m_func);
- this->CaptureNoImplicitCallUses(headSegmentLengthOpnd, false);
+ Js::OpCode replacementOpcode;
+ if (instr->m_opcode == Js::OpCode::TypeofElem)
+ {
+ replacementOpcode = Js::OpCode::Typeof;
+ }
+ else
+ {
+ replacementOpcode = Js::OpCode::Ld_A;
+ }
+
+ // If we cannot find the defining ArgOut instruction, e.g. arguments[2] is accessed but no arguments were passed to the function
+ if (defInstr == nullptr)
+ {
+ IR::Opnd * undefined = IR::AddrOpnd::New(instr->m_func->GetScriptContextInfo()->GetUndefinedAddr(), IR::AddrOpndKindDynamicVar, instr->m_func, true);
+ undefined->SetValueType(ValueType::Undefined);
+ replaceInstr(undefined, replacementOpcode);
+ }
+ else
+ {
+ replaceInstr(defInstr->GetSrc1(), replacementOpcode);
+ }
+ }
+ else
+ {
+ instr->m_func->unoptimizableArgumentsObjReference++;
+ }
+ break;
+ }
+ }
}
}
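The net effect of OptStackArgLenAndConst: inside an inlinee with stack args enabled, `arguments.length` folds to the constant `actualCount - 1`, and `arguments[i]` with a constant index folds to the matching ArgOut value, or to `undefined` when the index is out of range. A minimal standalone sketch of that folding decision (names and types here are illustrative, not ChakraCore's):

```cpp
#include <cstdio>
#include <string>
#include <vector>

struct FoldResult { bool folded; std::string replacement; };

FoldResult FoldArgumentsAccess(int actualCount /* includes the 'this' slot */,
                               const std::vector<std::string>& argOuts /* by arg slot */,
                               bool isLengthLoad, int index)
{
    if (isLengthLoad)
    {
        // LdLen_A on the arguments object becomes Ld_A of actualCount - 1.
        return { true, std::to_string(actualCount - 1) };
    }
    // LdElemI_A / TypeofElem with a constant index: slot 0 is 'this',
    // so arguments[i] corresponds to arg slot i + 1.
    const size_t slot = static_cast<size_t>(index) + 1;
    if (slot >= argOuts.size())
    {
        // No ArgOut defines the slot (e.g. arguments[2] with no args passed).
        return { true, "undefined" };
    }
    return { true, argOuts[slot] };
}

int main()
{
    const std::vector<std::string> argOuts = { "thisObj", "a", "b" };
    printf("%s\n", FoldArgumentsAccess(3, argOuts, true, 0).replacement.c_str());  // 2
    printf("%s\n", FoldArgumentsAccess(3, argOuts, false, 1).replacement.c_str()); // b
    printf("%s\n", FoldArgumentsAccess(3, argOuts, false, 5).replacement.c_str()); // undefined
}
```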
@@ -13357,7 +13434,6 @@ GlobOpt::CheckJsArrayKills(IR::Instr *const instr)
const bool useValueTypes = !IsLoopPrePass(); // Source value types are not guaranteed to be correct in a loop prepass
switch(instr->m_opcode)
{
- case Js::OpCode::StElemC:
case Js::OpCode::StElemI_A:
case Js::OpCode::StElemI_A_Strict:
{
@@ -13408,13 +13484,8 @@ GlobOpt::CheckJsArrayKills(IR::Instr *const instr)
}
break;
- case Js::OpCode::ConsoleScopedStFld:
- case Js::OpCode::ConsoleScopedStFldStrict:
- case Js::OpCode::ScopedStFld:
- case Js::OpCode::ScopedStFldStrict:
case Js::OpCode::StFld:
case Js::OpCode::StFldStrict:
- case Js::OpCode::StSuperFld:
{
Assert(instr->GetDst());
@@ -13516,11 +13587,6 @@ GlobOpt::CheckJsArrayKills(IR::Instr *const instr)
{
kills.SetKillsArrayLengths();
}
-
- if(doArrayMissingValueCheckHoist && !(useValueTypes && arrayValueType.IsArray()))
- {
- kills.SetKillsObjectArraysWithNoMissingValues();
- }
break;
}
@@ -13559,8 +13625,6 @@ GlobOpt::CheckJsArrayKills(IR::Instr *const instr)
case IR::HelperArray_Shift:
case IR::HelperArray_Splice:
case IR::HelperArray_Unshift:
- case IR::HelperArray_Concat:
- case IR::HelperArray_Slice:
kills.SetKillsArrayHeadSegments();
kills.SetKillsArrayHeadSegmentLengths();
break;
@@ -13590,7 +13654,6 @@ GlobOpt::CheckJsArrayKills(IR::Instr *const instr)
//case IR::HelperArray_Sort:
case IR::HelperArray_Splice:
case IR::HelperArray_Unshift:
- case IR::HelperArray_Concat:
kills.SetKillsNativeArrays();
break;
}
@@ -13614,7 +13677,7 @@ GlobOpt::CheckJsArrayKills(IR::Instr *const instr)
kills.SetKillsNativeArrays();
}
break;
- }
+ }
case Js::OpCode::InitClass:
Assert(instr->GetSrc1());
@@ -13632,7 +13695,6 @@ GlobOpt::CheckJsArrayKills(IR::Instr *const instr)
break;
case Js::OpCode::NewScObjectNoCtor:
- case Js::OpCode::NewScObjectNoCtorFull:
if(doNativeArrayTypeSpec)
{
// Class/object construction can make something a prototype
@@ -15585,7 +15647,7 @@ GlobOpt::DoArrayCheckHoist() const
bool
GlobOpt::DoArrayCheckHoist(const ValueType baseValueType, Loop* loop, IR::Instr const * const instr) const
{
- if(!DoArrayCheckHoist() || (instr && !IsLoopPrePass() && instr->DoStackArgsOpt(func)))
+ if(!DoArrayCheckHoist() || (instr && !IsLoopPrePass() && instr->DoStackArgsOpt()))
{
return false;
}
@@ -15719,7 +15781,7 @@ GlobOpt::DoLdLenIntSpec(IR::Instr * const instr, const ValueType baseValueType)
if(PHASE_OFF(Js::LdLenIntSpecPhase, func) ||
IsTypeSpecPhaseOff(func) ||
(func->HasProfileInfo() && func->GetReadOnlyProfileInfo()->IsLdLenIntSpecDisabled()) ||
- (instr && !IsLoopPrePass() && instr->DoStackArgsOpt(func)))
+ (instr && !IsLoopPrePass() && instr->DoStackArgsOpt()))
{
return false;
}
@@ -15787,7 +15849,7 @@ GlobOpt::TrackArgumentsObject()
{
if (PHASE_OFF(Js::StackArgOptPhase, this->func))
{
- this->CannotAllocateArgumentsObjectOnStack();
+ this->CannotAllocateArgumentsObjectOnStack(nullptr);
return false;
}
@@ -15795,8 +15857,15 @@ GlobOpt::TrackArgumentsObject()
}
void
-GlobOpt::CannotAllocateArgumentsObjectOnStack()
+GlobOpt::CannotAllocateArgumentsObjectOnStack(Func * curFunc)
{
+ if (curFunc != nullptr && curFunc->hasArgLenAndConstOpt)
+ {
+ Assert(!curFunc->GetJITOutput()->GetOutputData()->disableStackArgOpt);
+ curFunc->GetJITOutput()->GetOutputData()->disableStackArgOpt = true;
+ throw Js::RejitException(RejitReason::DisableStackArgLenAndConstOpt);
+ }
+
func->SetHasStackArgs(false);
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
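The early-out added above relies on ChakraCore's rejit machinery: once the pass discovers that the arg-len/const optimization was invalid for this function, it records a disable bit on the jit output and unwinds, so compilation restarts without the optimization. A hedged sketch of that throw-and-recompile pattern, with stand-in types:

```cpp
#include <cstdio>
#include <stdexcept>

struct RejitException : std::runtime_error
{
    explicit RejitException(const char* reason) : std::runtime_error(reason) {}
};

struct JitOutput { bool disableStackArgOpt = false; };

void Compile(JitOutput& out, bool stackArgOptEnabled)
{
    if (stackArgOptEnabled)
    {
        // ... late discovery that an arguments-object reference escapes ...
        out.disableStackArgOpt = true;   // persist the decision for the retry
        throw RejitException("DisableStackArgLenAndConstOpt");
    }
    // compile without the optimization
}

int main()
{
    JitOutput out;
    for (;;)
    {
        try { Compile(out, !out.disableStackArgOpt); break; }
        catch (const RejitException& e) { printf("rejit: %s\n", e.what()); }
    }
}
```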
@@ -16628,7 +16697,6 @@ GlobOpt::GenerateBailOutMarkTempObjectIfNeeded(IR::Instr * instr, IR::Opnd * opn
if (instr->HasBailOutInfo())
{
instr->SetBailOutKind(instr->GetBailOutKind() | IR::BailOutMarkTempObject);
- instr->GetBailOutInfo()->canDeadStore = false;
}
else
{
@@ -16638,11 +16706,6 @@ GlobOpt::GenerateBailOutMarkTempObjectIfNeeded(IR::Instr * instr, IR::Opnd * opn
|| (instr->m_opcode == Js::OpCode::FromVar && !opnd->GetValueType().IsPrimitive())
|| propertySymOpnd == nullptr
|| !propertySymOpnd->IsTypeCheckProtected())
- {
- this->GenerateBailAtOperation(&instr, IR::BailOutMarkTempObject);
- instr->GetBailOutInfo()->canDeadStore = false;
- }
- else if (propertySymOpnd->MayHaveImplicitCall())
{
this->GenerateBailAtOperation(&instr, IR::BailOutMarkTempObject);
}
@@ -16736,7 +16799,6 @@ GlobOpt::GetOrGenerateLoopCountForMemOp(Loop *loop)
IR::Opnd *
GlobOpt::GenerateInductionVariableChangeForMemOp(Loop *loop, byte unroll, IR::Instr *insertBeforeInstr)
{
- AssertOrFailFast(unroll != Js::Constants::InvalidLoopUnrollFactor);
LoopCount *loopCount = loop->loopCount;
IR::Opnd *sizeOpnd = nullptr;
Assert(loopCount);
@@ -16774,25 +16836,17 @@ GlobOpt::GenerateInductionVariableChangeForMemOp(Loop *loop, byte unroll, IR::In
IR::Opnd *unrollOpnd = IR::IntConstOpnd::New(unroll, type, localFunc);
- IR::Instr* inductionChangeMultiplier = IR::Instr::New(
- Js::OpCode::Mul_I4, sizeOpnd, loopCountOpnd, unrollOpnd, localFunc);
-
- InsertInstr(inductionChangeMultiplier);
-
- inductionChangeMultiplier->ConvertToBailOutInstr(loop->bailOutInfo, IR::BailOutOnOverflow);
+ InsertInstr(IR::Instr::New(Js::OpCode::Mul_I4,
+ sizeOpnd,
+ loopCountOpnd,
+ unrollOpnd,
+ localFunc));
}
}
else
{
- int32 loopCountMinusOnePlusOne;
- int32 size;
- if (Int32Math::Add(loopCount->LoopCountMinusOneConstantValue(), 1, &loopCountMinusOnePlusOne) ||
- Int32Math::Mul(loopCountMinusOnePlusOne, unroll, &size))
- {
- throw Js::RejitException(RejitReason::MemOpDisabled);
- }
- Assert(size > 0);
+ uint size = (loopCount->LoopCountMinusOneConstantValue() + 1) * unroll;
sizeOpnd = IR::IntConstOpnd::New(size, IRType::TyUint32, localFunc);
}
loop->memOpInfo->inductionVariableOpndPerUnrollMap->Add(unroll, sizeOpnd);
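For context, the `Int32Math::Add`/`Int32Math::Mul` calls removed above provide checked 32-bit arithmetic that reports overflow through the return value instead of wrapping. A stand-in using GCC/Clang builtins shows the guarded shape this hunk deletes:

```cpp
#include <cstdint>
#include <cstdio>

// true on overflow, mirroring Int32Math's out-parameter style.
static bool CheckedAdd(int32_t a, int32_t b, int32_t* out) { return __builtin_add_overflow(a, b, out); }
static bool CheckedMul(int32_t a, int32_t b, int32_t* out) { return __builtin_mul_overflow(a, b, out); }

int main()
{
    int32_t plusOne = 0, size = 0;
    const int32_t loopCountMinusOne = INT32_MAX;  // adversarial constant loop count
    const int32_t unroll = 4;
    if (CheckedAdd(loopCountMinusOne, 1, &plusOne) || CheckedMul(plusOne, unroll, &size))
    {
        printf("overflow: the guarded version would disable the memop and rejit\n");
        return 0;
    }
    printf("size = %d\n", size);
}
```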
@@ -17055,9 +17109,6 @@ GlobOpt::EmitMemop(Loop * loop, LoopCount *loopCount, const MemOpEmitData* emitD
memopInstr->SetSrc2(sizeOpnd);
insertBeforeInstr->InsertBefore(memopInstr);
-
- loop->memOpInfo->instr = memopInstr;
-
#if DBG_DUMP
if (DO_MEMOP_TRACE())
{
@@ -17130,28 +17181,11 @@ GlobOpt::EmitMemop(Loop * loop, LoopCount *loopCount, const MemOpEmitData* emitD
}
#endif
- Assert(noImplicitCallUsesToInsert->Count() == 0);
- bool isLikelyJsArray;
- if (emitData->stElemInstr->GetDst()->IsIndirOpnd())
- {
- baseOpnd = emitData->stElemInstr->GetDst()->AsIndirOpnd()->GetBaseOpnd();
- isLikelyJsArray = baseOpnd->GetValueType().IsLikelyArrayOrObjectWithArray();
- ProcessNoImplicitCallArrayUses(baseOpnd, baseOpnd->IsArrayRegOpnd() ? baseOpnd->AsArrayRegOpnd() : nullptr, emitData->stElemInstr, isLikelyJsArray, true);
- }
RemoveMemOpSrcInstr(memopInstr, emitData->stElemInstr, emitData->block);
if (!isMemset)
{
- IR::Instr* ldElemInstr = ((MemCopyEmitData*)emitData)->ldElemInstr;
- if (ldElemInstr->GetSrc1()->IsIndirOpnd())
- {
- baseOpnd = ldElemInstr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd();
- isLikelyJsArray = baseOpnd->GetValueType().IsLikelyArrayOrObjectWithArray();
- ProcessNoImplicitCallArrayUses(baseOpnd, baseOpnd->IsArrayRegOpnd() ? baseOpnd->AsArrayRegOpnd() : nullptr, ldElemInstr, isLikelyJsArray, true);
- }
- RemoveMemOpSrcInstr(memopInstr, ldElemInstr, emitData->block);
+ RemoveMemOpSrcInstr(memopInstr, ((MemCopyEmitData*)emitData)->ldElemInstr, emitData->block);
}
- InsertNoImplicitCallUses(memopInstr);
- noImplicitCallUsesToInsert->Clear();
}
bool
@@ -17437,7 +17471,7 @@ GlobOpt::PRE::InsertSymDefinitionInLandingPad(StackSym * sym, Loop * loop, Sym *
BasicBlock* loopTail = loop->GetAnyTailBlock();
Value * valueOnBackEdge = loopTail->globOptData.FindValue(propSym);
-
+
// If o.x is not invariant in the loop, we can't use the preloaded value of o.x.y in the landing pad
Value * valueInLandingPad = loop->landingPad->globOptData.FindValue(propSym);
if (valueOnBackEdge->GetValueNumber() != valueInLandingPad->GetValueNumber())
@@ -17460,7 +17494,7 @@ GlobOpt::PRE::InsertSymDefinitionInLandingPad(StackSym * sym, Loop * loop, Sym *
Assert(loop->landingPad->globOptData.IsLive(valueOnBackEdge->GetValueInfo()->GetSymStore()));
// Inserted T3 = o.x
- // Now, we want to
+ // Now, we want to
// 1. Insert T1 = o.x
// 2. Insert T4 = T1.y
// 3. Identify T3 as the objptr copy prop sym for T1, and make T3.y live on the back-edges
@@ -17630,8 +17664,8 @@ void GlobOpt::PRE::RemoveOverlyOptimisticInitialValues(Loop * loop)
{
BasicBlock * landingPad = loop->landingPad;
- // For a property sym whose obj ptr sym wasn't live in the landing pad, we can optmistically (if the obj ptr sym was
- // single def) insert an initial value in the landing pad, with the hope that PRE could make the obj ptr sym live.
+ // For a property sym whose obj ptr sym wasn't live in the landing pad, we can optimistically (if the obj ptr sym was
+ // single def) insert an initial value in the landing pad, with the hope that PRE could make the obj ptr sym live.
// But, if PRE couldn't make the obj ptr sym live, we need to clear the value for the property sym from the landing pad
for (auto it = loop->initialValueFieldMap.GetIteratorWithRemovalSupport(); it.IsValid(); it.MoveNext())
diff --git a/deps/chakrashim/core/lib/Backend/GlobOpt.h b/deps/chakrashim/core/lib/Backend/GlobOpt.h
index 725ac0ea975..db5fd1dd3f0 100644
--- a/deps/chakrashim/core/lib/Backend/GlobOpt.h
+++ b/deps/chakrashim/core/lib/Backend/GlobOpt.h
@@ -288,18 +288,14 @@ typedef JsUtil::BaseDictionary Valu
namespace JsUtil
{
template <>
- class ValueEntry<StackLiteralInitFldData> : public BaseValueEntry<StackLiteralInitFldData>
+ inline void ClearValue<StackLiteralInitFldData>::Clear(StackLiteralInitFldData* value)
{
- public:
- void Clear()
- {
#if DBG
- this->value.propIds = nullptr;
- this->value.currentInitFldCount = (uint)-1;
+ value->propIds = nullptr;
+ value->currentInitFldCount = (uint)-1;
#endif
- }
- };
-};
+ }
+}
typedef JsUtil::BaseDictionary IntConstantToStackSymMap;
typedef JsUtil::BaseDictionary IntConstantToValueMap;
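The header change above swaps a full `ValueEntry` class specialization for an explicit specialization of just the `ClearValue` clearing hook. An illustrative reduction of the new shape in plain C++ (mock types, `DBG` guard dropped):

```cpp
#include <cstdio>

struct StackLiteralInitFldData
{
    const int* propIds = nullptr;
    unsigned currentInitFldCount = 0;
};

template <typename T>
struct ClearValue
{
    static void Clear(T* value) { *value = T(); } // generic default
};

// Explicit specialization of just the member: no need to re-declare a whole
// entry class to customize how one value type is cleared.
template <>
inline void ClearValue<StackLiteralInitFldData>::Clear(StackLiteralInitFldData* value)
{
    value->propIds = nullptr;
    value->currentInitFldCount = (unsigned)-1;
}

int main()
{
    StackLiteralInitFldData d{ nullptr, 5 };
    ClearValue<StackLiteralInitFldData>::Clear(&d);
    printf("%u\n", d.currentInitFldCount); // 4294967295
}
```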
@@ -317,7 +313,6 @@ class JsArrayKills
{
bool killsAllArrays : 1;
bool killsArraysWithNoMissingValues : 1;
- bool killsObjectArraysWithNoMissingValues : 1;
bool killsNativeArrays : 1;
bool killsArrayHeadSegments : 1;
bool killsArrayHeadSegmentLengths : 1;
@@ -343,9 +338,6 @@ class JsArrayKills
bool KillsArraysWithNoMissingValues() const { return killsArraysWithNoMissingValues; }
void SetKillsArraysWithNoMissingValues() { killsArraysWithNoMissingValues = true; }
- bool KillsObjectArraysWithNoMissingValues() const { return killsObjectArraysWithNoMissingValues; }
- void SetKillsObjectArraysWithNoMissingValues() { killsObjectArraysWithNoMissingValues = true; }
-
bool KillsNativeArrays() const { return killsNativeArrays; }
void SetKillsNativeArrays() { killsNativeArrays = true; }
@@ -370,7 +362,6 @@ class JsArrayKills
(valueType.IsArrayOrObjectWithArray() &&
(
(killsArraysWithNoMissingValues && valueType.HasNoMissingValues()) ||
- (killsObjectArraysWithNoMissingValues && !valueType.IsArray() && valueType.HasNoMissingValues()) ||
(killsNativeArrays && !valueType.HasVarElements())
)
);
@@ -468,6 +459,8 @@ class GlobOpt
BVSparse<JitArenaAllocator> * changedSymsAfterIncBailoutCandidate;
+ BVSparse<JitArenaAllocator> * auxSlotPtrSyms;
+
JitArenaAllocator * alloc;
JitArenaAllocator * tempAlloc;
@@ -566,7 +559,7 @@ class GlobOpt
bool AreFromSameBytecodeFunc(IR::RegOpnd const* src1, IR::RegOpnd const* dst) const;
Value * ValueNumberDst(IR::Instr **pInstr, Value *src1Val, Value *src2Val);
Value * ValueNumberLdElemDst(IR::Instr **pInstr, Value *srcVal);
- ValueType GetPrepassValueTypeForDst(const ValueType desiredValueType, IR::Instr *const instr, Value *const src1Value, Value *const src2Value, bool const isValueInfoPreciseRef = false, bool const isSafeToTransferInPrepass = false) const;
+ ValueType GetPrepassValueTypeForDst(const ValueType desiredValueType, IR::Instr *const instr, Value *const src1Value, Value *const src2Value, bool const isValueInfoPreciseRef = false) const;
bool IsPrepassSrcValueInfoPrecise(IR::Opnd *const src, Value *const srcValue, bool * canTransferValueNumberToDst = nullptr) const;
bool IsPrepassSrcValueInfoPrecise(IR::Instr *const instr, Value *const src1Value, Value *const src2Value, bool * canTransferValueNumberToDst = nullptr) const;
bool IsSafeToTransferInPrepass(StackSym * const sym, ValueInfo *const srcValueInfo) const;
@@ -697,6 +690,7 @@ class GlobOpt
IR::Instr* CreateBoundsCheckInstr(IR::Opnd* lowerBound, IR::Opnd* upperBound, int offset, IR::BailOutKind bailoutkind, BailOutInfo* bailoutInfo, Func* func);
IR::Instr* AttachBoundsCheckData(IR::Instr* instr, IR::Opnd* lowerBound, IR::Opnd* upperBound, int offset);
void OptArraySrc(IR::Instr **const instrRef, Value ** src1Val, Value ** src2Val);
+ void OptStackArgLenAndConst(IR::Instr* instr, Value** src1Val);
private:
void TrackIntSpecializedAddSubConstant(IR::Instr *const instr, const AddSubConstantInfo *const addSubConstantInfo, Value *const dstValue, const bool updateSourceBounds);
@@ -722,7 +716,6 @@ class GlobOpt
private:
void CaptureNoImplicitCallUses(IR::Opnd *opnd, const bool usesNoMissingValuesInfo, IR::Instr *const includeCurrentInstr = nullptr);
void InsertNoImplicitCallUses(IR::Instr *const instr);
- void ProcessNoImplicitCallArrayUses(IR::RegOpnd * baseOpnd, IR::ArrayRegOpnd * baseArrayOpnd, IR::Instr * instr, bool isLikelyJsArray, bool useNoMissingValues);
void PrepareLoopArrayCheckHoist();
public:
@@ -743,7 +736,7 @@ class GlobOpt
void PreLowerCanonicalize(IR::Instr *instr, Value **pSrc1Val, Value **pSrc2Val);
void ProcessKills(IR::Instr *instr);
void InsertCloneStrs(BasicBlock *toBlock, GlobOptBlockData *toData, GlobOptBlockData *fromData);
- void InsertValueCompensation(BasicBlock *const predecessor, BasicBlock *const successor, const SymToValueInfoMap *symsRequiringCompensationToMergedValueInfoMap);
+ void InsertValueCompensation(BasicBlock *const predecessor, const SymToValueInfoMap &symsRequiringCompensationToMergedValueInfoMap);
IR::Instr * ToVarUses(IR::Instr *instr, IR::Opnd *opnd, bool isDst, Value *val);
void ToVar(BVSparse<JitArenaAllocator> *bv, BasicBlock *block);
IR::Instr * ToVar(IR::Instr *instr, IR::RegOpnd *regOpnd, BasicBlock *block, Value *val, bool needsUpdate);
@@ -775,8 +768,6 @@ class GlobOpt
const bool lossy = false, const bool forceInvariantHoisting = false, IR::BailOutKind bailoutKind = IR::BailOutInvalid);
void HoistInvariantValueInfo(ValueInfo *const invariantValueInfoToHoist, Value *const valueToUpdate, BasicBlock *const targetBlock);
void OptHoistUpdateValueType(Loop* loop, IR::Instr* instr, IR::Opnd** srcOpndPtr, Value *const srcVal);
- bool IsNonNumericRegOpnd(IR::RegOpnd *opnd, bool inGlobOpt, bool *isSafeToTransferInPrepass = nullptr) const;
-
public:
static bool IsTypeSpecPhaseOff(Func const * func);
static bool DoAggressiveIntTypeSpec(Func const * func);
@@ -899,7 +890,7 @@ class GlobOpt
void KillLiveFields(StackSym * stackSym, BVSparse<JitArenaAllocator> * bv);
void KillLiveFields(PropertySym * propertySym, BVSparse<JitArenaAllocator> * bv);
void KillLiveFields(BVSparse<JitArenaAllocator> *const fieldsToKill, BVSparse<JitArenaAllocator> *const bv) const;
- void KillLiveElems(IR::IndirOpnd * indirOpnd, IR::Opnd * valueOpnd, BVSparse<JitArenaAllocator> * bv, bool inGlobOpt, Func *func);
+ void KillLiveElems(IR::IndirOpnd * indirOpnd, BVSparse<JitArenaAllocator> * bv, bool inGlobOpt, Func *func);
void KillAllFields(BVSparse<JitArenaAllocator> * bv);
void SetAnyPropertyMayBeWrittenTo();
void AddToPropertiesWrittenTo(Js::PropertyId propertyId);
@@ -923,7 +914,7 @@ class GlobOpt
void UpdateObjPtrValueType(IR::Opnd * opnd, IR::Instr * instr);
bool TrackArgumentsObject();
- void CannotAllocateArgumentsObjectOnStack();
+ void CannotAllocateArgumentsObjectOnStack(Func * curFunc);
#if DBG
bool IsPropertySymId(SymID symId) const;
@@ -945,6 +936,8 @@ class GlobOpt
bool CheckIfInstrInTypeCheckSeqEmitsTypeCheck(IR::Instr* instr, IR::PropertySymOpnd *opnd);
template <bool makeChanges>
bool ProcessPropOpInTypeCheckSeq(IR::Instr* instr, IR::PropertySymOpnd *opnd, BasicBlock* block, bool updateExistingValue, bool* emitsTypeCheckOut = nullptr, bool* changesTypeValueOut = nullptr, bool *isObjTypeChecked = nullptr);
+ StackSym * EnsureAuxSlotPtrSym(IR::PropertySymOpnd *opnd);
+ void KillAuxSlotPtrSyms(IR::PropertySymOpnd *opnd, BasicBlock *block, bool isObjTypeSpecialized);
template <typename Fn>
bool MapObjectHeaderInlinedTypeSymsUntil(BasicBlock *block, bool isObjTypeSpecialized, SymID opndId, Fn fn);
void KillObjectHeaderInlinedTypeSyms(BasicBlock *block, bool isObjTypeSpecialized, SymID symId = SymID_Invalid);
diff --git a/deps/chakrashim/core/lib/Backend/GlobOptArrays.cpp b/deps/chakrashim/core/lib/Backend/GlobOptArrays.cpp
index fff1c6e6750..4adce207858 100644
--- a/deps/chakrashim/core/lib/Backend/GlobOptArrays.cpp
+++ b/deps/chakrashim/core/lib/Backend/GlobOptArrays.cpp
@@ -151,6 +151,16 @@ bool GlobOpt::ArraySrcOpt::CheckOpCode()
return false;
}
+ if (instr->GetSrc1()->IsAddrOpnd())
+ {
+ const Js::Var val = instr->GetSrc1()->AsAddrOpnd()->m_address;
+ if (Js::TaggedInt::Is(val))
+ {
+ originalIndexOpnd = instr->UnlinkSrc1();
+ instr->SetSrc1(IR::IntConstOpnd::New(Js::TaggedInt::ToInt32(val), TyInt32, instr->m_func));
+ }
+ }
+
if (!instr->GetSrc1()->IsRegOpnd() && !instr->GetSrc1()->IsIntConstOpnd())
{
return false;
@@ -199,7 +209,7 @@ void GlobOpt::ArraySrcOpt::TypeSpecIndex()
{
// If the optimization is unable to eliminate the bounds checks, we need to restore the original var sym.
Assert(originalIndexOpnd == nullptr);
- originalIndexOpnd = instr->GetSrc1()->Copy(func)->AsRegOpnd();
+ originalIndexOpnd = instr->GetSrc1()->Copy(func);
globOpt->ToTypeSpecIndex(instr, instr->GetSrc1()->AsRegOpnd(), nullptr);
}
}
@@ -1736,14 +1746,7 @@ void GlobOpt::ArraySrcOpt::Optimize()
{
if (newBaseValueType != baseValueType)
{
- if (globOpt->IsSafeToTransferInPrePass(baseOpnd, baseValue))
- {
- UpdateValue(nullptr, nullptr, nullptr);
- }
- else if (isLikelyJsArray && globOpt->IsOperationThatLikelyKillsJsArraysWithNoMissingValues(instr) && baseValueInfo->HasNoMissingValues())
- {
- globOpt->ChangeValueType(nullptr, baseValue, baseValueInfo->Type().SetHasNoMissingValues(false), true);
- }
+ UpdateValue(nullptr, nullptr, nullptr);
}
// For javascript arrays and objects with javascript arrays:
@@ -1929,7 +1932,32 @@ void GlobOpt::ArraySrcOpt::Optimize()
baseArrayOpnd = nullptr;
}
- globOpt->ProcessNoImplicitCallArrayUses(baseOpnd, baseArrayOpnd, instr, isLikelyJsArray, isLoad || isStore || instr->m_opcode == Js::OpCode::IsIn);
+ if (isLikelyJsArray)
+ {
+ // Insert an instruction to indicate to the dead-store pass that implicit calls need to be kept disabled until this
+ // instruction. Operations other than LdElem, StElem and IsIn don't benefit much from arrays having no missing values,
+ // so no need to ensure that the array still has no missing values. For a particular array, if none of the accesses
+ // benefit much from the no-missing-values information, it may be beneficial to avoid checking for no missing
+ // values, especially in the case for a single array access, where the cost of the check could be relatively
+ // significant. An StElem has to do additional checks in the common path if the array may have missing values, and
+ // a StElem that operates on an array that has no missing values is more likely to keep the no-missing-values info
+ // on the array more precise, so it still benefits a little from the no-missing-values info.
+ globOpt->CaptureNoImplicitCallUses(baseOpnd, isLoad || isStore || instr->m_opcode == Js::OpCode::IsIn);
+ }
+ else if (baseArrayOpnd && baseArrayOpnd->HeadSegmentLengthSym())
+ {
+ // A typed array's array buffer may be transferred to a web worker as part of an implicit call, in which case the typed
+ // array's length is set to zero. Insert an instruction to indicate to the dead-store pass that implicit calls need to
+ // be disabled until this instruction.
+ IR::RegOpnd *const headSegmentLengthOpnd =
+ IR::RegOpnd::New(
+ baseArrayOpnd->HeadSegmentLengthSym(),
+ baseArrayOpnd->HeadSegmentLengthSym()->GetType(),
+ instr->m_func);
+
+ const IR::AutoReuseOpnd autoReuseHeadSegmentLengthOpnd(headSegmentLengthOpnd, instr->m_func);
+ globOpt->CaptureNoImplicitCallUses(headSegmentLengthOpnd, false);
+ }
const auto OnEliminated = [&](const Js::Phase phase, const char *const eliminatedLoad)
{
diff --git a/deps/chakrashim/core/lib/Backend/GlobOptArrays.h b/deps/chakrashim/core/lib/Backend/GlobOptArrays.h
index d6e0b2c7d09..ed99dd78b0d 100644
--- a/deps/chakrashim/core/lib/Backend/GlobOptArrays.h
+++ b/deps/chakrashim/core/lib/Backend/GlobOptArrays.h
@@ -52,7 +52,7 @@ class GlobOpt::ArraySrcOpt
IR::IndirOpnd * baseOwnerIndir = nullptr;
IR::RegOpnd * baseOpnd = nullptr;
IR::Opnd * indexOpnd = nullptr;
- IR::RegOpnd * originalIndexOpnd = nullptr;
+ IR::Opnd * originalIndexOpnd = nullptr;
bool isProfilableLdElem = false;
bool isProfilableStElem = false;
bool isLoad = false;
diff --git a/deps/chakrashim/core/lib/Backend/GlobOptBailOut.cpp b/deps/chakrashim/core/lib/Backend/GlobOptBailOut.cpp
index f168545b3b2..f94a7e31acd 100644
--- a/deps/chakrashim/core/lib/Backend/GlobOptBailOut.cpp
+++ b/deps/chakrashim/core/lib/Backend/GlobOptBailOut.cpp
@@ -482,6 +482,18 @@ GlobOpt::CaptureByteCodeSymUses(IR::Instr * instr)
void
GlobOpt::ProcessInlineeEnd(IR::Instr* instr)
{
+ if (!PHASE_OFF(Js::StackArgLenConstOptPhase, instr->m_func) && instr->m_func->IsStackArgsEnabled()
+ && instr->m_func->hasArgLenAndConstOpt && instr->m_func->unoptimizableArgumentsObjReference == 0)
+ {
+ instr->m_func->hasUnoptimizedArgumentsAccess = false;
+ if (DoInlineArgsOpt(instr->m_func))
+ {
+ instr->m_func->m_hasInlineArgsOpt = true;
+ Assert(instr->m_func->cachedInlineeFrameInfo);
+ instr->m_func->frameInfo = instr->m_func->cachedInlineeFrameInfo;
+ }
+ }
+
if (instr->m_func->m_hasInlineArgsOpt)
{
RecordInlineeFrameInfo(instr);
@@ -506,7 +518,6 @@ GlobOpt::TrackCalls(IR::Instr * instr)
if (this->currentBlock->globOptData.callSequence == nullptr)
{
this->currentBlock->globOptData.callSequence = JitAnew(this->alloc, SListBase<IR::Opnd *>);
- this->currentBlock->globOptData.callSequence = this->currentBlock->globOptData.callSequence;
}
this->currentBlock->globOptData.callSequence->Prepend(this->alloc, instr->GetDst());
@@ -571,6 +582,7 @@ GlobOpt::TrackCalls(IR::Instr * instr)
}
case Js::OpCode::InlineeStart:
+ {
Assert(instr->m_func->GetParentFunc() == this->currentBlock->globOptData.curFunc);
Assert(instr->m_func->GetParentFunc());
this->currentBlock->globOptData.curFunc = instr->m_func;
@@ -578,18 +590,24 @@ GlobOpt::TrackCalls(IR::Instr * instr)
this->func->UpdateMaxInlineeArgOutSize(this->currentBlock->globOptData.inlinedArgOutSize);
this->EndTrackCall(instr);
+ InlineeFrameInfo* inlineeFrameInfo = InlineeFrameInfo::New(instr->m_func->m_alloc);
+ inlineeFrameInfo->functionSymStartValue = instr->GetSrc1()->GetSym() ?
+ CurrentBlockData()->FindValue(instr->GetSrc1()->GetSym()) : nullptr;
+ inlineeFrameInfo->floatSyms = CurrentBlockData()->liveFloat64Syms->CopyNew(this->alloc);
+ inlineeFrameInfo->intSyms = CurrentBlockData()->liveInt32Syms->MinusNew(CurrentBlockData()->liveLossyInt32Syms, this->alloc);
+ inlineeFrameInfo->varSyms = CurrentBlockData()->liveVarSyms->CopyNew(this->alloc);
+
if (DoInlineArgsOpt(instr->m_func))
{
instr->m_func->m_hasInlineArgsOpt = true;
- InlineeFrameInfo* frameInfo = InlineeFrameInfo::New(func->m_alloc);
- instr->m_func->frameInfo = frameInfo;
- frameInfo->functionSymStartValue = instr->GetSrc1()->GetSym() ?
- CurrentBlockData()->FindValue(instr->GetSrc1()->GetSym()) : nullptr;
- frameInfo->floatSyms = CurrentBlockData()->liveFloat64Syms->CopyNew(this->alloc);
- frameInfo->intSyms = CurrentBlockData()->liveInt32Syms->MinusNew(CurrentBlockData()->liveLossyInt32Syms, this->alloc);
- frameInfo->varSyms = CurrentBlockData()->liveVarSyms->CopyNew(this->alloc);
+ instr->m_func->frameInfo = inlineeFrameInfo;
+ }
+ else
+ {
+ instr->m_func->cachedInlineeFrameInfo = inlineeFrameInfo;
}
break;
+ }
case Js::OpCode::EndCallForPolymorphicInlinee:
// Have this opcode mimic the functions of both InlineeStart and InlineeEnd in the bailout block of a polymorphic call inlined using fixed methods.
@@ -860,7 +878,7 @@ void GlobOpt::EndTrackingOfArgObjSymsForInlinee()
// This means there are arguments object symbols in the current function which are not in the current block.
// This could happen when one of the blocks has a throw and arguments object aliased in it and other blocks don't see it.
// Rare case, abort stack arguments optimization in this case.
- CannotAllocateArgumentsObjectOnStack();
+ CannotAllocateArgumentsObjectOnStack(this->currentBlock->globOptData.curFunc);
}
else
{
@@ -1306,7 +1324,7 @@ GlobOpt::MayNeedBailOnImplicitCall(IR::Instr const * instr, Value const * src1Va
return
!(
baseValueType.IsString() ||
- baseValueType.IsArray() ||
+ (baseValueType.IsAnyArray() && baseValueType.GetObjectType() != ObjectType::ObjectWithArray) ||
(instr->HasBailOutInfo() && instr->GetBailOutKindNoBits() == IR::BailOutOnIrregularLength) // guarantees no implicit calls
);
}
@@ -1337,14 +1355,6 @@ GlobOpt::MayNeedBailOnImplicitCall(IR::Instr const * instr, Value const * src1Va
);
}
- case Js::OpCode::NewScObjectNoCtor:
- if (instr->HasBailOutInfo() && (instr->GetBailOutKind() & ~IR::BailOutKindBits) == IR::BailOutFailedCtorGuardCheck)
- {
- // No helper call with this bailout.
- return false;
- }
- break;
-
default:
break;
}
diff --git a/deps/chakrashim/core/lib/Backend/GlobOptBlockData.cpp b/deps/chakrashim/core/lib/Backend/GlobOptBlockData.cpp
index 46e10460844..2d2fbc7a3a0 100644
--- a/deps/chakrashim/core/lib/Backend/GlobOptBlockData.cpp
+++ b/deps/chakrashim/core/lib/Backend/GlobOptBlockData.cpp
@@ -660,7 +660,7 @@ GlobOptBlockData::MergeBlockData(
{
if (!this->argObjSyms->Equal(fromData->argObjSyms))
{
- this->globOpt->CannotAllocateArgumentsObjectOnStack();
+ this->globOpt->CannotAllocateArgumentsObjectOnStack(nullptr);
}
}
@@ -974,8 +974,7 @@ GlobOptBlockData::MergeValueInfo(
fromDataValueInfo->AsArrayValueInfo(),
fromDataSym,
symsRequiringCompensation,
- symsCreatedForMerge,
- isLoopBackEdge);
+ symsCreatedForMerge);
}
// Consider: If both values are VarConstantValueInfo with the same value, we could
@@ -1073,8 +1072,7 @@ ValueInfo *GlobOptBlockData::MergeArrayValueInfo(
const ArrayValueInfo *const fromDataValueInfo,
Sym *const arraySym,
BVSparse<JitArenaAllocator> *const symsRequiringCompensation,
- BVSparse<JitArenaAllocator> *const symsCreatedForMerge,
- bool isLoopBackEdge)
+ BVSparse<JitArenaAllocator> *const symsCreatedForMerge)
{
Assert(mergedValueType.IsAnyOptimizedArray());
Assert(toDataValueInfo);
@@ -1097,7 +1095,7 @@ ValueInfo *GlobOptBlockData::MergeArrayValueInfo(
}
else
{
- if (!this->globOpt->IsLoopPrePass() && !isLoopBackEdge)
+ if (!this->globOpt->IsLoopPrePass())
{
// Adding compensation code in the prepass won't help, as the symstores would again be different in the main pass.
Assert(symsRequiringCompensation);
@@ -1125,7 +1123,7 @@ ValueInfo *GlobOptBlockData::MergeArrayValueInfo(
}
else
{
- if (!this->globOpt->IsLoopPrePass() && !isLoopBackEdge)
+ if (!this->globOpt->IsLoopPrePass())
{
Assert(symsRequiringCompensation);
symsRequiringCompensation->Set(arraySym->m_id);
@@ -1152,7 +1150,7 @@ ValueInfo *GlobOptBlockData::MergeArrayValueInfo(
}
else
{
- if (!this->globOpt->IsLoopPrePass() && !isLoopBackEdge)
+ if (!this->globOpt->IsLoopPrePass())
{
Assert(symsRequiringCompensation);
symsRequiringCompensation->Set(arraySym->m_id);
diff --git a/deps/chakrashim/core/lib/Backend/GlobOptBlockData.h b/deps/chakrashim/core/lib/Backend/GlobOptBlockData.h
index 541c7603411..7266cfaf413 100644
--- a/deps/chakrashim/core/lib/Backend/GlobOptBlockData.h
+++ b/deps/chakrashim/core/lib/Backend/GlobOptBlockData.h
@@ -20,7 +20,7 @@ class ExprAttributes
}
private:
- static const uint32 BitMask(const uint index)
+ static uint32 BitMask(const uint index)
{
return 1u << index;
}
@@ -264,7 +264,7 @@ class GlobOptBlockData
Value * MergeValues(Value *toDataValue, Value *fromDataValue, Sym *fromDataSym, bool isLoopBackEdge, BVSparse<JitArenaAllocator> *const symsRequiringCompensation, BVSparse<JitArenaAllocator> *const symsCreatedForMerge);
ValueInfo * MergeValueInfo(Value *toDataVal, Value *fromDataVal, Sym *fromDataSym, bool isLoopBackEdge, bool sameValueNumber, BVSparse<JitArenaAllocator> *const symsRequiringCompensation, BVSparse<JitArenaAllocator> *const symsCreatedForMerge);
JsTypeValueInfo * MergeJsTypeValueInfo(JsTypeValueInfo * toValueInfo, JsTypeValueInfo * fromValueInfo, bool isLoopBackEdge, bool sameValueNumber);
- ValueInfo * MergeArrayValueInfo(const ValueType mergedValueType, const ArrayValueInfo *const toDataValueInfo, const ArrayValueInfo *const fromDataValueInfo, Sym *const arraySym, BVSparse<JitArenaAllocator> *const symsRequiringCompensation, BVSparse<JitArenaAllocator> *const symsCreatedForMerge, bool isLoopBackEdge);
+ ValueInfo * MergeArrayValueInfo(const ValueType mergedValueType, const ArrayValueInfo *const toDataValueInfo, const ArrayValueInfo *const fromDataValueInfo, Sym *const arraySym, BVSparse<JitArenaAllocator> *const symsRequiringCompensation, BVSparse<JitArenaAllocator> *const symsCreatedForMerge);
// Argument Tracking
public:
diff --git a/deps/chakrashim/core/lib/Backend/GlobOptExpr.cpp b/deps/chakrashim/core/lib/Backend/GlobOptExpr.cpp
index d95b63779ce..2af0dbd129a 100644
--- a/deps/chakrashim/core/lib/Backend/GlobOptExpr.cpp
+++ b/deps/chakrashim/core/lib/Backend/GlobOptExpr.cpp
@@ -814,28 +814,20 @@ GlobOpt::ProcessArrayValueKills(IR::Instr *instr)
{
switch (instr->m_opcode)
{
- case Js::OpCode::StElemC:
case Js::OpCode::StElemI_A:
case Js::OpCode::StElemI_A_Strict:
case Js::OpCode::DeleteElemI_A:
case Js::OpCode::DeleteElemIStrict_A:
- case Js::OpCode::ConsoleScopedStFld:
- case Js::OpCode::ConsoleScopedStFldStrict:
- case Js::OpCode::ScopedStFld:
- case Js::OpCode::ScopedStFldStrict:
case Js::OpCode::StFld:
case Js::OpCode::StRootFld:
case Js::OpCode::StFldStrict:
case Js::OpCode::StRootFldStrict:
- case Js::OpCode::StSuperFld:
case Js::OpCode::StSlot:
case Js::OpCode::StSlotChkUndecl:
case Js::OpCode::DeleteFld:
case Js::OpCode::DeleteRootFld:
case Js::OpCode::DeleteFldStrict:
case Js::OpCode::DeleteRootFldStrict:
- case Js::OpCode::ScopedDeleteFld:
- case Js::OpCode::ScopedDeleteFldStrict:
case Js::OpCode::StArrViewElem:
// These array helpers may change A.length (and A[i] could be A.length)...
case Js::OpCode::InlineArrayPush:
@@ -852,7 +844,6 @@ GlobOpt::ProcessArrayValueKills(IR::Instr *instr)
case IR::HelperArray_Shift:
case IR::HelperArray_Unshift:
case IR::HelperArray_Splice:
- case IR::HelperArray_Concat:
this->currentBlock->globOptData.liveArrayValues->ClearAll();
break;
}
diff --git a/deps/chakrashim/core/lib/Backend/GlobOptFields.cpp b/deps/chakrashim/core/lib/Backend/GlobOptFields.cpp
index 56d000fac9b..af2ae93713b 100644
--- a/deps/chakrashim/core/lib/Backend/GlobOptFields.cpp
+++ b/deps/chakrashim/core/lib/Backend/GlobOptFields.cpp
@@ -208,7 +208,7 @@ void GlobOpt::KillLiveFields(BVSparse *const fieldsToKill, BV
}
void
-GlobOpt::KillLiveElems(IR::IndirOpnd * indirOpnd, IR::Opnd * valueOpnd, BVSparse<JitArenaAllocator> * bv, bool inGlobOpt, Func *func)
+GlobOpt::KillLiveElems(IR::IndirOpnd * indirOpnd, BVSparse<JitArenaAllocator> * bv, bool inGlobOpt, Func *func)
{
IR::RegOpnd *indexOpnd = indirOpnd->GetIndexOpnd();
@@ -225,42 +225,18 @@ GlobOpt::KillLiveElems(IR::IndirOpnd * indirOpnd, IR::Opnd * valueOpnd, BVSparse
// - We check the type specialization status for the sym as well. For the purpose of doing kills, we can assume that
// if type specialization happened, that fields don't need to be killed. Note that they may be killed in the next
// pass based on the value.
- bool isSafeToTransfer = true;
- if (func->GetThisOrParentInlinerHasArguments() || this->IsNonNumericRegOpnd(indexOpnd, inGlobOpt, &isSafeToTransfer))
+ if (func->GetThisOrParentInlinerHasArguments() ||
+ (
+ indexOpnd &&
+ (
+ indexOpnd->m_sym->m_isNotNumber ||
+ (inGlobOpt && !indexOpnd->GetValueType().IsNumber() && !currentBlock->globOptData.IsTypeSpecialized(indexOpnd->m_sym))
+ )
+ ))
{
this->KillAllFields(bv); // This also kills all property type values, as the same bit-vector tracks those stack syms
SetAnyPropertyMayBeWrittenTo();
}
- else if (inGlobOpt)
- {
- Value * indexValue = indexOpnd ? this->currentBlock->globOptData.FindValue(indexOpnd->GetSym()) : nullptr;
- ValueInfo * indexValueInfo = indexValue ? indexValue->GetValueInfo() : nullptr;
- int indexLowerBound = 0;
-
- if (!isSafeToTransfer || indirOpnd->GetOffset() < 0 || (indexOpnd && (!indexValueInfo || !indexValueInfo->TryGetIntConstantLowerBound(&indexLowerBound, false) || indexLowerBound < 0)))
- {
- // Write/delete to a non-integer numeric index can't alias a name on the RHS of a dot, but it can change object layout
- this->KillAllObjectTypes(bv);
- }
- else if ((!valueOpnd || valueOpnd->IsVar()) && this->objectTypeSyms != nullptr)
- {
- // If we wind up converting a native array, block final-type opt at this point, because we could evolve
- // to a type with the wrong type ID. Do this by noting that we may have evolved any type and so must
- // check it before evolving it further.
- IR::RegOpnd *baseOpnd = indirOpnd->GetBaseOpnd();
- Value * baseValue = baseOpnd ? this->currentBlock->globOptData.FindValue(baseOpnd->m_sym) : nullptr;
- ValueInfo * baseValueInfo = baseValue ? baseValue->GetValueInfo() : nullptr;
- if (!baseValueInfo || !baseValueInfo->IsNotNativeArray() ||
- (this->IsLoopPrePass() && !this->IsSafeToTransferInPrepass(baseOpnd->m_sym, baseValueInfo)))
- {
- if (this->currentBlock->globOptData.maybeWrittenTypeSyms == nullptr)
- {
- this->currentBlock->globOptData.maybeWrittenTypeSyms = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
- }
- this->currentBlock->globOptData.maybeWrittenTypeSyms->Or(this->objectTypeSyms);
- }
- }
- }
}
void
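Condensed, the surviving predicate in `KillLiveElems` says: an element store must conservatively kill every live field unless the index is known numeric, because a non-numeric index can alias a property name on the right-hand side of a dot. A sketch with illustrative parameter names:

```cpp
// Element store a[i]: decide whether all live fields must be killed.
bool MustKillAllFields(bool inlinerHasArguments, bool hasIndexSym,
                       bool indexIsNotNumber, bool inGlobOpt,
                       bool indexValueTypeIsNumber, bool indexIsTypeSpecialized)
{
    if (inlinerHasArguments)
        return true;   // a[i] may write through the arguments object
    if (!hasIndexSym)
        return false;  // constant offset: cannot name a property
    return indexIsNotNumber ||
           (inGlobOpt && !indexValueTypeIsNumber && !indexIsTypeSpecialized);
}
```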
@@ -347,40 +323,27 @@ GlobOpt::ProcessFieldKills(IR::Instr *instr, BVSparse *bv, bo
IR::JnHelperMethod fnHelper;
switch(instr->m_opcode)
{
- case Js::OpCode::StElemC:
case Js::OpCode::StElemI_A:
case Js::OpCode::StElemI_A_Strict:
Assert(dstOpnd != nullptr);
KillLiveFields(this->lengthEquivBv, bv);
- KillLiveElems(dstOpnd->AsIndirOpnd(), instr->GetSrc1(), bv, inGlobOpt, instr->m_func);
- if (inGlobOpt)
- {
- KillObjectHeaderInlinedTypeSyms(this->currentBlock, false);
- }
+ KillLiveElems(dstOpnd->AsIndirOpnd(), bv, inGlobOpt, instr->m_func);
break;
case Js::OpCode::InitComputedProperty:
- case Js::OpCode::InitGetElemI:
- case Js::OpCode::InitSetElemI:
- KillLiveElems(dstOpnd->AsIndirOpnd(), instr->GetSrc1(), bv, inGlobOpt, instr->m_func);
- if (inGlobOpt)
- {
- KillObjectHeaderInlinedTypeSyms(this->currentBlock, false);
- }
+ KillLiveElems(dstOpnd->AsIndirOpnd(), bv, inGlobOpt, instr->m_func);
break;
case Js::OpCode::DeleteElemI_A:
case Js::OpCode::DeleteElemIStrict_A:
Assert(dstOpnd != nullptr);
- KillLiveElems(instr->GetSrc1()->AsIndirOpnd(), nullptr, bv, inGlobOpt, instr->m_func);
+ KillLiveElems(instr->GetSrc1()->AsIndirOpnd(), bv, inGlobOpt, instr->m_func);
break;
case Js::OpCode::DeleteFld:
case Js::OpCode::DeleteRootFld:
case Js::OpCode::DeleteFldStrict:
case Js::OpCode::DeleteRootFldStrict:
- case Js::OpCode::ScopedDeleteFld:
- case Js::OpCode::ScopedDeleteFldStrict:
sym = instr->GetSrc1()->AsSymOpnd()->m_sym;
KillLiveFields(sym->AsPropertySym(), bv);
if (inGlobOpt)
@@ -402,44 +365,13 @@ GlobOpt::ProcessFieldKills(IR::Instr *instr, BVSparse *bv, bo
this->KillAllObjectTypes(bv);
}
break;
-
- case Js::OpCode::ConsoleScopedStFld:
- case Js::OpCode::ConsoleScopedStFldStrict:
- case Js::OpCode::ScopedStFld:
- case Js::OpCode::ScopedStFldStrict:
- // This is already taken care of for FastFld opcodes
-
- if (inGlobOpt)
- {
- KillObjectHeaderInlinedTypeSyms(this->currentBlock, false);
- if (this->objectTypeSyms)
- {
- if (this->currentBlock->globOptData.maybeWrittenTypeSyms == nullptr)
- {
- this->currentBlock->globOptData.maybeWrittenTypeSyms = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
- }
- this->currentBlock->globOptData.maybeWrittenTypeSyms->Or(this->objectTypeSyms);
- }
- }
-
- // fall through
-
case Js::OpCode::InitFld:
- case Js::OpCode::InitConstFld:
- case Js::OpCode::InitLetFld:
- case Js::OpCode::InitRootFld:
- case Js::OpCode::InitRootConstFld:
- case Js::OpCode::InitRootLetFld:
-#if !FLOATVAR
- case Js::OpCode::StSlotBoxTemp:
-#endif
case Js::OpCode::StFld:
case Js::OpCode::StRootFld:
case Js::OpCode::StFldStrict:
case Js::OpCode::StRootFldStrict:
case Js::OpCode::StSlot:
case Js::OpCode::StSlotChkUndecl:
- case Js::OpCode::StSuperFld:
Assert(dstOpnd != nullptr);
sym = dstOpnd->AsSymOpnd()->m_sym;
if (inGlobOpt)
@@ -461,27 +393,14 @@ GlobOpt::ProcessFieldKills(IR::Instr *instr, BVSparse *bv, bo
case Js::OpCode::InlineArrayPush:
case Js::OpCode::InlineArrayPop:
- if(instr->m_func->GetThisOrParentInlinerHasArguments())
- {
- this->KillAllFields(bv);
- this->SetAnyPropertyMayBeWrittenTo();
- }
- else
- {
- KillLiveFields(this->lengthEquivBv, bv);
- if (inGlobOpt)
- {
- // Deleting an item, or pushing a property to a non-array, may change object layout
- KillAllObjectTypes(bv);
- }
- }
+ KillLiveFields(this->lengthEquivBv, bv);
break;
case Js::OpCode::InlineeStart:
case Js::OpCode::InlineeEnd:
Assert(!instr->UsesAllFields());
- // Kill all live 'arguments' and 'caller' fields, as 'inlineeFunction.arguments' and 'inlineeFunction.caller'
+ // Kill all live 'arguments' and 'caller' fields, as 'inlineeFunction.arguments' and 'inlineeFunction.caller'
// cannot be copy-propped across different instances of the same inlined function.
KillLiveFields(argumentsEquivBv, bv);
KillLiveFields(callerEquivBv, bv);
@@ -490,68 +409,19 @@ GlobOpt::ProcessFieldKills(IR::Instr *instr, BVSparse *bv, bo
case Js::OpCode::CallDirect:
fnHelper = instr->GetSrc1()->AsHelperCallOpnd()->m_fnHelper;
- switch (fnHelper)
+ // Kill length field for built-ins that can update it.
+ if(nullptr != this->lengthEquivBv && (fnHelper == IR::JnHelperMethod::HelperArray_Shift || fnHelper == IR::JnHelperMethod::HelperArray_Splice
+ || fnHelper == IR::JnHelperMethod::HelperArray_Unshift))
{
- case IR::JnHelperMethod::HelperArray_Shift:
- case IR::JnHelperMethod::HelperArray_Splice:
- case IR::JnHelperMethod::HelperArray_Unshift:
- // Kill length field for built-ins that can update it.
- if (nullptr != this->lengthEquivBv)
- {
- // If has arguments, all fields are killed in fall through
- if (!instr->m_func->GetThisOrParentInlinerHasArguments())
- {
- KillLiveFields(this->lengthEquivBv, bv);
- }
- }
- // fall through
-
- case IR::JnHelperMethod::HelperArray_Reverse:
- if (instr->m_func->GetThisOrParentInlinerHasArguments())
- {
- this->KillAllFields(bv);
- this->SetAnyPropertyMayBeWrittenTo();
- }
- else if (inGlobOpt)
- {
- // Deleting an item may change object layout
- KillAllObjectTypes(bv);
- }
- break;
-
- case IR::JnHelperMethod::HelperArray_Slice:
- case IR::JnHelperMethod::HelperArray_Concat:
- if (inGlobOpt && this->objectTypeSyms)
- {
- if (this->currentBlock->globOptData.maybeWrittenTypeSyms == nullptr)
- {
- this->currentBlock->globOptData.maybeWrittenTypeSyms = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc);
- }
- this->currentBlock->globOptData.maybeWrittenTypeSyms->Or(this->objectTypeSyms);
- }
- break;
+ KillLiveFields(this->lengthEquivBv, bv);
+ }
- case IR::JnHelperMethod::HelperRegExp_Exec:
- case IR::JnHelperMethod::HelperRegExp_ExecResultNotUsed:
- case IR::JnHelperMethod::HelperRegExp_ExecResultUsed:
- case IR::JnHelperMethod::HelperRegExp_ExecResultUsedAndMayBeTemp:
- case IR::JnHelperMethod::HelperRegExp_MatchResultNotUsed:
- case IR::JnHelperMethod::HelperRegExp_MatchResultUsed:
- case IR::JnHelperMethod::HelperRegExp_MatchResultUsedAndMayBeTemp:
- case IR::JnHelperMethod::HelperRegExp_ReplaceStringResultUsed:
- case IR::JnHelperMethod::HelperRegExp_ReplaceStringResultNotUsed:
- case IR::JnHelperMethod::HelperRegExp_SplitResultNotUsed:
- case IR::JnHelperMethod::HelperRegExp_SplitResultUsed:
- case IR::JnHelperMethod::HelperRegExp_SplitResultUsedAndMayBeTemp:
- case IR::JnHelperMethod::HelperRegExp_SymbolSearch:
- case IR::JnHelperMethod::HelperString_Match:
- case IR::JnHelperMethod::HelperString_Search:
- case IR::JnHelperMethod::HelperString_Split:
- case IR::JnHelperMethod::HelperString_Replace:
- // Consider: We may not need to kill all fields here.
- // We need to kill all the built-in properties that can be written, though, and there are a lot of those.
- this->KillAllFields(bv);
- break;
+ if ((fnHelper == IR::JnHelperMethod::HelperRegExp_Exec)
+ || (fnHelper == IR::JnHelperMethod::HelperString_Match)
+ || (fnHelper == IR::JnHelperMethod::HelperString_Replace))
+ {
+ // Consider: We may not need to kill all fields here.
+ this->KillAllFields(bv);
}
break;
@@ -564,18 +434,6 @@ GlobOpt::ProcessFieldKills(IR::Instr *instr, BVSparse *bv, bo
}
break;
- case Js::OpCode::InitClass:
- case Js::OpCode::InitProto:
- case Js::OpCode::NewScObjectNoCtor:
- case Js::OpCode::NewScObjectNoCtorFull:
- if (inGlobOpt)
- {
- // Opcodes that make an object into a prototype may break object-header-inlining and final type opt.
- // Kill all known object layouts.
- KillAllObjectTypes(bv);
- }
- break;
-
default:
if (instr->UsesAllFields())
{
@@ -635,7 +493,7 @@ GlobOpt::CreateFieldSrcValue(PropertySym * sym, PropertySym * originalSym, IR::O
}
Assert((*ppOpnd)->AsSymOpnd()->m_sym == sym || this->IsLoopPrePass());
-
+
// We don't use the sym store to do copy prop on hoisted fields, but create a value
// in case it can be copy prop out of the loop.
return this->NewGenericValue(ValueType::Uninitialized, *ppOpnd);
@@ -906,7 +764,7 @@ GlobOpt::FinishOptPropOp(IR::Instr *instr, IR::PropertySymOpnd *opnd, BasicBlock
SymID opndId = opnd->HasObjectTypeSym() ? opnd->GetObjectTypeSym()->m_id : -1;
- if (!isObjTypeSpecialized || opnd->IsBeingAdded())
+ if (!isObjTypeChecked)
{
if (block->globOptData.maybeWrittenTypeSyms == nullptr)
{
@@ -929,6 +787,7 @@ GlobOpt::FinishOptPropOp(IR::Instr *instr, IR::PropertySymOpnd *opnd, BasicBlock
if (!isObjTypeSpecialized || opnd->ChangesObjectLayout())
{
this->KillObjectHeaderInlinedTypeSyms(block, isObjTypeSpecialized, opndId);
+ this->KillAuxSlotPtrSyms(opnd, block, isObjTypeSpecialized);
}
else if (!isObjTypeChecked && this->HasLiveObjectHeaderInlinedTypeSym(block, true, opndId))
{
@@ -939,6 +798,37 @@ GlobOpt::FinishOptPropOp(IR::Instr *instr, IR::PropertySymOpnd *opnd, BasicBlock
return isObjTypeSpecialized;
}
+StackSym *
+GlobOpt::EnsureAuxSlotPtrSym(IR::PropertySymOpnd *opnd)
+{
+ StackSym *auxSlotPtrSym = opnd->EnsureAuxSlotPtrSym(this->func);
+ this->auxSlotPtrSyms->Set(auxSlotPtrSym->m_id);
+ return auxSlotPtrSym;
+}
+
+void
+GlobOpt::KillAuxSlotPtrSyms(IR::PropertySymOpnd *opnd, BasicBlock *block, bool isObjTypeSpecialized)
+{
+ StackSym *auxSlotPtrSym = nullptr;
+ if (isObjTypeSpecialized)
+ {
+ // Kill all aux slot syms other than this one
+ auxSlotPtrSym = opnd->GetAuxSlotPtrSym();
+ if (auxSlotPtrSym)
+ {
+ Assert(this->auxSlotPtrSyms && this->auxSlotPtrSyms->Test(auxSlotPtrSym->m_id));
+ this->auxSlotPtrSyms->Clear(auxSlotPtrSym->m_id);
+ }
+ }
+
+ block->globOptData.liveFields->Minus(this->auxSlotPtrSyms);
+
+ if (auxSlotPtrSym)
+ {
+ this->auxSlotPtrSyms->Set(auxSlotPtrSym->m_id);
+ }
+}
+
void
GlobOpt::KillObjectHeaderInlinedTypeSyms(BasicBlock *block, bool isObjTypeSpecialized, SymID opndId)
{
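`KillAuxSlotPtrSyms` above is plain set arithmetic on sparse bit vectors: subtract all tracked aux-slot-pointer syms from `liveFields`, sparing the one belonging to the just-specialized operand. A `std::set` stand-in for `BVSparse`:

```cpp
#include <cstdio>
#include <set>

using SymIdSet = std::set<unsigned>;

// liveFields->Minus(auxSlotPtrSyms), sparing the sym whose type was just
// checked, matching the order of operations in KillAuxSlotPtrSyms.
void KillAuxSlotPtrSyms(SymIdSet& liveFields, SymIdSet& auxSlotPtrSyms,
                        const unsigned* specializedSym /* may be null */)
{
    if (specializedSym)
        auxSlotPtrSyms.erase(*specializedSym);   // keep this one live
    for (unsigned id : auxSlotPtrSyms)
        liveFields.erase(id);                    // the Minus() call
    if (specializedSym)
        auxSlotPtrSyms.insert(*specializedSym);  // restore the tracking set
}

int main()
{
    SymIdSet live = { 1, 2, 3 }, aux = { 2, 3 };
    const unsigned spared = 3;
    KillAuxSlotPtrSyms(live, aux, &spared);
    for (unsigned id : live) printf("%u ", id);  // 1 3
}
```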
@@ -1123,19 +1013,6 @@ GlobOpt::ProcessPropOpInTypeCheckSeq(IR::Instr* instr, IR::PropertySymOpnd *opnd
Assert(opnd->IsTypeCheckSeqCandidate());
Assert(opnd->HasObjectTypeSym());
- if (opnd->HasTypeMismatch())
- {
- if (emitsTypeCheckOut != nullptr)
- {
- *emitsTypeCheckOut = false;
- }
- if (changesTypeValueOut != nullptr)
- {
- *changesTypeValueOut = false;
- }
- return false;
- }
-
bool isStore = opnd == instr->GetDst();
bool isTypeDead = opnd->IsTypeDead();
bool consumeType = makeChanges && !IsLoopPrePass();
@@ -1243,7 +1120,7 @@ GlobOpt::ProcessPropOpInTypeCheckSeq(IR::Instr* instr, IR::PropertySymOpnd *opnd
// a new type value here.
isSpecialized = false;
- if (makeChanges)
+ if (consumeType)
{
opnd->SetTypeMismatch(true);
}
@@ -1287,7 +1164,7 @@ GlobOpt::ProcessPropOpInTypeCheckSeq(IR::Instr* instr, IR::PropertySymOpnd *opnd
// a new type value here.
isSpecialized = false;
- if (makeChanges)
+ if (consumeType)
{
opnd->SetTypeMismatch(true);
}
@@ -1338,7 +1215,7 @@ GlobOpt::ProcessPropOpInTypeCheckSeq(IR::Instr* instr, IR::PropertySymOpnd *opnd
{
// Indicates failure/mismatch
isSpecialized = false;
- if (makeChanges)
+ if (consumeType)
{
opnd->SetTypeMismatch(true);
}
@@ -1407,8 +1284,8 @@ GlobOpt::ProcessPropOpInTypeCheckSeq(IR::Instr* instr, IR::PropertySymOpnd *opnd
}
}
else if (valueInfo->GetJsTypeSet() &&
- (opnd->IsMono() ?
- valueInfo->GetJsTypeSet()->Contains(opnd->GetFirstEquivalentType()) :
+ (opnd->IsMono() ?
+ valueInfo->GetJsTypeSet()->Contains(opnd->GetFirstEquivalentType()) :
IsSubsetOf(opndTypeSet, valueInfo->GetJsTypeSet())
)
)
@@ -1437,7 +1314,7 @@ GlobOpt::ProcessPropOpInTypeCheckSeq(IR::Instr* instr, IR::PropertySymOpnd *opnd
// a new type value here.
isSpecialized = false;
- if (makeChanges)
+ if (consumeType)
{
opnd->SetTypeMismatch(true);
}
@@ -1541,6 +1418,43 @@ GlobOpt::ProcessPropOpInTypeCheckSeq(IR::Instr* instr, IR::PropertySymOpnd *opnd
*changesTypeValueOut = isSpecialized && (emitsTypeCheck || addsProperty);
}
+ if (makeChanges)
+ {
+ // Track liveness of aux slot ptr syms.
+ if (!PHASE_OFF(Js::ReuseAuxSlotPtrPhase, this->func) && isSpecialized)
+ {
+ if (opnd->UsesAuxSlot() && !opnd->IsLoadedFromProto())
+ {
+ // Optimized ld/st that loads/uses an aux slot ptr.
+ // Aux slot sym is live forward.
+ StackSym *auxSlotPtrSym = this->EnsureAuxSlotPtrSym(opnd);
+ if (!this->IsLoopPrePass() && opnd->IsTypeChecked())
+ {
+ if (block->globOptData.liveFields->TestAndSet(auxSlotPtrSym->m_id))
+ {
+ // Aux slot sym is available here. Tell lowerer to use it.
+ opnd->SetAuxSlotPtrSymAvailable(true);
+ }
+ }
+ else
+ {
+ block->globOptData.liveFields->Set(auxSlotPtrSym->m_id);
+ }
+ }
+ else if (!opnd->IsTypeChecked())
+ {
+ // Type sym is not available here (i.e., object shape is not known) and we're not loading the aux slots.
+ // May get here with aux slot sym still in live set if type sym is not in the value table.
+ // Clear the aux slot sym out of the live set.
+ StackSym *auxSlotPtrSym = opnd->GetAuxSlotPtrSym();
+ if (auxSlotPtrSym)
+ {
+ block->globOptData.liveFields->Clear(auxSlotPtrSym->m_id);
+ }
+ }
+ }
+ }
+
return isSpecialized;
}
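The liveness tracking added above leans on `TestAndSet`, which sets the bit and returns its previous state; a `true` result means the aux-slot pointer was already live and the lowerer may reuse it. A minimal stand-in:

```cpp
#include <cstdio>
#include <set>

// std::set analogue of BVSparse::TestAndSet: insert and report whether the
// id was already present.
bool TestAndSet(std::set<unsigned>& bv, unsigned id)
{
    return !bv.insert(id).second;   // insert() reports whether it was new
}

int main()
{
    std::set<unsigned> liveFields;
    printf("%d\n", TestAndSet(liveFields, 7)); // 0: first sight, now live
    printf("%d\n", TestAndSet(liveFields, 7)); // 1: already live -> reusable
}
```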
@@ -1559,7 +1473,7 @@ GlobOpt::OptNewScObject(IR::Instr** instrPtr, Value* srcVal)
instr->m_func->GetConstructorCache(static_cast<Js::ProfileId>(instr->AsProfiledInstr()->u.profileId)) : nullptr;
// TODO: OOP JIT, enable assert
- //Assert(ctorCache == nullptr || srcVal->GetValueInfo()->IsVarConstant() && Js::JavascriptFunction::Is(srcVal->GetValueInfo()->AsVarConstant()->VarValue()));
+ //Assert(ctorCache == nullptr || srcVal->GetValueInfo()->IsVarConstant() && Js::VarIs<Js::JavascriptFunction>(srcVal->GetValueInfo()->AsVarConstant()->VarValue()));
Assert(ctorCache == nullptr || !ctorCache->IsTypeFinal() || ctorCache->CtorHasNoExplicitReturnValue());
if (ctorCache != nullptr && !ctorCache->SkipNewScObject() && (isCtorInlined || ctorCache->IsTypeFinal()))
@@ -1854,6 +1768,11 @@ GlobOpt::KillObjectType(StackSym* objectSym, BVSparse* liveFi
}
liveFields->Clear(objectSym->GetObjectTypeSym()->m_id);
+ StackSym *auxSlotPtrSym = objectSym->GetAuxSlotPtrSym();
+ if (auxSlotPtrSym)
+ {
+ liveFields->Clear(auxSlotPtrSym->m_id);
+ }
}
void
@@ -1862,6 +1781,7 @@ GlobOpt::KillAllObjectTypes(BVSparse* liveFields)
if (this->objectTypeSyms && liveFields)
{
liveFields->Minus(this->objectTypeSyms);
+ liveFields->Minus(this->auxSlotPtrSyms);
}
}
@@ -2047,6 +1967,12 @@ GlobOpt::UpdateObjPtrValueType(IR::Opnd * opnd, IR::Instr * instr)
AnalysisAssert(type != nullptr);
Js::TypeId typeId = type->GetTypeId();
+ if (Js::TypedArrayBase::Is(typeId))
+ {
+ // Type ID does not allow us to distinguish between virtual and non-virtual typed array.
+ return;
+ }
+
// Passing false for useVirtual as we would never have a virtual typed array hitting this code path
ValueType newValueType = ValueType::FromTypeId(typeId, false);
@@ -2055,8 +1981,20 @@ GlobOpt::UpdateObjPtrValueType(IR::Opnd * opnd, IR::Instr * instr)
switch (typeId)
{
default:
- // Can't mark as definite object because it may actually be object-with-array.
- // Consider: a value type that subsumes object, array, and object-with-array.
+ if (typeId > Js::TypeIds_LastStaticType)
+ {
+ Assert(typeId != Js::TypeIds_Proxy);
+ if (objValueType.IsLikelyArrayOrObjectWithArray())
+ {
+ // If the value was likely object-with-array before, we can't make it a
+ // definite object-with-array, since we have only proved that it is an object.
+ // Keep the likely array or object-with-array type.
+ }
+ else
+ {
+ newValueType = ValueType::GetObject(ObjectType::Object);
+ }
+ }
break;
case Js::TypeIds_NativeIntArray:
case Js::TypeIds_NativeFloatArray:
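The new default-case logic reads as a small decision function: a dynamic type id (non-static; proxies are asserted never to reach here) justifies a definite `Object` value type, except when the prior type was likely array or object-with-array, where plain `Object` would claim more than profiling proved. Sketch with illustrative flags:

```cpp
enum class Decision { KeepLikely, DefiniteObject, Unchanged };

// Condensed form of the default case in UpdateObjPtrValueType above.
Decision UpgradeObjValueType(bool typeIdIsDynamic, bool likelyArrayOrObjectWithArray)
{
    if (!typeIdIsDynamic)
        return Decision::Unchanged;               // static type: no upgrade
    return likelyArrayOrObjectWithArray ? Decision::KeepLikely
                                        : Decision::DefiniteObject;
}
```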
diff --git a/deps/chakrashim/core/lib/Backend/GlobOptIntBounds.cpp b/deps/chakrashim/core/lib/Backend/GlobOptIntBounds.cpp
index 490ac1519d5..58cd0148af7 100644
--- a/deps/chakrashim/core/lib/Backend/GlobOptIntBounds.cpp
+++ b/deps/chakrashim/core/lib/Backend/GlobOptIntBounds.cpp
@@ -803,7 +803,7 @@ void GlobOpt::TrackIntSpecializedAddSubConstant(
// Ensure that the sym is live in the landing pad, and that its value has not changed in an unknown way yet
Value *const landingPadValue = currentBlock->loop->landingPad->globOptData.FindValue(sym);
- if(!landingPadValue || srcValueNumber != landingPadValue->GetValueNumber() || currentBlock->loop->symsDefInLoop->Test(sym->m_id))
+ if(!landingPadValue || srcValueNumber != landingPadValue->GetValueNumber())
{
updateInductionVariableValueNumber = false;
break;
@@ -1278,20 +1278,13 @@ GlobOpt::InvalidateInductionVariables(IR::Instr * instr)
}
// If this is an induction variable, then treat it the way the prepass would have if it had seen
- // the assignment and the resulting change to the value number, and mark induction variables
- // for the loop as indeterminate.
- // We need to invalidate all induction variables for the loop, because we might have used the
- // invalidated induction variable to calculate the loopCount, and this now invalid loopCount
- // also impacts bound checks for secondary induction variables
+ // the assignment and the resulting change to the value number, and mark it as indeterminate.
for (Loop * loop = this->currentBlock->loop; loop; loop = loop->parent)
{
- if (loop->inductionVariables && loop->inductionVariables->ContainsKey(dstSym->m_id))
+ InductionVariable *iv = nullptr;
+ if (loop->inductionVariables && loop->inductionVariables->TryGetReference(dstSym->m_id, &iv))
{
- for (auto it = loop->inductionVariables->GetIterator(); it.IsValid(); it.MoveNext())
- {
- InductionVariable& inductionVariable = it.CurrentValueReference();
- inductionVariable.SetChangeIsIndeterminate();
- }
+ iv->SetChangeIsIndeterminate();
}
}
}
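After this change, invalidation touches only the assigned symbol's entry in each enclosing loop's induction-variable map rather than sweeping them all. A `std::unordered_map` reduction of the `TryGetReference` lookup (illustrative types):

```cpp
#include <unordered_map>

struct InductionVariable
{
    bool indeterminate = false;
    void SetChangeIsIndeterminate() { indeterminate = true; }
};

using IVMap = std::unordered_map<unsigned /* symId */, InductionVariable>;

// TryGetReference analogue: fetch a mutable entry if present and mark only
// the assigned symbol indeterminate.
void InvalidateInductionVariable(IVMap* inductionVariables, unsigned dstSymId)
{
    if (!inductionVariables)
        return;
    auto it = inductionVariables->find(dstSymId);
    if (it != inductionVariables->end())
        it->second.SetChangeIsIndeterminate();
}
```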
@@ -1829,16 +1822,11 @@ void GlobOpt::GenerateLoopCountPlusOne(Loop *const loop, LoopCount *const loopCo
IR::RegOpnd *loopCountOpnd = IR::RegOpnd::New(type, func);
IR::RegOpnd *minusOneOpnd = IR::RegOpnd::New(loopCount->LoopCountMinusOneSym(), type, func);
minusOneOpnd->SetIsJITOptimizedReg(true);
- IR::Instr* incrInstr = IR::Instr::New(Js::OpCode::Add_I4,
- loopCountOpnd,
- minusOneOpnd,
- IR::IntConstOpnd::New(1, type, func, true),
- func);
-
- insertBeforeInstr->InsertBefore(incrInstr);
-
- // Incrementing by 1 can overflow - add a bounds check bailout here
- incrInstr->ConvertToBailOutInstr(bailOutInfo, IR::BailOutOnFailedHoistedLoopCountBasedBoundCheck);
+ insertBeforeInstr->InsertBefore(IR::Instr::New(Js::OpCode::Add_I4,
+ loopCountOpnd,
+ minusOneOpnd,
+ IR::IntConstOpnd::New(1, type, func, true),
+ func));
loopCount->SetLoopCountSym(loopCountOpnd->GetStackSym());
}
}
@@ -2992,11 +2980,7 @@ void GlobOpt::DetermineArrayBoundCheckHoistability(
{
// The loop count is constant, fold (indexOffset + loopCountMinusOne * maxMagnitudeChange)
TRACE_PHASE_VERBOSE(Js::Phase::BoundCheckHoistPhase, 3, _u("Loop count is constant, folding\n"));
-
- int loopCountMinusOnePlusOne = 0;
-
- if (Int32Math::Add(loopCount->LoopCountMinusOneConstantValue(), 1, &loopCountMinusOnePlusOne) ||
- Int32Math::Mul(loopCountMinusOnePlusOne, maxMagnitudeChange, &offset) ||
+ if(Int32Math::Mul(loopCount->LoopCountMinusOneConstantValue(), maxMagnitudeChange, &offset) ||
Int32Math::Add(offset, indexOffset, &offset))
{
TRACE_PHASE_VERBOSE(Js::Phase::BoundCheckHoistPhase, 4, _u("Folding failed\n"));
diff --git a/deps/chakrashim/core/lib/Backend/IR.cpp b/deps/chakrashim/core/lib/Backend/IR.cpp
index 1e3a4c1074b..3893e1f4c4b 100644
--- a/deps/chakrashim/core/lib/Backend/IR.cpp
+++ b/deps/chakrashim/core/lib/Backend/IR.cpp
@@ -68,7 +68,7 @@ Instr::IsPlainInstr() const
}
bool
-Instr::DoStackArgsOpt(Func *topFunc) const
+Instr::DoStackArgsOpt() const
{
return this->usesStackArgumentsObject && m_func->IsStackArgsEnabled();
}
@@ -3307,14 +3307,7 @@ bool Instr::TransfersSrcValue()
// Consider: Add opcode attribute to indicate whether the opcode would use the value or not
- return
- this->GetDst() != nullptr &&
-
- // The lack of a Src2 does not always indicate that the instr is not a transfer instr (ex: StSlotChkUndecl).
- (this->GetSrc2() == nullptr || OpCodeAttr::NonIntTransfer(this->m_opcode)) &&
-
- !OpCodeAttr::DoNotTransfer(this->m_opcode) &&
- !this->CallsAccessor();
+ return this->GetDst() != nullptr && this->GetSrc2() == nullptr && !OpCodeAttr::DoNotTransfer(this->m_opcode) && !this->CallsAccessor();
}
diff --git a/deps/chakrashim/core/lib/Backend/IR.h b/deps/chakrashim/core/lib/Backend/IR.h
index 90e4a25f60f..0b2ade8495d 100644
--- a/deps/chakrashim/core/lib/Backend/IR.h
+++ b/deps/chakrashim/core/lib/Backend/IR.h
@@ -214,7 +214,7 @@ class Instr
bool StartsBasicBlock() const;
bool EndsBasicBlock() const;
bool HasFallThrough() const;
- bool DoStackArgsOpt(Func *topFunc) const;
+ bool DoStackArgsOpt() const;
bool HasAnyLoadHeapArgsOpCode();
bool IsEqual(IR::Instr *instr) const;
diff --git a/deps/chakrashim/core/lib/Backend/IRBuilder.cpp b/deps/chakrashim/core/lib/Backend/IRBuilder.cpp
index 75ee5c7f245..4e97a6d22f0 100644
--- a/deps/chakrashim/core/lib/Backend/IRBuilder.cpp
+++ b/deps/chakrashim/core/lib/Backend/IRBuilder.cpp
@@ -49,7 +49,7 @@ IRBuilder::AddStatementBoundary(uint statementIndex, uint offset)
}
}
}
- else if (Js::Configuration::Global.flags.IsEnabled(Js::BailOutAtEveryLineFlag))
+ else if (Js::Configuration::Global.flags.IsEnabled(Js::BailOutAtEveryLineFlag))
{
this->InjectBailOut(offset);
}
@@ -774,8 +774,6 @@ IRBuilder::Build()
if (!this->RegIsTemp(dstRegSlot) && !this->RegIsConstant(dstRegSlot))
{
SymID symId = dstSym->m_id;
-
- AssertOrFailFast(symId < m_stSlots->Length());
if (this->m_stSlots->Test(symId))
{
// For jitted loop bodies that are in a try block, we consider any symbol that has a
@@ -1562,7 +1560,7 @@ IRBuilder::BuildConstantLoads()
instr = IR::Instr::NewConstantLoad(dstOpnd, varConst, valueType, m_func,
m_func->IsOOPJIT() ? m_func->GetJITFunctionBody()->GetConstAsT(reg) : nullptr);
break;
- }
+ }
this->AddInstr(instr, Js::Constants::NoByteCodeOffset);
}
@@ -1879,9 +1877,11 @@ IRBuilder::BuildReg2(Js::OpCode newOpcode, uint32 offset, Js::RegSlot R0, Js::Re
switch (newOpcode)
{
+ case Js::OpCode::SpreadObjectLiteral:
+ // fall through
case Js::OpCode::SetComputedNameVar:
{
- IR::Instr *instr = IR::Instr::New(Js::OpCode::SetComputedNameVar, m_func);
+ IR::Instr *instr = IR::Instr::New(newOpcode, m_func);
instr->SetSrc1(this->BuildSrcOpnd(R0));
instr->SetSrc2(src1Opnd);
this->AddInstr(instr, offset);
@@ -2204,7 +2204,7 @@ IRBuilder::BuildReg3(Js::OpCode newOpcode, uint32 offset, Js::RegSlot dstRegSlot
{
InsertBailOnNoProfile(instr);
}
-
+
switch (newOpcode)
{
case Js::OpCode::LdHandlerScope:
@@ -2303,12 +2303,35 @@ void
IRBuilder::BuildReg4(Js::OpCode newOpcode, uint32 offset, Js::RegSlot dstRegSlot, Js::RegSlot src1RegSlot,
Js::RegSlot src2RegSlot, Js::RegSlot src3RegSlot)
{
- IR::Instr * instr;
- Assert(newOpcode == Js::OpCode::Concat3);
+ IR::Instr * instr = nullptr;
+ Assert(newOpcode == Js::OpCode::Concat3 || newOpcode == Js::OpCode::Restify);
IR::RegOpnd * src1Opnd = this->BuildSrcOpnd(src1RegSlot);
IR::RegOpnd * src2Opnd = this->BuildSrcOpnd(src2RegSlot);
- IR::RegOpnd * src3Opnd = this->BuildSrcOpnd(src3RegSlot);
+ IR::RegOpnd * src3Opnd = this->BuildSrcOpnd(src3RegSlot);
+
+ if (newOpcode == Js::OpCode::Restify)
+ {
+ IR::RegOpnd * src0Opnd = this->BuildSrcOpnd(dstRegSlot);
+ instr = IR::Instr::New(Js::OpCode::ExtendArg_A, IR::RegOpnd::New(TyVar, m_func), src3Opnd, m_func);
+ this->AddInstr(instr, offset);
+
+ instr = IR::Instr::New(Js::OpCode::ExtendArg_A, IR::RegOpnd::New(TyVar, m_func), src2Opnd, instr->GetDst(), m_func);
+ this->AddInstr(instr, Js::Constants::NoByteCodeOffset);
+
+ instr = IR::Instr::New(Js::OpCode::ExtendArg_A, IR::RegOpnd::New(TyVar, m_func), src1Opnd, instr->GetDst(), m_func);
+ this->AddInstr(instr, Js::Constants::NoByteCodeOffset);
+
+ instr = IR::Instr::New(Js::OpCode::ExtendArg_A, IR::RegOpnd::New(TyVar, m_func), src0Opnd, instr->GetDst(), m_func);
+ this->AddInstr(instr, Js::Constants::NoByteCodeOffset);
+
+ IR::Opnd *firstArg = instr->GetDst();
+ instr = IR::Instr::New(newOpcode, m_func);
+ instr->SetSrc1(firstArg);
+ this->AddInstr(instr, Js::Constants::NoByteCodeOffset);
+ return;
+ }
+
IR::RegOpnd * dstOpnd = this->BuildDstOpnd(dstRegSlot);
IR::RegOpnd * str1Opnd = InsertConvPrimStr(src1Opnd, offset, true);
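
The Restify path above threads its four operands through a chain of ExtendArg_A instructions, each taking one operand plus the previous link, so the final Restify carries a single Src1 from which all arguments are reachable in order. A toy sketch of that chaining idea (hypothetical node type, not the real IR):

    #include <memory>
    #include <vector>

    struct ExtendArgNode {
        int operand;                          // stands in for an IR operand
        std::shared_ptr<ExtendArgNode> prev;  // link to the previous ExtendArg_A
    };

    // Push the last operand first (src3, then src2, src1, src0 above) so a
    // later walk over the links sees the arguments in source order.
    std::shared_ptr<ExtendArgNode> BuildChain(const std::vector<int>& args) {
        std::shared_ptr<ExtendArgNode> link;
        for (auto it = args.rbegin(); it != args.rend(); ++it) {
            link = std::make_shared<ExtendArgNode>(ExtendArgNode{*it, link});
        }
        return link;  // becomes the single Src1 of the final instruction
    }
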
@@ -2530,7 +2553,7 @@ IRBuilder::BuildReg5(Js::OpCode newOpcode, uint32 offset, Js::RegSlot dstRegSlot
src3Opnd = this->BuildSrcOpnd(src3RegSlot);
src4Opnd = this->BuildSrcOpnd(src4RegSlot);
dstOpnd = this->BuildDstOpnd(dstRegSlot);
-
+
instr = IR::Instr::New(Js::OpCode::ArgOut_A, IR::RegOpnd::New(TyVar, m_func), src4Opnd, m_func);
this->AddInstr(instr, offset);
@@ -2997,6 +3020,7 @@ IRBuilder::BuildReg1Unsigned1(Js::OpCode newOpcode, uint offset, Js::RegSlot R0,
dstOpnd->SetValueTypeFixed();
}
}
+
///----------------------------------------------------------------------------
///
/// IRBuilder::BuildReg2Int1
@@ -3423,6 +3447,29 @@ IRBuilder::BuildElementSlot(Js::OpCode newOpcode, uint32 offset, Js::RegSlot fie
}
break;
+ case Js::OpCode::StPropIdArrFromVar:
+ {
+ IR::RegOpnd * src0Opnd = this->BuildSrcOpnd(fieldRegSlot);
+ IR::RegOpnd * src1Opnd = this->BuildSrcOpnd(regSlot);
+ IntConstType value = slotId;
+ IR::IntConstOpnd * valOpnd = IR::IntConstOpnd::New(value, TyInt32, m_func);
+
+ instr = IR::Instr::New(Js::OpCode::ExtendArg_A, IR::RegOpnd::New(TyVar, m_func), src1Opnd, m_func);
+ this->AddInstr(instr, offset);
+ offset = Js::Constants::NoByteCodeOffset;
+
+ instr = IR::Instr::New(Js::OpCode::ExtendArg_A, IR::RegOpnd::New(TyVar, m_func), valOpnd, instr->GetDst(), m_func);
+ this->AddInstr(instr, offset);
+
+ instr = IR::Instr::New(Js::OpCode::ExtendArg_A, IR::RegOpnd::New(TyVar, m_func), src0Opnd, instr->GetDst(), m_func);
+ this->AddInstr(instr, offset);
+
+ IR::Opnd * firstArg = instr->GetDst();
+ instr = IR::Instr::New(newOpcode, m_func);
+ instr->SetSrc1(firstArg);
+ break;
+ }
+
default:
AssertMsg(UNREACHED, "Unknown ElementSlot opcode");
Fatal();
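
StPropIdArrFromVar uses the same ExtendArg_A expansion, and like the Restify case it gives only the first emitted instruction the real byte-code offset before switching to NoByteCodeOffset. A sketch of that offset-once idiom; the rationale in the comment is an inference, and the constant is an illustrative stand-in:

    #include <cstdint>
    #include <vector>

    constexpr uint32_t kNoByteCodeOffset = UINT32_MAX;  // illustrative stand-in

    struct Emitted { int id; uint32_t offset; };

    // Presumably only the first instruction of the expansion should map back
    // to the source statement; the helpers after it are emitted offset-less.
    void EmitExpansion(std::vector<Emitted>& out, uint32_t offset) {
        out.push_back({1, offset});
        offset = kNoByteCodeOffset;
        out.push_back({2, offset});
        out.push_back({3, offset});
    }
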
@@ -3719,7 +3766,7 @@ IRBuilder::BuildElementSlotI1(Js::OpCode newOpcode, uint32 offset, Js::RegSlot r
if (stackFuncPtrSym)
{
IR::RegOpnd * dataOpnd = IR::RegOpnd::New(TyVar, m_func);
- instr = IR::Instr::New(Js::OpCode::NewScFuncData, dataOpnd, environmentOpnd,
+ instr = IR::Instr::New(Js::OpCode::NewScFuncData, dataOpnd, environmentOpnd,
IR::RegOpnd::New(stackFuncPtrSym, TyVar, m_func), m_func);
this->AddInstr(instr, offset);
instr = IR::Instr::New(newOpcode, regOpnd, functionBodySlotOpnd, dataOpnd, m_func);
@@ -3751,7 +3798,7 @@ IRBuilder::BuildElementSlotI1(Js::OpCode newOpcode, uint32 offset, Js::RegSlot r
IR::Opnd*
IRBuilder::GetEnvironmentOperand(uint32 offset)
{
- StackSym* sym = nullptr;
+ SymID symID;
// The byte code doesn't refer directly to a closure environment. Get the implicit one
// that's pointed to by the function body.
if (m_func->DoStackFrameDisplay() && m_func->GetLocalFrameDisplaySym())
@@ -3762,35 +3809,19 @@ IRBuilder::GetEnvironmentOperand(uint32 offset)
this->AddInstr(
IR::Instr::New(Js::OpCode::LdSlotArr, regOpnd, fieldOpnd, m_func),
offset);
- sym = regOpnd->m_sym;
+ symID = regOpnd->m_sym->m_id;
}
else
{
- SymID symID;
symID = this->GetEnvRegForInnerFrameDisplay();
Assert(symID != Js::Constants::NoRegister);
if (IsLoopBody() && !RegIsConstant(symID))
{
this->EnsureLoopBodyLoadSlot(symID);
}
-
- if (m_func->DoStackNestedFunc() && symID == GetEnvReg())
- {
- // Environment is not guaranteed constant during this function because it could become boxed during execution,
- // so load the environment every time you need it.
- IR::RegOpnd *regOpnd = IR::RegOpnd::New(TyVar, m_func);
- this->AddInstr(
- IR::Instr::New(Js::OpCode::LdEnv, regOpnd, m_func),
- offset);
- sym = regOpnd->m_sym;
- }
- else
- {
- sym = StackSym::FindOrCreate(symID, (Js::RegSlot)symID, m_func);
- }
}
- return IR::RegOpnd::New(sym, TyVar, m_func);
+ return IR::RegOpnd::New(StackSym::FindOrCreate(symID, (Js::RegSlot)symID, m_func), TyVar, m_func);
}
 template <typename SizePolicy>
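
The GetEnvironmentOperand rewrite above drops the per-branch StackSym bookkeeping (including the LdEnv reload path for stack nested functions) and tracks only a SymID, materializing the symbol once at the return via StackSym::FindOrCreate. The find-or-create shape, over a hypothetical symbol table:

    #include <cstdint>
    #include <unordered_map>

    struct Sym { uint32_t id; };

    struct SymTable {
        std::unordered_map<uint32_t, Sym> syms;
        // Return the existing symbol for an id, creating it on first use.
        Sym* FindOrCreate(uint32_t id) {
            return &syms.try_emplace(id, Sym{id}).first->second;
        }
    };
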
@@ -3849,7 +3880,7 @@ IRBuilder::BuildElementSlotI2(Js::OpCode newOpcode, uint32 offset, Js::RegSlot r
fieldSym = PropertySym::New(regOpnd->m_sym, slotId2, (uint32)-1, (uint)-1, PropertyKindSlots, m_func);
fieldOpnd = IR::SymOpnd::New(fieldSym, TyVar, m_func);
-
+
if (newOpcode == Js::OpCode::LdModuleSlot)
{
newOpcode = Js::OpCode::LdSlot;
@@ -3972,7 +4003,7 @@ IRBuilder::BuildElementSlotI2(Js::OpCode newOpcode, uint32 offset, Js::RegSlot r
m_func->GetTopFunc()->AddSlotArrayCheck(fieldOpnd);
}
}
- newOpcode =
+ newOpcode =
newOpcode == Js::OpCode::StInnerObjSlot || newOpcode == Js::OpCode::StInnerSlot ?
Js::OpCode::StSlot : Js::OpCode::StSlotChkUndecl;
instr = IR::Instr::New(newOpcode, fieldOpnd, regOpnd, m_func);
@@ -4089,7 +4120,7 @@ IRBuilder::BuildElementSlotI3(Js::OpCode newOpcode, uint32 offset, Js::RegSlot f
IR::Opnd * environmentOpnd = this->BuildSrcOpnd(fieldRegSlot);
IR::Opnd * homeObjOpnd = this->BuildSrcOpnd(homeObj);
regOpnd = this->BuildDstOpnd(regSlot);
-
+
instr = IR::Instr::New(Js::OpCode::ExtendArg_A, IR::RegOpnd::New(TyVar, m_func), homeObjOpnd, m_func);
this->AddInstr(instr, offset);
@@ -4100,7 +4131,7 @@ IRBuilder::BuildElementSlotI3(Js::OpCode newOpcode, uint32 offset, Js::RegSlot f
this->AddInstr(instr, offset);
instr = IR::Instr::New(newOpcode, regOpnd, instr->GetDst(), m_func);
-
+
if (regOpnd->m_sym->m_isSingleDef)
{
regOpnd->m_sym->m_isSafeThis = true;
@@ -4137,12 +4168,7 @@ IRBuilder::EnsureLoopBodyLoadSlot(SymID symId, bool isCatchObjectSym)
return;
}
StackSym * symDst = StackSym::FindOrCreate(symId, (Js::RegSlot)symId, m_func);
- if (symDst->m_isCatchObjectSym)
- {
- return;
- }
- AssertOrFailFast(symId < m_ldSlots->Length());
- if (this->m_ldSlots->TestAndSet(symId))
+ if (symDst->m_isCatchObjectSym || this->m_ldSlots->TestAndSet(symId))
{
return;
}
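
The fold into one condition works because BVFixed::TestAndSet reports the previous bit while setting it, so the membership test and the insertion become a single call. A toy equivalent:

    #include <cstddef>
    #include <vector>

    struct BitVec {
        std::vector<bool> bits;
        explicit BitVec(size_t n) : bits(n, false) {}
        // Set bit i and report whether it was already set.
        bool TestAndSet(size_t i) {
            bool old = bits[i];
            bits[i] = true;
            return old;
        }
    };
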
@@ -4186,7 +4212,6 @@ IRBuilder::SetLoopBodyStSlot(SymID symID, bool isCatchObjectSym)
return;
}
}
- AssertOrFailFast(symID < m_stSlots->Length());
this->m_stSlots->Set(symID);
}
@@ -4678,7 +4703,7 @@ IRBuilder::BuildProfiledElementCP(Js::OpCode newOpcode, uint32 offset, Js::RegSl
{
isProfiled = false;
}
-
+
bool wasNotProfiled = false;
IR::Instr *instr = nullptr;
diff --git a/deps/chakrashim/core/lib/Backend/IRBuilder.h b/deps/chakrashim/core/lib/Backend/IRBuilder.h
index c31e036ed9d..f1abb4bd18a 100644
--- a/deps/chakrashim/core/lib/Backend/IRBuilder.h
+++ b/deps/chakrashim/core/lib/Backend/IRBuilder.h
@@ -322,7 +322,7 @@ class IRBuilder
void InsertDoneLoopBodyLoopCounter(uint32 lastOffset);
IR::RegOpnd * InsertConvPrimStr(IR::RegOpnd * srcOpnd, uint offset, bool forcePreOpBailOutIfNeeded);
- IR::Opnd * IRBuilder::GetEnvironmentOperand(uint32 offset);
+ IR::Opnd * GetEnvironmentOperand(uint32 offset);
bool DoLoadInstructionArrayProfileInfo();
bool AllowNativeArrayProfileInfo();
diff --git a/deps/chakrashim/core/lib/Backend/IRBuilderAsmJs.cpp b/deps/chakrashim/core/lib/Backend/IRBuilderAsmJs.cpp
index 7434dd0b667..efbf8431739 100644
--- a/deps/chakrashim/core/lib/Backend/IRBuilderAsmJs.cpp
+++ b/deps/chakrashim/core/lib/Backend/IRBuilderAsmJs.cpp
@@ -3570,10 +3570,7 @@ IRBuilderAsmJs::BuildAsmJsLoopBodySlotOpnd(Js::RegSlot regSlot, IRType opndType)
void
IRBuilderAsmJs::EnsureLoopBodyAsmJsLoadSlot(Js::RegSlot regSlot, IRType type)
{
- BVFixed* ldSlotsBV = GetJitLoopBodyData().GetLdSlots();
-
- AssertOrFailFast(regSlot < ldSlotsBV->Length());
- if (ldSlotsBV->TestAndSet(regSlot))
+ if (GetJitLoopBodyData().GetLdSlots()->TestAndSet(regSlot))
{
return;
}
@@ -3595,6 +3592,7 @@ void
IRBuilderAsmJs::EnsureLoopBodyAsmJsStoreSlot(Js::RegSlot regSlot, IRType type)
{
Assert(!RegIsTemp(regSlot) || RegIsJitLoopYield(regSlot));
+ GetJitLoopBodyData().GetStSlots()->Set(regSlot);
EnsureLoopBodyAsmJsLoadSlot(regSlot, type);
}
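
With the new m_stSlots vector, a store to a loop-body slot now records the store and still routes through the load-slot bookkeeping, keeping the slot live into and out of the jitted loop body. Sketched with plain vectors and hypothetical names:

    #include <cstddef>
    #include <vector>

    struct SlotSets {
        std::vector<bool> ld, st;
        explicit SlotSets(size_t n) : ld(n, false), st(n, false) {}
        void EnsureLoad(size_t slot)  { ld[slot] = true; }
        // Mirrors EnsureLoopBodyAsmJsStoreSlot: mark the store, then take
        // the existing load path.
        void EnsureStore(size_t slot) { st[slot] = true; EnsureLoad(slot); }
    };
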
diff --git a/deps/chakrashim/core/lib/Backend/IRBuilderAsmJs.h b/deps/chakrashim/core/lib/Backend/IRBuilderAsmJs.h
index a407eab3f5d..ddc24928022 100644
--- a/deps/chakrashim/core/lib/Backend/IRBuilderAsmJs.h
+++ b/deps/chakrashim/core/lib/Backend/IRBuilderAsmJs.h
@@ -26,6 +26,7 @@ struct JitLoopBodyData
{
private:
BVFixed* m_ldSlots = nullptr;
+ BVFixed* m_stSlots = nullptr;
StackSym* m_loopBodyRetIPSym = nullptr;
BVFixed* m_yieldRegs = nullptr;
uint32 m_loopCurRegs[WAsmJs::LIMIT];
@@ -35,6 +36,7 @@ struct JitLoopBodyData
{
Assert(ldSlots && stSlots && loopBodyRetIPSym);
m_ldSlots = ldSlots;
+ m_stSlots = stSlots;
m_loopBodyRetIPSym = loopBodyRetIPSym;
}
// Use m_yieldRegs initialization to determine if m_loopCurRegs is initialized
@@ -55,19 +57,14 @@ struct JitLoopBodyData
}
bool IsYieldReg(Js::RegSlot reg) const
{
- if (!m_yieldRegs)
- {
- return false;
- }
- AssertOrFailFast(reg < m_yieldRegs->Length());
- return m_yieldRegs->Test(reg);
+ return m_yieldRegs && m_yieldRegs->Test(reg);
}
void SetRegIsYield(Js::RegSlot reg)
{
Assert(m_yieldRegs);
- AssertOrFailFast(reg < m_yieldRegs->Length());
m_yieldRegs->Set(reg);
}
+ BVFixed* GetStSlots() const { return m_stSlots; }
BVFixed* GetLdSlots() const { return m_ldSlots; }
StackSym* GetLoopBodyRetIPSym() const { return m_loopBodyRetIPSym; }
diff --git a/deps/chakrashim/core/lib/Backend/InductionVariable.cpp b/deps/chakrashim/core/lib/Backend/InductionVariable.cpp
index 275370ef4f7..bbb71e7a3ee 100644
--- a/deps/chakrashim/core/lib/Backend/InductionVariable.cpp
+++ b/deps/chakrashim/core/lib/Backend/InductionVariable.cpp
@@ -81,21 +81,6 @@ bool InductionVariable::Add(const int n)
if(n == 0)
return true;
- int lowerBound = changeBounds.LowerBound();
- int upperBound = changeBounds.UpperBound();
-
- if (n < 0 && (lowerBound < upperBound || (lowerBound == upperBound && lowerBound > 0)))
- {
- isChangeDeterminate = false;
- return false;
- }
-
- if (n > 0 && (lowerBound > upperBound || (lowerBound == upperBound && lowerBound < 0)))
- {
- isChangeDeterminate = false;
- return false;
- }
-
int newLowerBound;
if(changeBounds.LowerBound() == IntConstMin)
{
@@ -163,25 +148,6 @@ void InductionVariable::Merge(const InductionVariable &other)
// The value number may be different, the caller will give the merged info the appropriate value number
isChangeDeterminate &= other.isChangeDeterminate;
- if(!isChangeDeterminate)
- return;
-
- int lowerBound = this->ChangeBounds().LowerBound();
- int upperBound = this->ChangeBounds().UpperBound();
-
- int otherLowerBound = other.ChangeBounds().LowerBound();
- int otherUpperBound = other.ChangeBounds().UpperBound();
-
- if ((lowerBound < upperBound || (lowerBound == upperBound && lowerBound > 0)) && !(otherLowerBound < otherUpperBound || (otherLowerBound == otherUpperBound && otherLowerBound > 0)))
- {
- isChangeDeterminate = false;
- }
-
- if ((lowerBound > upperBound || (lowerBound == upperBound && lowerBound < 0)) && !(otherLowerBound > otherUpperBound || (otherLowerBound == otherUpperBound && otherLowerBound < 0)))
- {
- isChangeDeterminate = false;
- }
-
if(!isChangeDeterminate)
return;
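
With the directionality pre-checks above removed, Merge proceeds straight to widening the change bounds. Assuming the usual union rule (minimum of lower bounds, maximum of upper bounds, with saturation handled elsewhere in the surrounding code), the core is:

    #include <algorithm>

    struct ChangeBounds { int lower, upper; };

    // Union of two per-iteration change ranges; saturation at the integer
    // limits is omitted for brevity.
    ChangeBounds Merge(const ChangeBounds& a, const ChangeBounds& b) {
        return { std::min(a.lower, b.lower), std::max(a.upper, b.upper) };
    }
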
diff --git a/deps/chakrashim/core/lib/Backend/Inline.cpp b/deps/chakrashim/core/lib/Backend/Inline.cpp
index a09ba6bbb4a..0b65c7ceb9f 100644
--- a/deps/chakrashim/core/lib/Backend/Inline.cpp
+++ b/deps/chakrashim/core/lib/Backend/Inline.cpp
@@ -69,7 +69,7 @@ Inline::Optimize(Func *func, __in_ecount_opt(callerArgOutCount) IR::Instr *calle
if (instr->AsLabelInstr()->m_isForInExit)
{
- AssertOrFailFast(this->currentForInDepth != 0);
+ Assert(this->currentForInDepth != 0);
this->currentForInDepth--;
}
}
@@ -213,7 +213,7 @@ Inline::Optimize(Func *func, __in_ecount_opt(callerArgOutCount) IR::Instr *calle
{
if (PHASE_ENABLED(InlineCallbacksPhase, func))
{
- callbackDefInstr = TryGetCallbackDefInstr(instr);
+ callbackDefInstr = TryGetCallbackDefInstrForCallInstr(instr);
}
if (callbackDefInstr == nullptr)
@@ -228,7 +228,7 @@ Inline::Optimize(Func *func, __in_ecount_opt(callerArgOutCount) IR::Instr *calle
{
if (PHASE_ENABLED(InlineCallbacksPhase, func))
{
- callbackDefInstr = TryGetCallbackDefInstr(instr);
+ callbackDefInstr = TryGetCallbackDefInstrForCallInstr(instr);
if (callbackDefInstr == nullptr)
{
isPolymorphic = true;
@@ -244,12 +244,12 @@ Inline::Optimize(Func *func, __in_ecount_opt(callerArgOutCount) IR::Instr *calle
{
        Js::ProfileId callSiteId = static_cast<Js::ProfileId>(callbackDefInstr->AsProfiledInstr()->u.profileId);
inlineeData = callbackDefInstr->m_func->GetWorkItem()->GetJITTimeInfo()->GetCallbackInlinee(callSiteId);
- if (PHASE_TESTTRACE(Js::InlineCallbacksPhase, func) || PHASE_TRACE(Js::InlineCallbacksPhase, func))
- {
- char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
- Output::Print(_u("INLINING : Inlining callback at\tCallSite: %d\tCaller: %s (%s)\n"),
- callSiteId, inlinerData->GetBody()->GetDisplayName(), inlinerData->GetDebugNumberSet(debugStringBuffer));
- }
+
+#if ENABLE_DEBUG_CONFIG_OPTIONS
+ char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
+ INLINE_CALLBACKS_TRACE(_u("INLINING : Inlining callback at\tCallSite: %d\tCaller: %s (%s)\n"),
+ callSiteId, inlinerData->GetBody()->GetDisplayName(), inlinerData->GetDebugNumberSet(debugStringBuffer));
+#endif
}
if (isPolymorphic)
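
The inline trace above now goes through INLINE_CALLBACKS_TRACE and is compiled only under ENABLE_DEBUG_CONFIG_OPTIONS. The macro body is not shown in this diff; a generic sketch of the phase-gated tracing pattern it presumably follows:

    #include <cstdio>

    #define DEBUG_TRACING_ENABLED 1   // stand-in for the real build flag

    #if DEBUG_TRACING_ENABLED
    #define TRACE_SKETCH(...) std::printf(__VA_ARGS__)
    #else
    #define TRACE_SKETCH(...) ((void)0)   // compiles away entirely
    #endif
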
@@ -1010,20 +1010,54 @@ Inline::InlinePolymorphicFunctionUsingFixedMethods(IR::Instr *callInstr, const F
return instrNext;
}
-IR::Instr * Inline::TryGetCallbackDefInstr(IR::Instr * callInstr)
+IR::RegOpnd * Inline::GetCallbackFunctionOpnd(IR::Instr * callInstr)
+{
+ IR::Instr * callApplyLdInstr = callInstr->GetSrc1()->GetStackSym()->GetInstrDef();
+ IR::Instr * targetDefInstr = callApplyLdInstr->GetSrc1()->AsPropertySymOpnd()->GetObjectSym()->GetInstrDef();
+ return targetDefInstr->GetDst()->AsRegOpnd();
+}
+
+IR::Instr * Inline::TryGetCallbackDefInstrForCallInstr(IR::Instr * callInstr)
{
// Try to find a function argument that could be inlined.
- IR::Instr * defInstr = callInstr;
- StackSym * linkSym = callInstr->GetSrc1()->GetStackSym();
- Assert(linkSym != nullptr);
+ StackSym * callbackSym = callInstr->GetSrc1()->GetStackSym();
+ Assert(callbackSym != nullptr);
+ return TryGetCallbackDefInstr(callbackSym);
+}
- Inline * currFrame = this;
+IR::Instr * Inline::TryGetCallbackDefInstrForCallApplyTarget(IR::Instr * callApplyLdInstr)
+{
+ // Try to find a function argument that could be inlined.
+ StackSym * callbackSym = callApplyLdInstr->GetSrc1()->AsPropertySymOpnd()->GetObjectSym();
+ Assert(callbackSym != nullptr);
+ return TryGetCallbackDefInstr(callbackSym);
+}
- while (linkSym->m_isSingleDef)
+IR::Instr * Inline::TryGetCallbackDefInstrForCallInstanceFunction(IR::Instr * callInstr)
+{
+ IR::Instr * argImplicitThisInstr = nullptr;
+ IR::Instr * argFunction = nullptr;
+
+ callInstr->IterateArgInstrs([&](IR::Instr* argInstr) {
+ argFunction = argImplicitThisInstr;
+ argImplicitThisInstr = argInstr;
+ return false;
+ });
+
+ StackSym * callbackSym = argFunction->GetSrc1()->GetStackSym();
+ Assert(callbackSym != nullptr);
+ return TryGetCallbackDefInstr(callbackSym);
+}
+
+IR::Instr * Inline::TryGetCallbackDefInstr(StackSym * callbackSym)
+{
+ Inline * currFrame = this;
+ IR::Instr * defInstr = nullptr;
+ while (callbackSym->m_isSingleDef)
{
- if (linkSym->m_instrDef != nullptr)
+ if (callbackSym->m_instrDef != nullptr)
{
- defInstr = linkSym->m_instrDef;
+ defInstr = callbackSym->m_instrDef;
}
else
{
@@ -1037,7 +1071,7 @@ IR::Instr * Inline::TryGetCallbackDefInstr(IR::Instr * callInstr)
Assert(callingInstr != nullptr && callingInstr->IsProfiledInstr());
    Js::ProfileId callSiteId = static_cast<Js::ProfileId>(callingInstr->AsProfiledInstr()->u.profileId);
- Js::ArgSlot argIndex = linkSym->GetParamSlotNum() - 1;
+ Js::ArgSlot argIndex = callbackSym->GetParamSlotNum() - 1;
Func * callingFunc = callingInstr->m_func;
if (!callingFunc->GetReadOnlyProfileInfo()->CanInlineCallback(argIndex, callSiteId))
@@ -1074,8 +1108,8 @@ IR::Instr * Inline::TryGetCallbackDefInstr(IR::Instr * callInstr)
return nullptr;
}
- linkSym = linkOpnd->GetStackSym();
- if (linkSym == nullptr)
+ callbackSym = linkOpnd->GetStackSym();
+ if (callbackSym == nullptr)
{
return nullptr;
}
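
TryGetCallbackDefInstr now walks the callback symbol's single-def chain directly, hopping into the calling frame when a symbol turns out to be a parameter rather than a locally defined value. A loose sketch of the walk over hypothetical node types:

    struct Def;  // stands in for a defining IR::Instr

    struct SymNode {
        bool isSingleDef;
        Def* instrDef;    // may be null even when single-def (e.g. a parameter)
        SymNode* next;    // symbol to follow next (the caller's argument, etc.)
    };

    // Follow unique definitions until the chain ends or becomes ambiguous,
    // remembering the last concrete def seen.
    Def* WalkSingleDefChain(SymNode* sym) {
        Def* lastDef = nullptr;
        while (sym && sym->isSingleDef) {
            if (sym->instrDef) {
                lastDef = sym->instrDef;
            }
            sym = sym->next;
        }
        return lastDef;
    }
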
@@ -1326,12 +1360,13 @@ void Inline::InsertOneInlinee(IR::Instr* callInstr, IR::RegOpnd* returnValueOpnd
Js::ArgSlot actualCount = MapActuals(currentCallInstr, argOuts, Js::InlineeCallInfo::MaxInlineeArgoutCount, inlinee, (Js::ProfileId)callInstr->AsProfiledInstr()->u.profileId, &stackArgsArgOutExpanded);
Assert(actualCount > 0);
MapFormals(inlinee, argOuts, funcBody->GetInParamsCount(), actualCount, returnValueOpnd, currentCallInstr->GetSrc1(), symCallerThis, stackArgsArgOutExpanded, fixedFunctionSafeThis, argOuts);
+ inlinee->SetInlineeStart(currentCallInstr);
currentCallInstr->m_func = inlinee;
// Put the meta arguments that the stack walker expects to find on the stack.
// As all the argouts are shared among the inlinees, do this only once.
SetupInlineeFrame(inlinee, currentCallInstr, actualCount, currentCallInstr->GetSrc1());
-
+
IR::Instr* inlineeEndInstr = IR::Instr::New(Js::OpCode::InlineeEnd, inlinee);
inlineeEndInstr->SetByteCodeOffset(inlinee->m_tailInstr->GetPrevRealInstr());
inlineeEndInstr->SetSrc1(IR::IntConstOpnd::New(actualCount + Js::Constants::InlineeMetaArgCount, TyInt32, inlinee));
@@ -2158,15 +2193,17 @@ Inline::InlineBuiltInFunction(IR::Instr *callInstr, const FunctionJITTimeInfo *
IR::Instr *inlineBuiltInEndInstr = nullptr;
if (inlineCallOpCode == Js::OpCode::InlineFunctionApply)
{
- return InlineApply(callInstr, inlineeData, inlinerData, symCallerThis, pIsInlined, profileId, recursiveInlineDepth, inlineCallArgCount - (usesThisArgument ? 1 : 0));
+ inlineBuiltInEndInstr = InlineApply(callInstr, inlineeData, inlinerData, symCallerThis, pIsInlined, profileId, recursiveInlineDepth, inlineCallArgCount - (usesThisArgument ? 1 : 0));
+ return inlineBuiltInEndInstr->m_next;
}
- if (inlineCallOpCode == Js::OpCode::InlineFunctionCall)
+ if (inlineCallOpCode == Js::OpCode::InlineFunctionCall || inlineCallOpCode == Js::OpCode::InlineCallInstanceFunction)
{
- return InlineCall(callInstr, inlineeData, inlinerData, symCallerThis, pIsInlined, profileId, recursiveInlineDepth);
+ const bool isCallInstanceFunction = (inlineCallOpCode == Js::OpCode::InlineCallInstanceFunction);
+ inlineBuiltInEndInstr = InlineCall(callInstr, inlineeData, inlinerData, symCallerThis, pIsInlined, profileId, recursiveInlineDepth, isCallInstanceFunction);
+ return inlineBuiltInEndInstr->m_next;
}
-
#if defined(ENABLE_DEBUG_CONFIG_OPTIONS)
TraceInlining(inlinerData, Js::JavascriptLibrary::GetNameForBuiltIn(builtInId),
nullptr, 0, this->topFunc->GetWorkItem()->GetJITTimeInfo(), 0, nullptr, profileId, callInstr->m_func->GetTopFunc()->IsLoopBody(), builtInId);
@@ -2413,10 +2450,9 @@ IR::Instr* Inline::InlineApply(IR::Instr *callInstr, const FunctionJITTimeInfo *
// We may still decide not to inline.
*pIsInlined = false;
- IR::Instr* instrNext = callInstr->m_next;
if (argsCount == 0)
{
- return instrNext;
+ return callInstr;
}
Js::BuiltinFunction builtInId = Js::JavascriptLibrary::GetBuiltInForFuncInfo(applyData->GetLocalFunctionId());
@@ -2458,7 +2494,7 @@ IR::Instr* Inline::InlineApply(IR::Instr *callInstr, const FunctionJITTimeInfo *
if (PHASE_OFF1(Js::InlineApplyWithoutArrayArgPhase))
{
*pIsInlined = false;
- return instrNext;
+ return callInstr;
}
*pIsInlined = true;
@@ -2489,7 +2525,7 @@ IR::Instr* Inline::InlineApply(IR::Instr *callInstr, const FunctionJITTimeInfo *
{
INLINE_TESTTRACE(_u("INLINING: Skip Inline: Supporting inlining func.apply(this, array) or func.apply(this, arguments) with formals in the parent function only when func is a built-in inlinable as apply target \tCaller: %s (%s)\n"),
inlinerData->GetBody()->GetDisplayName(), inlinerData->GetDebugNumberSet(debugStringBuffer));
- return instrNext;
+ return callInstr;
}
}
@@ -2505,7 +2541,6 @@ IR::Instr* Inline::InlineApply(IR::Instr *callInstr, const FunctionJITTimeInfo *
IR::Instr * Inline::InlineApplyWithArgumentsObject(IR::Instr * callInstr, IR::Instr * argsObjectArgInstr, const FunctionJITTimeInfo * funcInfo)
{
- IR::Instr* instrNext = callInstr->m_next;
IR::Instr* ldHeapArguments = argsObjectArgInstr->GetSrc1()->GetStackSym()->GetInstrDef();
argsObjectArgInstr->ReplaceSrc1(ldHeapArguments->GetDst());
@@ -2595,7 +2630,7 @@ IR::Instr * Inline::InlineApplyWithArgumentsObject(IR::Instr * callInstr, IR::In
argout = IR::Instr::New(Js::OpCode::ArgOut_A_Dynamic, linkOpnd2, explicitThisArgOut->GetSrc1(), linkOpnd1, callInstr->m_func); // push explicit this as this pointer
callInstr->InsertBefore(argout);
- return instrNext;
+ return callInstr;
}
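
These hunks change the return contract of the apply/call helpers: instead of each helper caching instrNext up front, they return the call instruction itself and the one caller advances past it (see the m_next dereferences in InlineBuiltInFunction earlier). Reduced to its essence:

    struct Node { Node* next; };

    // The caller advances once, mirroring "return inlineBuiltInEndInstr->m_next;".
    Node* Advance(Node* processedCall) {
        return processedCall->next;
    }
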
/*
@@ -2603,7 +2638,6 @@ This method will only do CallDirect style inlining of built-in targets. No script function inlining.
*/
IR::Instr * Inline::InlineApplyBuiltInTargetWithArray(IR::Instr * callInstr, const FunctionJITTimeInfo * applyInfo, const FunctionJITTimeInfo * builtInInfo)
{
- IR::Instr* instrNext = callInstr->m_next;
IR::Instr * implicitThisArgOut = nullptr;
IR::Instr * explicitThisArgOut = nullptr;
IR::Instr * arrayArgOut = nullptr;
@@ -2619,9 +2653,9 @@ IR::Instr * Inline::InlineApplyBuiltInTargetWithArray(IR::Instr * callInstr, con
IR::Instr* applyLdInstr = nullptr;
IR::Instr* applyTargetLdInstr = nullptr;
- if (!TryGetApplyAndTargetLdInstrs(callInstr, &applyLdInstr, &applyTargetLdInstr))
+ if (!TryGetCallApplyAndTargetLdInstrs(callInstr, &applyLdInstr, &applyTargetLdInstr))
{
- return instrNext;
+ return callInstr;
}
// Fixed function/function object checks for target built-in
callInstr->ReplaceSrc1(applyTargetLdInstr->GetDst());
@@ -2686,12 +2720,11 @@ IR::Instr * Inline::InlineApplyBuiltInTargetWithArray(IR::Instr * callInstr, con
callInstr->ReplaceSrc1(helperCallOpnd);
callInstr->ReplaceSrc2(argOut->GetDst());
- return instrNext;
+ return callInstr;
}
IR::Instr * Inline::InlineApplyWithoutArrayArgument(IR::Instr *callInstr, const FunctionJITTimeInfo * applyInfo, const FunctionJITTimeInfo * applyTargetInfo)
{
- IR::Instr* instrNext = callInstr->m_next;
IR::Instr * implicitThisArgOut = nullptr;
IR::Instr * explicitThisArgOut = nullptr;
IR::Instr * dummyInstr = nullptr;
@@ -2730,12 +2763,12 @@ IR::Instr * Inline::InlineApplyWithoutArrayArgument(IR::Instr *callInstr, const
if (!callTargetStackSym->IsSingleDef())
{
- return instrNext;
+ return callInstr;
}
if (!applyTargetInfo)
{
- return instrNext;
+ return callInstr;
}
bool safeThis = false;
@@ -2747,7 +2780,7 @@ IR::Instr * Inline::InlineApplyWithoutArrayArgument(IR::Instr *callInstr, const
callInstr->InsertBefore(bytecodeUses);
}
- return instrNext;
+ return callInstr;
}
void Inline::GetArgInstrsForCallAndApply(IR::Instr* callInstr, IR::Instr** implicitThisArgOut, IR::Instr** explicitThisArgOut, IR::Instr** argumentsOrArrayArgOut, uint &argOutCount)
@@ -2775,7 +2808,7 @@ void Inline::GetArgInstrsForCallAndApply(IR::Instr* callInstr, IR::Instr** impli
}
_Success_(return != false)
-bool Inline::TryGetApplyAndTargetLdInstrs(IR::Instr * callInstr, _Outptr_result_nullonfailure_ IR::Instr ** applyLdInstr, _Outptr_result_nullonfailure_ IR::Instr ** applyTargetLdInstr)
+bool Inline::TryGetCallApplyAndTargetLdInstrs(IR::Instr * callInstr, _Outptr_result_nullonfailure_ IR::Instr ** applyLdInstr, _Outptr_result_nullonfailure_ IR::Instr ** applyTargetLdInstr)
{
IR::Opnd* applyOpnd = callInstr->GetSrc1();
Assert(applyOpnd->IsRegOpnd());
@@ -2786,11 +2819,66 @@ bool Inline::TryGetApplyAndTargetLdInstrs(IR::Instr * callInstr, _Outptr_result_
*applyTargetLdInstr = nullptr;
return false;
}
- *applyLdInstr = applySym->GetInstrDef();;
+ *applyLdInstr = applySym->GetInstrDef();
*applyTargetLdInstr = (*applyLdInstr)->m_prev;
return true;
}
+bool
+Inline::TryGetCallApplyInlineeData(
+ const FunctionJITTimeInfo* inlinerData,
+ IR::Instr * callInstr,
+ IR::Instr * callApplyLdInstr,
+ IR::Instr * callApplyTargetLdInstr,
+ const FunctionJITTimeInfo ** inlineeData,
+ Js::InlineCacheIndex * inlineCacheIndex,
+ IR::Instr ** callbackDefInstr,
+ bool isCallInstanceFunction
+ )
+{
+ *callbackDefInstr = nullptr;
+
+ if (callApplyTargetLdInstr->m_opcode != Js::OpCode::LdFldForCallApplyTarget ||
+ ((callApplyTargetLdInstr->AsProfiledInstr()->u.FldInfo().flags & Js::FldInfo_FromAccessor) != 0))
+ {
+ // Try to find a callback def instr for the method.
+ if (isCallInstanceFunction)
+ {
+ *callbackDefInstr = TryGetCallbackDefInstrForCallInstanceFunction(callInstr);
+ }
+ else
+ {
+ *callbackDefInstr = TryGetCallbackDefInstrForCallApplyTarget(callApplyLdInstr);
+ }
+
+ if (*callbackDefInstr == nullptr)
+ {
+ return false;
+ }
+
+    Js::ProfileId callSiteId = static_cast<Js::ProfileId>((*callbackDefInstr)->AsProfiledInstr()->u.profileId);
+ *inlineeData = (*callbackDefInstr)->m_func->GetWorkItem()->GetJITTimeInfo()->GetCallbackInlinee(callSiteId);
+
+#if ENABLE_DEBUG_CONFIG_OPTIONS
+ char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
+ INLINE_CALLBACKS_TRACE(_u("INLINING : Found callback def instr for call/apply target callback at\tCallSite: %d\tCaller: %s (%s)\n"),
+ callSiteId, inlinerData->GetBody()->GetDisplayName(), inlinerData->GetDebugNumberSet(debugStringBuffer));
+#endif
+
+ return true;
+ }
+
+ IR::Opnd *callTargetLdOpnd = callApplyTargetLdInstr->GetSrc1();
+ if (!callTargetLdOpnd->IsSymOpnd() || !callTargetLdOpnd->AsSymOpnd()->IsPropertySymOpnd())
+ {
+ return false;
+ }
+
+ *inlineCacheIndex = callTargetLdOpnd->AsPropertySymOpnd()->m_inlineCacheIndex;
+ *inlineeData = inlinerData->GetLdFldInlinee(*inlineCacheIndex);
+ return true;
+}
+
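
TryGetCallApplyInlineeData takes the callback route whenever the target load is not a clean LdFldForCallApplyTarget or the field came from an accessor; a getter could hand back a different function on each load, so the inline-cache path is only safe for plain data loads. The flag test in isolation (the bit value here is illustrative, not the engine's):

    #include <cstdint>

    enum FldInfoFlags : uint32_t {
        FldInfo_NoInfo       = 0,
        FldInfo_FromAccessor = 1u << 4,  // illustrative bit position only
    };

    inline bool LoadedFromAccessor(uint32_t flags) {
        return (flags & FldInfo_FromAccessor) != 0;
    }
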
/*
This method only inlines targets which are script functions, under the
condition that the second argument (if any) passed to apply is arguments object.
@@ -2814,26 +2902,19 @@ bool Inline::InlineApplyScriptTarget(IR::Instr *callInstr, const FunctionJITTime
IR::Instr* applyLdInstr = nullptr;
IR::Instr* applyTargetLdInstr = nullptr;
- if (!TryGetApplyAndTargetLdInstrs(callInstr, &applyLdInstr, &applyTargetLdInstr))
+ if (!TryGetCallApplyAndTargetLdInstrs(callInstr, &applyLdInstr, &applyTargetLdInstr))
{
return false;
}
- if(applyTargetLdInstr->m_opcode != Js::OpCode::LdFldForCallApplyTarget ||
- ((applyTargetLdInstr->AsProfiledInstr()->u.FldInfo().flags & Js::FldInfo_FromAccessor) != 0))
- {
- return false;
- }
-
- IR::Opnd *applyTargetLdOpnd = applyTargetLdInstr->GetSrc1();
- if (!applyTargetLdOpnd->IsSymOpnd() || !applyTargetLdOpnd->AsSymOpnd()->IsPropertySymOpnd())
+ const FunctionJITTimeInfo * inlineeData = nullptr;
+ Js::InlineCacheIndex inlineCacheIndex = 0;
+ IR::Instr * callbackDefInstr = nullptr;
+ if (!TryGetCallApplyInlineeData(inlinerData, callInstr, applyLdInstr, applyTargetLdInstr, &inlineeData, &inlineCacheIndex, &callbackDefInstr, false))
{
return false;
}
- const auto inlineCacheIndex = applyTargetLdOpnd->AsPropertySymOpnd()->m_inlineCacheIndex;
- const auto inlineeData = inlinerData->GetLdFldInlinee(inlineCacheIndex);
-
if ((!isArrayOpndArgumentsObject && (argsCount != 1)) || SkipCallApplyScriptTargetInlining_Shared(callInstr, inlinerData, inlineeData, /*isApplyTarget*/ true, /*isCallTarget*/ false))
{
*pInlineeData = inlineeData;
@@ -2847,10 +2928,13 @@ bool Inline::InlineApplyScriptTarget(IR::Instr *callInstr, const FunctionJITTime
return false;
}
+ const bool targetIsCallback = callbackDefInstr != nullptr;
+
StackSym* originalCallTargetStackSym = callInstr->GetSrc1()->GetStackSym();
bool originalCallTargetOpndIsJITOpt = callInstr->GetSrc1()->GetIsJITOptimizedReg();
bool safeThis = false;
- if (!TryGetFixedMethodsForBuiltInAndTarget(callInstr, inlinerData, inlineeData, applyFuncInfo, applyLdInstr, applyTargetLdInstr, safeThis, /*isApplyTarget*/ true))
+
+ if (!TryGetFixedMethodsForBuiltInAndTarget(callInstr, inlinerData, inlineeData, applyFuncInfo, applyLdInstr, applyTargetLdInstr, safeThis, /*isApplyTarget*/ true, targetIsCallback))
{
return false;
}
@@ -2931,16 +3015,27 @@ bool Inline::InlineApplyScriptTarget(IR::Instr *callInstr, const FunctionJITTime
startCall->GetSrc1()->AsIntConstOpnd()->IncrValue(-1); // update the count of argouts as seen by JIT, in the start call instruction
*returnInstr = InlineCallApplyTarget_Shared(callInstr, originalCallTargetOpndIsJITOpt, originalCallTargetStackSym, inlineeData, inlineCacheIndex,
- safeThis, /*isApplyTarget*/ true, /*isCallTarget*/ false, recursiveInlineDepth);
+ safeThis, /*isApplyTarget*/ true, /*isCallTarget*/ false, callbackDefInstr, recursiveInlineDepth);
return true;
}
IR::Instr *
Inline::InlineCallApplyTarget_Shared(IR::Instr *callInstr, bool originalCallTargetOpndIsJITOpt, StackSym* originalCallTargetStackSym, const FunctionJITTimeInfo *const inlineeData,
- uint inlineCacheIndex, bool safeThis, bool isApplyTarget, bool isCallTarget, uint recursiveInlineDepth)
+ uint inlineCacheIndex, bool safeThis, bool isApplyTarget, bool isCallTarget, IR::Instr * inlineeDefInstr, uint recursiveInlineDepth)
{
Assert(isApplyTarget ^ isCallTarget);
+ const bool isCallback = inlineeDefInstr != nullptr;
+
+#if ENABLE_DEBUG_CONFIG_OPTIONS
+ if (isCallback)
+ {
+ char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
+ INLINE_CALLBACKS_TRACE(_u("INLINING CALLBACK : Inlining callback for call/apply target : \t%s (%s)\n"), inlineeData->GetBody()->GetDisplayName(),
+ inlineeData->GetDebugNumberSet(debugStringBuffer));
+ }
+#endif
+
// returnValueOpnd
IR::RegOpnd * returnValueOpnd;
Js::RegSlot returnRegSlot;
@@ -2970,6 +3065,10 @@ Inline::InlineCallApplyTarget_Shared(IR::Instr *callInstr, bool originalCallTarg
workItemData->jitData = (FunctionJITTimeDataIDL*)(inlineeData);
JITTimeWorkItem * jitWorkItem = JitAnew(this->topFunc->m_alloc, JITTimeWorkItem, workItemData);
+ const FunctionJITRuntimeInfo * runtimeInfo = (inlineeDefInstr == nullptr)
+ ? callInstr->m_func->GetWorkItem()->GetJITTimeInfo()->GetLdFldInlineeRuntimeData(inlineCacheIndex)
+        : inlineeDefInstr->m_func->GetWorkItem()->GetJITTimeInfo()->GetInlineeForCallbackInlineeRuntimeData(static_cast<Js::ProfileId>(inlineeDefInstr->AsProfiledInstr()->u.profileId), inlineeData->GetBody()->GetAddr());
+
JITTimePolymorphicInlineCacheInfo * entryPointPolymorphicInlineCacheInfo = inlineeData->HasBody() ? this->topFunc->GetWorkItem()->GetInlineePolymorphicInlineCacheInfo(inlineeData->GetBody()->GetAddr()) : nullptr;
#if !FLOATVAR
Func * inlinee = JitAnew(this->topFunc->m_alloc,
@@ -2980,7 +3079,7 @@ Inline::InlineCallApplyTarget_Shared(IR::Instr *callInstr, bool originalCallTarg
this->topFunc->GetScriptContextInfo(),
this->topFunc->GetJITOutput()->GetOutputData(),
nullptr,
- callInstr->m_func->GetWorkItem()->GetJITTimeInfo()->GetLdFldInlineeRuntimeData(inlineCacheIndex),
+ runtimeInfo,
entryPointPolymorphicInlineCacheInfo,
this->topFunc->GetCodeGenAllocators(),
this->topFunc->GetNumberAllocator(),
@@ -3001,7 +3100,7 @@ Inline::InlineCallApplyTarget_Shared(IR::Instr *callInstr, bool originalCallTarg
this->topFunc->GetScriptContextInfo(),
this->topFunc->GetJITOutput()->GetOutputData(),
nullptr,
- callInstr->m_func->GetWorkItem()->GetJITTimeInfo()->GetLdFldInlineeRuntimeData(inlineCacheIndex),
+ runtimeInfo,
entryPointPolymorphicInlineCacheInfo,
this->topFunc->GetCodeGenAllocators(),
this->topFunc->GetCodeGenProfiler(),
@@ -3017,7 +3116,7 @@ Inline::InlineCallApplyTarget_Shared(IR::Instr *callInstr, bool originalCallTarg
// instrNext
IR::Instr* instrNext = callInstr->m_next;
- return InlineFunctionCommon(callInstr, originalCallTargetOpndIsJITOpt, originalCallTargetStackSym, inlineeData, inlinee, instrNext, returnValueOpnd, callInstr, nullptr, recursiveInlineDepth, safeThis, isApplyTarget);
+ return InlineFunctionCommon(callInstr, originalCallTargetOpndIsJITOpt, originalCallTargetStackSym, inlineeData, inlinee, instrNext, returnValueOpnd, callInstr, nullptr, recursiveInlineDepth, safeThis, isApplyTarget)->m_prev;
}
IR::Opnd *
@@ -3029,9 +3128,8 @@ Inline::ConvertToInlineBuiltInArgOut(IR::Instr * argInstr)
}
IR::Instr*
-Inline::InlineCall(IR::Instr *callInstr, const FunctionJITTimeInfo *funcInfo, const FunctionJITTimeInfo * inlinerData, const StackSym *symCallerThis, bool* pIsInlined, uint callSiteId, uint recursiveInlineDepth)
+Inline::InlineCall(IR::Instr *callInstr, const FunctionJITTimeInfo *funcInfo, const FunctionJITTimeInfo * inlinerData, const StackSym *symCallerThis, bool* pIsInlined, uint callSiteId, uint recursiveInlineDepth, bool isCallInstanceFunction)
{
- IR::Instr* instrNext = callInstr->m_next;
Func *func = callInstr->m_func;
Js::BuiltinFunction builtInId = Js::JavascriptLibrary::GetBuiltInForFuncInfo(funcInfo->GetLocalFunctionId());
@@ -3039,7 +3137,7 @@ Inline::InlineCall(IR::Instr *callInstr, const FunctionJITTimeInfo *funcInfo, co
if (PHASE_OFF(Js::InlineCallPhase, this->topFunc) || PHASE_OFF(Js::InlineCallPhase, func)
|| !this->topFunc->GetJITFunctionBody()->GetInParamsCount())
{
- return instrNext;
+ return callInstr;
}
// Convert all the current ARG_OUT to ArgOut_A_InlineBuiltIn
@@ -3048,7 +3146,7 @@ Inline::InlineCall(IR::Instr *callInstr, const FunctionJITTimeInfo *funcInfo, co
if (!GetDefInstr(linkOpnd)->GetSrc2()->IsSymOpnd())
{
// There is no benefit of inlining.call() with no arguments.
- return instrNext;
+ return callInstr;
}
*pIsInlined = true;
@@ -3057,7 +3155,7 @@ Inline::InlineCall(IR::Instr *callInstr, const FunctionJITTimeInfo *funcInfo, co
IR::Instr * returnInstr = nullptr;
if (!PHASE_OFF(Js::InlineCallTargetPhase, this->topFunc))
{
- if (InlineCallTarget(callInstr, inlinerData, &inlineeData, funcInfo, symCallerThis, &returnInstr, recursiveInlineDepth))
+ if (InlineCallTarget(callInstr, inlinerData, &inlineeData, funcInfo, symCallerThis, &returnInstr, recursiveInlineDepth, isCallInstanceFunction))
{
Assert(returnInstr);
return returnInstr;
@@ -3074,12 +3172,13 @@ Inline::InlineCall(IR::Instr *callInstr, const FunctionJITTimeInfo *funcInfo, co
// We are trying to optimize this.superConstructor.call(this, a, b,c);
// argImplicitInstr represents this.superConstructor which we need to call directly.
- IR::Instr * argImplicitInstr = nullptr;
- IR::Instr * dummyInstr1 = nullptr;
- IR::Instr * dummyInstr2 = nullptr;
- this->GetArgInstrsForCallAndApply(callInstr, &argImplicitInstr, &dummyInstr1, &dummyInstr2, actualCount);
+ IR::Instr * argImplicitThisInstr = nullptr;
+ IR::Instr * argSecond = nullptr;
+ IR::Instr * dummyInstr = nullptr;
+ this->GetArgInstrsForCallAndApply(callInstr, &argImplicitThisInstr, &argSecond, &dummyInstr, actualCount);
- Assert(argImplicitInstr);
+ IR::Instr * functionInstr = isCallInstanceFunction ? argSecond : argImplicitThisInstr;
+ Assert(functionInstr);
IR::SymOpnd* orgLinkOpnd = callInstr->GetSrc2()->AsSymOpnd();
@@ -3087,7 +3186,7 @@ Inline::InlineCall(IR::Instr *callInstr, const FunctionJITTimeInfo *funcInfo, co
InsertInlineeBuiltInStartEndTags(callInstr, actualCount);
- uint actualCountToInlinedCall = actualCount - 1;
+ uint actualCountToInlinedCall = actualCount - (isCallInstanceFunction ? 2 : 1);
IR::Instr *startCall = IR::Instr::New(Js::OpCode::StartCall, func);
startCall->SetDst(IR::RegOpnd::New(TyVar, func));
@@ -3095,7 +3194,7 @@ Inline::InlineCall(IR::Instr *callInstr, const FunctionJITTimeInfo *funcInfo, co
callInstr->InsertBefore(startCall);
- callInstr->ReplaceSrc1(argImplicitInstr->GetSrc1());
+ callInstr->ReplaceSrc1(functionInstr->GetSrc1());
callInstr->UnlinkSrc2();
callInstr->m_opcode = Js::OpCode::CallI;
@@ -3110,9 +3209,9 @@ Inline::InlineCall(IR::Instr *callInstr, const FunctionJITTimeInfo *funcInfo, co
IR::Opnd *orgSrc1 = orgArgout->GetSrc1();
// Change ArgOut to use temp as src1.
- StackSym * stackSym = StackSym::New(orgSrc1->GetStackSym()->GetType(), argImplicitInstr->m_func);
- IR::Opnd* tempDst = IR::RegOpnd::New(stackSym, orgSrc1->GetType(), argImplicitInstr->m_func);
- IR::Instr *assignInstr = IR::Instr::New(Func::GetLoadOpForType(orgSrc1->GetType()), tempDst, orgSrc1, argImplicitInstr->m_func);
+ StackSym * stackSym = StackSym::New(orgSrc1->GetStackSym()->GetType(), functionInstr->m_func);
+ IR::Opnd* tempDst = IR::RegOpnd::New(stackSym, orgSrc1->GetType(), functionInstr->m_func);
+ IR::Instr *assignInstr = IR::Instr::New(Func::GetLoadOpForType(orgSrc1->GetType()), tempDst, orgSrc1, functionInstr->m_func);
assignInstr->SetByteCodeOffset(orgArgout);
tempDst->SetIsJITOptimizedReg(true);
orgArgout->InsertBefore(assignInstr);
@@ -3127,68 +3226,73 @@ Inline::InlineCall(IR::Instr *callInstr, const FunctionJITTimeInfo *funcInfo, co
insertBeforeInstr = clonedArgout;
}
clonedArgout->SetSrc2(startCall->GetDst());
- Assert(GetDefInstr(orgLinkOpnd) == argImplicitInstr);
- return instrNext;
+ Assert(GetDefInstr(orgLinkOpnd) == functionInstr);
+ return callInstr;
}
bool
Inline::InlineCallTarget(IR::Instr *callInstr, const FunctionJITTimeInfo* inlinerData, const FunctionJITTimeInfo** pInlineeData, const FunctionJITTimeInfo *callFuncInfo,
- const StackSym *symCallerThis, IR::Instr ** returnInstr, uint recursiveInlineDepth)
+ const StackSym *symCallerThis, IR::Instr ** returnInstr, uint recursiveInlineDepth, bool isCallInstanceFunction)
{
- IR::Opnd* src1 = callInstr->GetSrc1();
- Assert(src1->IsRegOpnd());
- StackSym* sym = src1->AsRegOpnd()->GetStackSym();
- if (!sym->IsSingleDef())
+ IR::Instr* callLdInstr = nullptr;
+ IR::Instr* callTargetLdInstr = nullptr;
+ if (!TryGetCallApplyAndTargetLdInstrs(callInstr, &callLdInstr, &callTargetLdInstr))
{
return false;
}
- IR::Instr* callLdInstr = sym->GetInstrDef();
- Assert(callLdInstr);
- IR::Instr* callTargetLdInstr = callLdInstr->m_prev;
- if (callTargetLdInstr->m_opcode != Js::OpCode::LdFldForCallApplyTarget ||
- ((callTargetLdInstr->AsProfiledInstr()->u.FldInfo().flags & Js::FldInfoFlags::FldInfo_FromAccessor) != 0))
- {
- return false;
- }
-
- IR::Opnd* callTargetLdOpnd = callTargetLdInstr->GetSrc1();
- if (!callTargetLdOpnd->IsSymOpnd() || !callTargetLdOpnd->AsSymOpnd()->IsPropertySymOpnd())
+ const FunctionJITTimeInfo * inlineeData = nullptr;
+ Js::InlineCacheIndex inlineCacheIndex = 0;
+ IR::Instr * callbackDefInstr = nullptr;
+ if (!TryGetCallApplyInlineeData(inlinerData, callInstr, callLdInstr, callTargetLdInstr, &inlineeData, &inlineCacheIndex, &callbackDefInstr, isCallInstanceFunction))
{
return false;
}
- const auto inlineCacheIndex = callTargetLdOpnd->AsPropertySymOpnd()->m_inlineCacheIndex;
- const auto inlineeData = inlinerData->GetLdFldInlinee(inlineCacheIndex);
-
if (SkipCallApplyScriptTargetInlining_Shared(callInstr, inlinerData, inlineeData, /*isApplyTarget*/ false, /*isCallTarget*/ true))
{
*pInlineeData = inlineeData;
return false;
}
+ const bool targetIsCallback = callbackDefInstr != nullptr;
+
StackSym* originalCallTargetStackSym = callInstr->GetSrc1()->GetStackSym();
bool originalCallTargetOpndIsJITOpt = callInstr->GetSrc1()->GetIsJITOptimizedReg();
bool safeThis = false;
- if (!TryGetFixedMethodsForBuiltInAndTarget(callInstr, inlinerData, inlineeData, callFuncInfo, callLdInstr, callTargetLdInstr, safeThis, /*isApplyTarget*/ false))
+
+ if (!TryGetFixedMethodsForBuiltInAndTarget(callInstr, inlinerData, inlineeData, callFuncInfo, callLdInstr, callTargetLdInstr, safeThis, /*isApplyTarget*/ false, targetIsCallback))
{
return false;
}
- IR::Instr* implicitThisArgOut = nullptr;
- IR::Instr* explicitThisArgOut = nullptr;
+ IR::Instr * firstArgOut = nullptr;
+ IR::Instr * secondArgOut = nullptr;
+ IR::Instr * thirdArgOut = nullptr;
callInstr->IterateArgInstrs([&] (IR::Instr* argInstr)
{
- explicitThisArgOut = implicitThisArgOut;
- implicitThisArgOut = argInstr;
+ thirdArgOut = secondArgOut;
+ secondArgOut = firstArgOut;
+ firstArgOut = argInstr;
argInstr->GenerateBytecodeArgOutCapture(); // Generate BytecodeArgOutCapture here to capture the implicit "this" argout (which will be removed) as well,
// so that any bailout in the call sequence restores the argouts stack as the interpreter would expect it to be.
- argInstr->GetDst()->AsSymOpnd()->GetStackSym()->DecrementArgSlotNum(); // We will be removing implicit "this" argout
+
+ StackSym * argSym = argInstr->GetDst()->AsSymOpnd()->GetStackSym();
+ argSym->DecrementArgSlotNum(); // We will be removing implicit "this" argout
+
+ if (isCallInstanceFunction && argSym->GetArgSlotNum() != 0)
+ {
+ argSym->DecrementArgSlotNum(); // We will also be removing the function argout
+ }
+
return false;
});
+ IR::Instr * implicitThisArgOut = firstArgOut;
+ IR::Instr * explicitThisArgOut = isCallInstanceFunction ? thirdArgOut : secondArgOut;
+
Assert(explicitThisArgOut);
Assert(explicitThisArgOut->HasByteCodeArgOutCapture());
if (safeThis)
@@ -3210,16 +3314,28 @@ Inline::InlineCallTarget(IR::Instr *callInstr, const FunctionJITTimeInfo* inline
explicitThisArgOut->ReplaceSrc2(startCall->GetDst());
+ if (isCallInstanceFunction)
+ {
+ IR::Instr * functionArg = secondArgOut;
+ IR::Instr * bytecodeArgOutUse = IR::Instr::New(Js::OpCode::BytecodeArgOutUse, callInstr->m_func);
+ callInstr->ReplaceSrc1(functionArg->GetSrc1());
+ bytecodeArgOutUse->SetSrc1(functionArg->GetSrc1());
+ callInstr->InsertBefore(bytecodeArgOutUse); // Need to keep the function argout live till the call instruction for it to be captured by any bailout in the call sequence.
+ functionArg->Remove();
+ }
+
IR::Instr * bytecodeArgOutUse = IR::Instr::New(Js::OpCode::BytecodeArgOutUse, callInstr->m_func);
bytecodeArgOutUse->SetSrc1(implicitThisArgOut->GetSrc1());
callInstr->InsertBefore(bytecodeArgOutUse); // Need to keep the implicit "this" argout live till the call instruction for it to be captured by any bailout in the call sequence.
implicitThisArgOut->Remove();
startCall->SetSrc2(IR::IntConstOpnd::New(startCall->GetArgOutCount(/*getInterpreterArgOutCount*/ false), TyUint32, startCall->m_func));
- startCall->GetSrc1()->AsIntConstOpnd()->SetValue(startCall->GetSrc1()->AsIntConstOpnd()->GetValue() - 1);
+
+ uint argsRemoved = isCallInstanceFunction ? 2 : 1;
+ startCall->GetSrc1()->AsIntConstOpnd()->SetValue(startCall->GetSrc1()->AsIntConstOpnd()->GetValue() - argsRemoved);
*returnInstr = InlineCallApplyTarget_Shared(callInstr, originalCallTargetOpndIsJITOpt, originalCallTargetStackSym, inlineeData, inlineCacheIndex,
- safeThis, /*isApplyTarget*/ false, /*isCallTarget*/ true, recursiveInlineDepth);
+ safeThis, /*isApplyTarget*/ false, /*isCallTarget*/ true, callbackDefInstr, recursiveInlineDepth);
return true;
}
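
Because the implicit "this" argout is removed, and in the instance-function form the function argout as well, the surviving argouts are renumbered downward so the out-arg slots stay dense and StartCall's count can drop by one or two. A simplified view with a hypothetical helper (the real code decrements per-sym, and slot 0 only once):

    #include <vector>

    std::vector<int> RenumberArgSlots(std::vector<int> slots, int removedCount) {
        for (int& s : slots) {
            s -= removedCount;   // echoes the repeated DecrementArgSlotNum calls
        }
        return slots;
    }
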
@@ -3276,7 +3392,7 @@ Inline::SkipCallApplyScriptTargetInlining_Shared(IR::Instr *callInstr, const Fun
bool
Inline::TryGetFixedMethodsForBuiltInAndTarget(IR::Instr *callInstr, const FunctionJITTimeInfo* inlinerData, const FunctionJITTimeInfo* inlineeData, const FunctionJITTimeInfo *builtInFuncInfo,
- IR::Instr* builtInLdInstr, IR::Instr* targetLdInstr, bool& safeThis, bool isApplyTarget)
+ IR::Instr* builtInLdInstr, IR::Instr* targetLdInstr, bool& safeThis, bool isApplyTarget, bool isCallback)
{
#if ENABLE_DEBUG_CONFIG_OPTIONS
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
@@ -3292,6 +3408,29 @@ Inline::TryGetFixedMethodsForBuiltInAndTarget(IR::Instr *callInstr, const Functi
IR::ByteCodeUsesInstr * useCallTargetInstr = IR::ByteCodeUsesInstr::New(callInstr);
+ if (isCallback)
+ {
+ IR::Opnd * functionOpnd = GetCallbackFunctionOpnd(callInstr);
+
+ // Emit Fixed Method check for apply/call
+ safeThis = false;
+ if (!TryOptimizeCallInstrWithFixedMethod(callInstr, builtInFuncInfo/*funcinfo for apply/call */, false /*isPolymorphic*/, true /*isBuiltIn*/, false /*isCtor*/, true /*isInlined*/, safeThis /*unused here*/))
+ {
+ callInstr->ReplaceSrc1(builtInLdInstr->GetDst());
+ INLINE_CALLBACKS_TRACE(_u("INLINING: Skip Inline: Skipping callback.%s target inlining, did not get fixed method for %s \tInlinee: %s (%s)\tCaller: %s\t(%s) \tTop Func:%s\t(%s)\n"), isApplyTarget ? _u("apply") : _u("call"), isApplyTarget ? _u("apply") : _u("call"),
+ inlineeData->GetBody()->GetDisplayName(), inlineeData->GetDebugNumberSet(debugStringBuffer),
+ inlinerData->GetBody()->GetDisplayName(), inlinerData->GetDebugNumberSet(debugStringBuffer2),
+ this->topFunc->GetJITFunctionBody()->GetDisplayName(), this->topFunc->GetDebugNumberSet(debugStringBuffer3));
+ return false;
+ }
+ callInstr->m_opcode = originalCallOpCode;
+ callInstr->ReplaceSrc1(functionOpnd);
+
+ useCallTargetInstr->SetRemovedOpndSymbol(originalCallTargetOpndJITOpt, originalCallTargetStackSym->m_id);
+ callInstr->InsertBefore(useCallTargetInstr);
+ return true;
+ }
+
safeThis = false;
// Check if we can get fixed method for call
if (TryOptimizeCallInstrWithFixedMethod(callInstr, builtInFuncInfo/*funcinfo for call*/, false /*isPolymorphic*/, true /*isBuiltIn*/, false /*isCtor*/, true /*isInlined*/,
@@ -3305,7 +3444,7 @@ Inline::TryGetFixedMethodsForBuiltInAndTarget(IR::Instr *callInstr, const Functi
safeThis /*unused here*/, true /*dontOptimizeJustCheck*/))
{
callInstr->ReplaceSrc1(builtInLdInstr->GetDst());
- INLINE_TESTTRACE(_u("INLINING: Skip Inline: Skipping %s target inlining, did not get fixed method for %s target \tInlinee: %s (#%d)\tCaller: %s\t(#%d) \tTop Func:%s\t(#%d)\n"), isApplyTarget ? _u("apply") : _u("call"), isApplyTarget ? _u("apply") : _u("call"),
+ INLINE_TESTTRACE(_u("INLINING: Skip Inline: Skipping %s target inlining, did not get fixed method for %s target \tInlinee: %s (%s)\tCaller: %s\t(%s) \tTop Func:%s\t(%s)\n"), isApplyTarget ? _u("apply") : _u("call"), isApplyTarget ? _u("apply") : _u("call"),
inlineeData->GetBody()->GetDisplayName(), inlineeData->GetDebugNumberSet(debugStringBuffer),
inlinerData->GetBody()->GetDisplayName(), inlinerData->GetDebugNumberSet(debugStringBuffer2),
this->topFunc->GetJITFunctionBody()->GetDisplayName(), this->topFunc->GetDebugNumberSet(debugStringBuffer3));
@@ -3314,7 +3453,7 @@ Inline::TryGetFixedMethodsForBuiltInAndTarget(IR::Instr *callInstr, const Functi
}
else
{
- INLINE_TESTTRACE(_u("INLINING: Skip Inline: Skipping %s target inlining, did not get fixed method for %s \tInlinee: %s (#%d)\tCaller: %s\t(#%d) \tTop Func:%s\t(#%d)\n"), isApplyTarget ? _u("apply") : _u("call"), isApplyTarget ? _u("apply") : _u("call"),
+ INLINE_TESTTRACE(_u("INLINING: Skip Inline: Skipping %s target inlining, did not get fixed method for %s \tInlinee: %s (%s)\tCaller: %s\t(%s) \tTop Func:%s\t(%s)\n"), isApplyTarget ? _u("apply") : _u("call"), isApplyTarget ? _u("apply") : _u("call"),
inlineeData->GetBody()->GetDisplayName(), inlineeData->GetDebugNumberSet(debugStringBuffer),
inlinerData->GetBody()->GetDisplayName(), inlinerData->GetDebugNumberSet(debugStringBuffer2),
this->topFunc->GetJITFunctionBody()->GetDisplayName(), this->topFunc->GetDebugNumberSet(debugStringBuffer3));
@@ -3849,6 +3988,7 @@ Inline::InlineFunctionCommon(IR::Instr *callInstr, bool originalCallTargetOpndIs
callInstr->m_opcode = Js::OpCode::InlineeStart;
// Set it to belong to the inlinee, so that we can use the actual count when lowering InlineeStart
+ inlinee->SetInlineeStart(callInstr);
callInstr->m_func = inlinee;
callInstr->SetDst(IR::RegOpnd::New(returnValueOpnd ? returnValueOpnd->GetType() : TyVar, inlinee));
// Put the meta arguments that the stack walker expects to find on the stack.
@@ -4207,8 +4347,6 @@ Inline::SplitConstructorCallCommon(
{
createObjInstr->SetByteCodeOffset(newObjInstr);
createObjInstr->GetSrc1()->SetIsJITOptimizedReg(true);
- // We're splitting a single byte code, so the interpreter has to resume from the beginning if we bail out.
- createObjInstr->forcePreOpBailOutIfNeeded = true;
newObjInstr->InsertBefore(createObjInstr);
createObjDst->SetValueType(ValueType::GetObject(ObjectType::UninitializedObject));
@@ -4305,21 +4443,21 @@ Inline::SplitConstructorCallCommon(
}
void
-Inline::InsertObjectCheck(IR::Instr *callInstr, IR::Instr* insertBeforeInstr, IR::Instr*bailOutIfNotObject)
+Inline::InsertObjectCheck(IR::RegOpnd * funcOpnd, IR::Instr* insertBeforeInstr, IR::Instr*bailOutIfNotObject)
{
// Bailout if 'functionRegOpnd' is not an object.
- bailOutIfNotObject->SetSrc1(callInstr->GetSrc1()->AsRegOpnd());
+ bailOutIfNotObject->SetSrc1(funcOpnd);
bailOutIfNotObject->SetByteCodeOffset(insertBeforeInstr);
insertBeforeInstr->InsertBefore(bailOutIfNotObject);
}
void
-Inline::InsertFunctionTypeIdCheck(IR::Instr *callInstr, IR::Instr* insertBeforeInstr, IR::Instr* bailOutIfNotJsFunction)
+Inline::InsertFunctionTypeIdCheck(IR::RegOpnd * funcOpnd, IR::Instr* insertBeforeInstr, IR::Instr* bailOutIfNotJsFunction)
{
// functionTypeRegOpnd = Ld functionRegOpnd->type
- IR::IndirOpnd *functionTypeIndirOpnd = IR::IndirOpnd::New(callInstr->GetSrc1()->AsRegOpnd(), Js::RecyclableObject::GetOffsetOfType(), TyMachPtr, callInstr->m_func);
+ IR::IndirOpnd *functionTypeIndirOpnd = IR::IndirOpnd::New(funcOpnd, Js::RecyclableObject::GetOffsetOfType(), TyMachPtr, insertBeforeInstr->m_func);
IR::RegOpnd *functionTypeRegOpnd = IR::RegOpnd::New(TyVar, this->topFunc);
- IR::Instr *instr = IR::Instr::New(Js::OpCode::Ld_A, functionTypeRegOpnd, functionTypeIndirOpnd, callInstr->m_func);
+ IR::Instr *instr = IR::Instr::New(Js::OpCode::Ld_A, functionTypeRegOpnd, functionTypeIndirOpnd, insertBeforeInstr->m_func);
if(instr->m_func->HasByteCodeOffset())
{
instr->SetByteCodeOffset(insertBeforeInstr);
@@ -4329,15 +4467,15 @@ Inline::InsertFunctionTypeIdCheck(IR::Instr *callInstr, IR::Instr* insertBeforeI
CompileAssert(sizeof(Js::TypeId) == sizeof(int32));
// if (functionTypeRegOpnd->typeId != TypeIds_Function) goto $noInlineLabel
// BrNeq_I4 $noInlineLabel, functionTypeRegOpnd->typeId, TypeIds_Function
- IR::IndirOpnd *functionTypeIdIndirOpnd = IR::IndirOpnd::New(functionTypeRegOpnd, Js::Type::GetOffsetOfTypeId(), TyInt32, callInstr->m_func);
- IR::IntConstOpnd *typeIdFunctionConstOpnd = IR::IntConstOpnd::New(Js::TypeIds_Function, TyInt32, callInstr->m_func);
+ IR::IndirOpnd *functionTypeIdIndirOpnd = IR::IndirOpnd::New(functionTypeRegOpnd, Js::Type::GetOffsetOfTypeId(), TyInt32, insertBeforeInstr->m_func);
+ IR::IntConstOpnd *typeIdFunctionConstOpnd = IR::IntConstOpnd::New(Js::TypeIds_Function, TyInt32, insertBeforeInstr->m_func);
bailOutIfNotJsFunction->SetSrc1(functionTypeIdIndirOpnd);
bailOutIfNotJsFunction->SetSrc2(typeIdFunctionConstOpnd);
insertBeforeInstr->InsertBefore(bailOutIfNotJsFunction);
}
void
-Inline::InsertJsFunctionCheck(IR::Instr *callInstr, IR::Instr *insertBeforeInstr, IR::BailOutKind bailOutKind)
+Inline::InsertJsFunctionCheck(IR::Instr * callInstr, IR::Instr *insertBeforeInstr, IR::BailOutKind bailOutKind)
{
// This function only inserts bailout for tagged int & TypeIds_Function.
// As of now this is only used for polymorphic inlining.
@@ -4346,23 +4484,25 @@ Inline::InsertJsFunctionCheck(IR::Instr *callInstr, IR::Instr *insertBeforeInstr
Assert(insertBeforeInstr);
Assert(insertBeforeInstr->m_func == callInstr->m_func);
+ IR::RegOpnd * funcOpnd = callInstr->GetSrc1()->AsRegOpnd();
+
// bailOutIfNotFunction is primary bailout instruction
IR::Instr* bailOutIfNotFunction = IR::BailOutInstr::New(Js::OpCode::BailOnNotEqual, bailOutKind, insertBeforeInstr, callInstr->m_func);
- IR::Instr *bailOutIfNotObject = IR::BailOutInstr::New(Js::OpCode::BailOnNotObject, bailOutKind, bailOutIfNotFunction->GetBailOutInfo(),callInstr->m_func);
- InsertObjectCheck(callInstr, insertBeforeInstr, bailOutIfNotObject);
+ IR::Instr *bailOutIfNotObject = IR::BailOutInstr::New(Js::OpCode::BailOnNotObject, bailOutKind, bailOutIfNotFunction->GetBailOutInfo(), callInstr->m_func);
+ InsertObjectCheck(funcOpnd, insertBeforeInstr, bailOutIfNotObject);
- InsertFunctionTypeIdCheck(callInstr, insertBeforeInstr, bailOutIfNotFunction);
+ InsertFunctionTypeIdCheck(funcOpnd, insertBeforeInstr, bailOutIfNotFunction);
}
void
-Inline::InsertFunctionInfoCheck(IR::Instr *callInstr, IR::Instr *insertBeforeInstr, IR::Instr* bailoutInstr, const FunctionJITTimeInfo *funcInfo)
+Inline::InsertFunctionInfoCheck(IR::RegOpnd * funcOpnd, IR::Instr *insertBeforeInstr, IR::Instr* bailoutInstr, const FunctionJITTimeInfo *funcInfo)
{
- // if (JavascriptFunction::FromVar(r1)->functionInfo != funcInfo) goto noInlineLabel
+    // if (VarTo<JavascriptFunction>(r1)->functionInfo != funcInfo) goto noInlineLabel
// BrNeq_I4 noInlineLabel, r1->functionInfo, funcInfo
- IR::IndirOpnd* opndFuncInfo = IR::IndirOpnd::New(callInstr->GetSrc1()->AsRegOpnd(), Js::JavascriptFunction::GetOffsetOfFunctionInfo(), TyMachPtr, callInstr->m_func);
- IR::AddrOpnd* inlinedFuncInfo = IR::AddrOpnd::New(funcInfo->GetFunctionInfoAddr(), IR::AddrOpndKindDynamicFunctionInfo, callInstr->m_func);
+ IR::IndirOpnd* opndFuncInfo = IR::IndirOpnd::New(funcOpnd, Js::JavascriptFunction::GetOffsetOfFunctionInfo(), TyMachPtr, insertBeforeInstr->m_func);
+ IR::AddrOpnd* inlinedFuncInfo = IR::AddrOpnd::New(funcInfo->GetFunctionInfoAddr(), IR::AddrOpndKindDynamicFunctionInfo, insertBeforeInstr->m_func);
bailoutInstr->SetSrc1(opndFuncInfo);
bailoutInstr->SetSrc2(inlinedFuncInfo);
@@ -4370,39 +4510,42 @@ Inline::InsertFunctionInfoCheck(IR::Instr *callInstr, IR::Instr *insertBeforeIns
}
void
-Inline::InsertFunctionObjectCheck(IR::Instr *callInstr, IR::Instr *insertBeforeInstr, IR::Instr *bailOutInstr, const FunctionJITTimeInfo *funcInfo)
+Inline::InsertFunctionObjectCheck(IR::RegOpnd * funcOpnd, IR::Instr *insertBeforeInstr, IR::Instr *bailOutInstr, const FunctionJITTimeInfo *funcInfo)
{
Js::BuiltinFunction index = Js::JavascriptLibrary::GetBuiltInForFuncInfo(funcInfo->GetLocalFunctionId());
AssertMsg(index < Js::BuiltinFunction::Count, "Invalid built-in index on a call target marked as built-in");
- bailOutInstr->SetSrc1(callInstr->GetSrc1()->AsRegOpnd());
- bailOutInstr->SetSrc2(IR::IntConstOpnd::New(index, TyInt32, callInstr->m_func));
+ bailOutInstr->SetSrc1(funcOpnd);
+ bailOutInstr->SetSrc2(IR::IntConstOpnd::New(index, TyInt32, insertBeforeInstr->m_func));
insertBeforeInstr->InsertBefore(bailOutInstr);
}
IR::Instr *
-Inline::PrepareInsertionPoint(IR::Instr *callInstr, const FunctionJITTimeInfo *funcInfo, IR::Instr *insertBeforeInstr, IR::BailOutKind bailOutKind)
+Inline::PrepareInsertionPoint(IR::Instr *callInstr, const FunctionJITTimeInfo *funcInfo, IR::Instr *insertBeforeInstr)
{
Assert(insertBeforeInstr);
Assert(insertBeforeInstr->m_func == callInstr->m_func);
- Assert(bailOutKind == IR::BailOutOnInlineFunction);
+ IR::BailOutKind bailOutKind = IR::BailOutOnInlineFunction;
+
+ IR::RegOpnd * funcOpnd = callInstr->GetSrc1()->AsRegOpnd();
// FunctionBody check is the primary bailout instruction, create it first
IR::BailOutInstr* primaryBailOutInstr = IR::BailOutInstr::New(Js::OpCode::BailOnNotEqual, bailOutKind, insertBeforeInstr, callInstr->m_func);
+ primaryBailOutInstr->SetByteCodeOffset(insertBeforeInstr);
// 1. Bailout if function object is not an object.
IR::Instr *bailOutIfNotObject = IR::BailOutInstr::New(Js::OpCode::BailOnNotObject,
bailOutKind,
primaryBailOutInstr->GetBailOutInfo(),
callInstr->m_func);
- InsertObjectCheck(callInstr, insertBeforeInstr, bailOutIfNotObject);
+ InsertObjectCheck(funcOpnd, insertBeforeInstr, bailOutIfNotObject);
// 2. Bailout if function object is not a TypeId_Function
IR::Instr* bailOutIfNotJsFunction = IR::BailOutInstr::New(Js::OpCode::BailOnNotEqual, bailOutKind, primaryBailOutInstr->GetBailOutInfo(), callInstr->m_func);
- InsertFunctionTypeIdCheck(callInstr, insertBeforeInstr, bailOutIfNotJsFunction);
+ InsertFunctionTypeIdCheck(funcOpnd, insertBeforeInstr, bailOutIfNotJsFunction);
// 3. Bailout if function body doesn't match funcInfo
- InsertFunctionInfoCheck(callInstr, insertBeforeInstr, primaryBailOutInstr, funcInfo);
+ InsertFunctionInfoCheck(funcOpnd, insertBeforeInstr, primaryBailOutInstr, funcInfo);
return primaryBailOutInstr;
}
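
PrepareInsertionPoint now fixes the bailout kind locally and emits three guards that share one BailOutInfo, so failing any of them resumes the interpreter at the same byte-code offset. The chain reduces conceptually to:

    // One line per emitted guard:
    // 1. BailOnNotObject  - the call target is an object
    // 2. BailOnNotEqual   - its type id is TypeIds_Function
    // 3. BailOnNotEqual   - its FunctionInfo matches the profiled inlinee
    bool InlineGuardsPass(bool isObject, bool isJsFunction, bool funcInfoMatches) {
        return isObject && isJsFunction && funcInfoMatches;
    }
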
@@ -4427,7 +4570,7 @@ Inline::EmitFixedMethodOrFunctionObjectChecksForBuiltIns(IR::Instr *callInstr, I
else
{
IR::BailOutInstr * bailOutInstr = IR::BailOutInstr::New(Js::OpCode::BailOnNotBuiltIn, IR::BailOutOnInlineFunction, callInstr, callInstr->m_func);
- InsertFunctionObjectCheck(callInstr, funcObjCheckInsertInstr, bailOutInstr, inlineeInfo);
+ InsertFunctionObjectCheck(callInstr->GetSrc1()->AsRegOpnd(), funcObjCheckInsertInstr, bailOutInstr, inlineeInfo);
}
return useCallTargetInstr;
}
@@ -5151,10 +5294,6 @@ Inline::MapFormals(Func *inlinee,
else
{
instr->SetSrc1(funcObjOpnd);
-
- // This usage doesn't correspond with any byte code register, since interpreter stack frames
- // get their function reference via this->function rather than from a register.
- instr->GetSrc1()->SetIsJITOptimizedReg(true);
}
}
else
diff --git a/deps/chakrashim/core/lib/Backend/Inline.h b/deps/chakrashim/core/lib/Backend/Inline.h
index f180fa66e9a..bfaa48408e5 100644
--- a/deps/chakrashim/core/lib/Backend/Inline.h
+++ b/deps/chakrashim/core/lib/Backend/Inline.h
@@ -72,19 +72,22 @@ class Inline
bool InlineApplyScriptTarget(IR::Instr *callInstr, const FunctionJITTimeInfo* inlinerData, const FunctionJITTimeInfo** pInlineeData, const FunctionJITTimeInfo * applyFuncInfo,
const StackSym *symThis, IR::Instr ** returnInstr, uint recursiveInlineDepth, bool isArrayOpndArgumentsObject, uint argsCount);
void GetArgInstrsForCallAndApply(IR::Instr* callInstr, IR::Instr** implicitThisArgOut, IR::Instr** explicitThisArgOut, IR::Instr** argumentsOrArrayArgOut, uint &argOutCount);
- _Success_(return != false) bool TryGetApplyAndTargetLdInstrs(IR::Instr * callInstr, _Outptr_result_nullonfailure_ IR::Instr ** applyLdInstr, _Outptr_result_nullonfailure_ IR::Instr ** applyTargetLdInstr);
- IR::Instr * InlineCall(IR::Instr *callInstr, const FunctionJITTimeInfo * inlineeData, const FunctionJITTimeInfo * inlinerData, const StackSym *symThis, bool* pIsInlined, uint callSiteId, uint recursiveInlineDepth);
+ _Success_(return != false) bool TryGetCallApplyAndTargetLdInstrs(IR::Instr * callInstr, _Outptr_result_nullonfailure_ IR::Instr ** callApplyLdInstr, _Outptr_result_nullonfailure_ IR::Instr ** callApplyTargetLdInstr);
+ IR::Instr * InlineCall(IR::Instr *callInstr, const FunctionJITTimeInfo * inlineeData, const FunctionJITTimeInfo * inlinerData, const StackSym *symThis, bool* pIsInlined, uint callSiteId, uint recursiveInlineDepth, bool isCallInstanceFunction);
bool InlineCallTarget(IR::Instr *callInstr, const FunctionJITTimeInfo* inlinerData, const FunctionJITTimeInfo** pInlineeData, const FunctionJITTimeInfo *callFuncInfo,
- const StackSym *symThis, IR::Instr ** returnInstr, uint recursiveInlineDepth);
+ const StackSym *symThis, IR::Instr ** returnInstr, uint recursiveInlineDepth, bool isCallInstanceFunction);
+
+ bool TryGetCallApplyInlineeData(const FunctionJITTimeInfo* inlinerData, IR::Instr * callInstr, IR::Instr * callApplyLdInstr, IR::Instr * callApplyTargetLdInstr, const FunctionJITTimeInfo ** inlineeData, Js::InlineCacheIndex * inlineCacheIndex,
+ IR::Instr ** callbackDefInstr, bool isCallInstanceFunction);
bool InlConstFoldArg(IR::Instr *instr, __in_ecount_opt(callerArgOutCount) IR::Instr *callerArgOuts[], Js::ArgSlot callerArgOutCount);
bool InlConstFold(IR::Instr *instr, IntConstType *pValue, __in_ecount_opt(callerArgOutCount) IR::Instr *callerArgOuts[], Js::ArgSlot callerArgOutCount);
IR::Instr * InlineCallApplyTarget_Shared(IR::Instr *callInstr, bool originalCallTargetOpndIsJITOpt, StackSym* originalCallTargetStackSym, const FunctionJITTimeInfo*const inlineeData,
- uint inlineCacheIndex, bool safeThis, bool isApplyTarget, bool isCallTarget, uint recursiveInlineDepth);
+ uint inlineCacheIndex, bool safeThis, bool isApplyTarget, bool isCallTarget, IR::Instr * inlineeDefInstr, uint recursiveInlineDepth);
bool SkipCallApplyScriptTargetInlining_Shared(IR::Instr *callInstr, const FunctionJITTimeInfo* inlinerData, const FunctionJITTimeInfo* inlineeData, bool isApplyTarget, bool isCallTarget);
bool TryGetFixedMethodsForBuiltInAndTarget(IR::Instr *callInstr, const FunctionJITTimeInfo* inlinerData, const FunctionJITTimeInfo* inlineeData, const FunctionJITTimeInfo *builtInFuncInfo,
- IR::Instr* builtInLdInstr, IR::Instr* targetLdInstr, bool& safeThis, bool isApplyTarget);
+ IR::Instr* builtInLdInstr, IR::Instr* targetLdInstr, bool& safeThis, bool isApplyTarget, bool isCallback);
IR::Instr * InlineBuiltInFunction(IR::Instr *callInstr, const FunctionJITTimeInfo * inlineeData, Js::OpCode inlineCallOpCode, const FunctionJITTimeInfo * inlinerData, const StackSym *symCallerThis, bool* pIsInlined, uint profileId, uint recursiveInlineDepth);
IR::Instr * InlineFunc(IR::Instr *callInstr, const FunctionJITTimeInfo *const inlineeData, const uint profileId);
@@ -93,7 +96,12 @@ class Inline
const bool isInlined, const bool doneFixedMethodFld, IR::Instr** createObjInstrOut, IR::Instr** callCtorInstrOut) const;
IR::Instr * InlinePolymorphicFunction(IR::Instr *callInstr, const FunctionJITTimeInfo * inlinerData, const StackSym *symCallerThis, const Js::ProfileId profileId, bool* pIsInlined, uint recursiveInlineDepth, bool triedUsingFixedMethods = false);
IR::Instr * InlinePolymorphicFunctionUsingFixedMethods(IR::Instr *callInstr, const FunctionJITTimeInfo * inlinerData, const StackSym *symCallerThis, const Js::ProfileId profileId, IR::PropertySymOpnd* methodValueOpnd, bool* pIsInlined, uint recursiveInlineDepth);
- IR::Instr * TryGetCallbackDefInstr(IR::Instr * callInstr);
+
+ IR::RegOpnd * GetCallbackFunctionOpnd(IR::Instr * callInstr);
+ IR::Instr * TryGetCallbackDefInstr(StackSym * callbackSym);
+ IR::Instr * TryGetCallbackDefInstrForCallInstr(IR::Instr * callInstr);
+ IR::Instr * TryGetCallbackDefInstrForCallApplyTarget(IR::Instr * callApplyLdInstr);
+ IR::Instr * TryGetCallbackDefInstrForCallInstanceFunction(IR::Instr * callInstr);
IR::Instr * InlineSpread(IR::Instr *spreadCall);
@@ -102,7 +110,7 @@ class Inline
void SetupInlineeFrame(Func *inlinee, IR::Instr *inlineeStart, Js::ArgSlot actualCount, IR::Opnd *functionObject);
void FixupExtraActualParams(IR::Instr * instr, IR::Instr *argOuts[], IR::Instr *argOutsExtra[], uint index, uint actualCount, Js::ProfileId callSiteId);
void RemoveExtraFixupArgouts(IR::Instr* instr, uint argoutRemoveCount, Js::ProfileId callSiteId);
- IR::Instr* PrepareInsertionPoint(IR::Instr *callInstr, const FunctionJITTimeInfo *funcInfo, IR::Instr *insertBeforeInstr, IR::BailOutKind bailOutKind = IR::BailOutOnInlineFunction);
+ IR::Instr* PrepareInsertionPoint(IR::Instr *callInstr, const FunctionJITTimeInfo *funcInfo, IR::Instr *insertBeforeInstr);
IR::ByteCodeUsesInstr* EmitFixedMethodOrFunctionObjectChecksForBuiltIns(IR::Instr *callInstr, IR::Instr * funcObjCheckInsertInstr, const FunctionJITTimeInfo * inlineeInfo, bool isPolymorphic, bool isBuiltIn, bool isCtor, bool isInlined);
Js::ArgSlot MapActuals(IR::Instr *callInstr, __out_ecount(maxParamCount) IR::Instr *argOuts[], Js::ArgSlot formalCount, Func *inlinee, Js::ProfileId callSiteId, bool *stackArgsArgOutExpanded, IR::Instr *argOutsExtra[] = nullptr, Js::ArgSlot maxParamCount = Js::InlineeCallInfo::MaxInlineeArgoutCount);
uint32 CountActuals(IR::Instr *callIntr);
@@ -145,11 +153,11 @@ class Inline
void SetInlineeFrameStartSym(Func *inlinee, uint actualCount);
void CloneCallSequence(IR::Instr* callInstr, IR::Instr* clonedCallInstr);
- void InsertObjectCheck(IR::Instr *callInstr, IR::Instr* insertBeforeInstr, IR::Instr*bailOutInstr);
- void InsertFunctionTypeIdCheck(IR::Instr *callInstr, IR::Instr* insertBeforeInstr, IR::Instr*bailOutInstr);
- void InsertJsFunctionCheck(IR::Instr *callInstr, IR::Instr *insertBeforeInstr, IR::BailOutKind bailOutKind);
- void InsertFunctionInfoCheck(IR::Instr *callInstr, IR::Instr *insertBeforeInstr, IR::Instr* bailoutInstr, const FunctionJITTimeInfo *funcInfo);
- void InsertFunctionObjectCheck(IR::Instr *callInstr, IR::Instr *insertBeforeInstr, IR::Instr* bailoutInstr, const FunctionJITTimeInfo *funcInfo);
+ void InsertObjectCheck(IR::RegOpnd * funcOpnd, IR::Instr* insertBeforeInstr, IR::Instr*bailOutInstr);
+ void InsertFunctionTypeIdCheck(IR::RegOpnd * funcOpnd, IR::Instr* insertBeforeInstr, IR::Instr*bailOutInstr);
+ void InsertJsFunctionCheck(IR::Instr * callInstr, IR::Instr *insertBeforeInstr, IR::BailOutKind bailOutKind);
+ void InsertFunctionInfoCheck(IR::RegOpnd * funcOpnd, IR::Instr *insertBeforeInstr, IR::Instr* bailoutInstr, const FunctionJITTimeInfo *funcInfo);
+ void InsertFunctionObjectCheck(IR::RegOpnd * funcOpnd, IR::Instr *insertBeforeInstr, IR::Instr* bailoutInstr, const FunctionJITTimeInfo *funcInfo);
void TryResetObjTypeSpecFldInfoOn(IR::PropertySymOpnd* propertySymOpnd);
void TryDisableRuntimePolymorphicCacheOn(IR::PropertySymOpnd* propertySymOpnd);
diff --git a/deps/chakrashim/core/lib/Backend/InlineeFrameInfo.cpp b/deps/chakrashim/core/lib/Backend/InlineeFrameInfo.cpp
index 4ac71f94bc3..706f5cae942 100644
--- a/deps/chakrashim/core/lib/Backend/InlineeFrameInfo.cpp
+++ b/deps/chakrashim/core/lib/Backend/InlineeFrameInfo.cpp
@@ -211,9 +211,9 @@ void InlineeFrameRecord::Restore(Js::FunctionBody* functionBody, InlinedFrameLay
BAILOUT_VERBOSE_TRACE(functionBody, _u("Restore function object: "));
// No deepCopy needed for just the function
Js::Var varFunction = this->Restore(this->functionOffset, /*isFloat64*/ false, /*isInt32*/ false, layout, functionBody, boxValues);
- Assert(Js::ScriptFunction::Is(varFunction));
+ Assert(Js::VarIs<Js::ScriptFunction>(varFunction));
- Js::ScriptFunction* function = Js::ScriptFunction::FromVar(varFunction);
+ Js::ScriptFunction* function = Js::VarTo<Js::ScriptFunction>(varFunction);
BAILOUT_VERBOSE_TRACE(functionBody, _u("Inlinee: %s [%d.%d] \n"), function->GetFunctionBody()->GetDisplayName(), function->GetFunctionBody()->GetSourceContextId(), function->GetFunctionBody()->GetLocalFunctionId());
inlinedFrame->function = function;
@@ -230,7 +230,7 @@ void InlineeFrameRecord::Restore(Js::FunctionBody* functionBody, InlinedFrameLay
#if DBG
if (boxValues && !Js::TaggedNumber::Is(var))
{
- Js::RecyclableObject *const recyclableObject = Js::RecyclableObject::FromVar(var);
+ Js::RecyclableObject *const recyclableObject = Js::VarTo<Js::RecyclableObject>(var);
Assert(!ThreadContext::IsOnStack(recyclableObject));
}
#endif
diff --git a/deps/chakrashim/core/lib/Backend/InliningDecider.cpp b/deps/chakrashim/core/lib/Backend/InliningDecider.cpp
index 2f21acd5cfa..f643b0b7d6a 100644
--- a/deps/chakrashim/core/lib/Backend/InliningDecider.cpp
+++ b/deps/chakrashim/core/lib/Backend/InliningDecider.cpp
@@ -553,6 +553,9 @@ bool InliningDecider::GetBuiltInInfoCommon(
case Js::JavascriptBuiltInFunction::JavascriptFunction_Call:
*inlineCandidateOpCode = Js::OpCode::InlineFunctionCall;
break;
+ case Js::JavascriptBuiltInFunction::EngineInterfaceObject_CallInstanceFunction:
+ *inlineCandidateOpCode = Js::OpCode::InlineCallInstanceFunction;
+ break;
// The following are not currently inlined, but are tracked for their return type
// TODO: Add more built-ins that return objects. May consider tracking all built-ins.
diff --git a/deps/chakrashim/core/lib/Backend/InliningDecider.h b/deps/chakrashim/core/lib/Backend/InliningDecider.h
index 2a15a64da24..eb67ec80227 100644
--- a/deps/chakrashim/core/lib/Backend/InliningDecider.h
+++ b/deps/chakrashim/core/lib/Backend/InliningDecider.h
@@ -135,6 +135,12 @@ class InliningDecider
{ \
Output::Flush(); \
}
+#define INLINE_CALLBACKS_TRACE(...) \
+ if (PHASE_TESTTRACE(Js::InlineCallbacksPhase, this->topFunc) || PHASE_TRACE(Js::InlineCallbacksPhase, this->topFunc)) \
+ { \
+ Output::Print(__VA_ARGS__); \
+ Output::Flush(); \
+ }
#else
#define INLINE_VERBOSE_TRACE(...)
#define POLYMORPHIC_INLINE_TESTTRACE(...)
@@ -143,4 +149,6 @@ class InliningDecider
#define INLINE_FLUSH()
#define INLINE_TESTTRACE(...)
#define INLINE_TESTTRACE_VERBOSE(...)
+#define INLINE_TRACE_AND_TESTTRACE(...)
+#define INLINE_CALLBACKS_TRACE(...)
#endif
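// Review sketch: INLINE_CALLBACKS_TRACE follows the existing INLINE_*TRACE pattern --
// printf-style, gated on the InlineCallbacks phase, and compiled out entirely where
// the #else branch above applies. A hypothetical call site (the message text is
// illustrative; `this->topFunc` must be in scope, as the macro body requires):
//
//   INLINE_CALLBACKS_TRACE(_u("INLINING: callback arg at call site %u considered for inlining\n"),
//       (uint)callSiteId);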
diff --git a/deps/chakrashim/core/lib/Backend/JITOutput.cpp b/deps/chakrashim/core/lib/Backend/JITOutput.cpp
index 2511e9cde72..dee344d47f9 100644
--- a/deps/chakrashim/core/lib/Backend/JITOutput.cpp
+++ b/deps/chakrashim/core/lib/Backend/JITOutput.cpp
@@ -65,12 +65,6 @@ JITOutput::IsTrackCompoundedIntOverflowDisabled() const
return m_outputData->disableTrackCompoundedIntOverflow != FALSE;
}
-bool
-JITOutput::IsMemOpDisabled() const
-{
- return m_outputData->disableMemOp != FALSE;
-}
-
bool
JITOutput::IsArrayCheckHoistDisabled() const
{
diff --git a/deps/chakrashim/core/lib/Backend/JITOutput.h b/deps/chakrashim/core/lib/Backend/JITOutput.h
index 10e7a442f11..85e9b3c1fa2 100644
--- a/deps/chakrashim/core/lib/Backend/JITOutput.h
+++ b/deps/chakrashim/core/lib/Backend/JITOutput.h
@@ -22,7 +22,6 @@ class JITOutput
void RecordXData(BYTE * xdata);
#endif
bool IsTrackCompoundedIntOverflowDisabled() const;
- bool IsMemOpDisabled() const;
bool IsArrayCheckHoistDisabled() const;
bool IsStackArgOptDisabled() const;
bool IsSwitchOptDisabled() const;
diff --git a/deps/chakrashim/core/lib/Backend/JITType.cpp b/deps/chakrashim/core/lib/Backend/JITType.cpp
index a235bd02d31..a207dc5ffcf 100644
--- a/deps/chakrashim/core/lib/Backend/JITType.cpp
+++ b/deps/chakrashim/core/lib/Backend/JITType.cpp
@@ -35,7 +35,7 @@ JITType::BuildFromJsType(__in Js::Type * jsType, __out JITType * jitType)
Js::DynamicTypeHandler * handler = dynamicType->GetTypeHandler();
data->handler.isObjectHeaderInlinedTypeHandler = handler->IsObjectHeaderInlinedTypeHandler();
- data->handler.flags = handler->GetFlags();
+ data->handler.isLocked = handler->GetIsLocked();
data->handler.inlineSlotCapacity = handler->GetInlineSlotCapacity();
data->handler.offsetOfInlineSlots = handler->GetOffsetOfInlineSlots();
data->handler.slotCapacity = handler->GetSlotCapacity();
diff --git a/deps/chakrashim/core/lib/Backend/JITTypeHandler.cpp b/deps/chakrashim/core/lib/Backend/JITTypeHandler.cpp
index dbecfb9dafa..aca726bb060 100644
--- a/deps/chakrashim/core/lib/Backend/JITTypeHandler.cpp
+++ b/deps/chakrashim/core/lib/Backend/JITTypeHandler.cpp
@@ -19,13 +19,7 @@ JITTypeHandler::IsObjectHeaderInlinedTypeHandler() const
bool
JITTypeHandler::IsLocked() const
{
- return Js::DynamicTypeHandler::GetIsLocked(m_data.flags);
-}
-
-bool
-JITTypeHandler::IsPrototype() const
-{
- return Js::DynamicTypeHandler::GetIsPrototype(m_data.flags);
+ return m_data.isLocked != FALSE;
}
uint16
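// Review sketch: together with the JITType.cpp hunk above
// (data->handler.isLocked = handler->GetIsLocked()), the off-thread JIT now
// snapshots just the lock bit instead of the whole flags word. Roughly, the
// serialized handler data takes this shape (field names per the hunks shown;
// the struct itself is an illustration, not the actual IDL definition):
//
//   struct TypeHandlerSnapshot {
//       bool   isObjectHeaderInlinedTypeHandler;
//       bool   isLocked;              // was: uint16 flags
//       uint16 inlineSlotCapacity;
//       uint16 offsetOfInlineSlots;
//       int    slotCapacity;
//   };
//
// IsLocked() then tests the snapshot directly, and the now-unused IsPrototype()
// accessor can be dropped (see the JITTypeHandler.h hunk below).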
diff --git a/deps/chakrashim/core/lib/Backend/JITTypeHandler.h b/deps/chakrashim/core/lib/Backend/JITTypeHandler.h
index 9d2cc35bb93..c6e909fd7a2 100644
--- a/deps/chakrashim/core/lib/Backend/JITTypeHandler.h
+++ b/deps/chakrashim/core/lib/Backend/JITTypeHandler.h
@@ -12,7 +12,6 @@ class JITTypeHandler
bool IsObjectHeaderInlinedTypeHandler() const;
bool IsLocked() const;
- bool IsPrototype() const;
uint16 GetInlineSlotCapacity() const;
uint16 GetOffsetOfInlineSlots() const;
diff --git a/deps/chakrashim/core/lib/Backend/JitTransferData.cpp b/deps/chakrashim/core/lib/Backend/JitTransferData.cpp
index 3c99866b441..bb50778ae59 100644
--- a/deps/chakrashim/core/lib/Backend/JitTransferData.cpp
+++ b/deps/chakrashim/core/lib/Backend/JitTransferData.cpp
@@ -115,4 +115,4 @@ void JitTransferData::Cleanup()
}
midl_user_free(entries);
}
-}
\ No newline at end of file
+}
diff --git a/deps/chakrashim/core/lib/Backend/JitTransferData.h b/deps/chakrashim/core/lib/Backend/JitTransferData.h
index 0daf45e4a07..2cc8179b73f 100644
--- a/deps/chakrashim/core/lib/Backend/JitTransferData.h
+++ b/deps/chakrashim/core/lib/Backend/JitTransferData.h
@@ -111,4 +111,4 @@ class JitTransferData
void Cleanup();
private:
void EnsureJitTimeTypeRefs(Recycler* recycler);
-};
\ No newline at end of file
+};
diff --git a/deps/chakrashim/core/lib/Backend/JnHelperMethodList.h b/deps/chakrashim/core/lib/Backend/JnHelperMethodList.h
index 17a4adfc332..a07f4f9bfcb 100644
--- a/deps/chakrashim/core/lib/Backend/JnHelperMethodList.h
+++ b/deps/chakrashim/core/lib/Backend/JnHelperMethodList.h
@@ -66,6 +66,7 @@ HELPERCALLCHK(Op_LdElemUndefDynamic, Js::JavascriptOperators::OP_LoadUndefinedTo
HELPERCALLCHK(Op_LdElemUndefScoped, Js::JavascriptOperators::OP_LoadUndefinedToElementScoped, AttrCanNotBeReentrant)
HELPERCALLCHK(Op_EnsureNoRootProperty, Js::JavascriptOperators::OP_EnsureNoRootProperty, AttrCanThrow | AttrCanNotBeReentrant)
HELPERCALLCHK(Op_EnsureNoRootRedeclProperty, Js::JavascriptOperators::OP_EnsureNoRootRedeclProperty, AttrCanThrow | AttrCanNotBeReentrant)
+HELPERCALLCHK(Op_EnsureCanDeclGloFunc, Js::JavascriptOperators::OP_EnsureCanDeclGloFunc, AttrCanThrow | AttrCanNotBeReentrant)
HELPERCALLCHK(Op_EnsureNoRedeclPropertyScoped, Js::JavascriptOperators::OP_ScopedEnsureNoRedeclProperty, AttrCanThrow | AttrCanNotBeReentrant)
HELPERCALLCHK(Op_ToSpreadedFunctionArgument, Js::JavascriptOperators::OP_LdCustomSpreadIteratorList, AttrCanThrow)
@@ -73,8 +74,8 @@ HELPERCALLCHK(Op_ConvObject, Js::JavascriptOperators::ToObject, AttrCanThrow | A
HELPERCALLCHK(Op_NewUnscopablesWrapperObject, Js::JavascriptOperators::ToUnscopablesWrapperObject, AttrCanThrow | AttrCanNotBeReentrant)
HELPERCALLCHK(SetComputedNameVar, Js::JavascriptOperators::OP_SetComputedNameVar, AttrCanNotBeReentrant)
HELPERCALLCHK(Op_UnwrapWithObj, Js::JavascriptOperators::OP_UnwrapWithObj, AttrCanNotBeReentrant)
-HELPERCALLCHK(Op_ConvNumber_Full, Js::JavascriptOperators::ToNumber, AttrCanThrow)
-HELPERCALLCHK(Op_ConvNumberInPlace, Js::JavascriptOperators::ToNumberInPlace, AttrCanThrow)
+HELPERCALLCHK(Op_ConvNumber_Full, Js::JavascriptOperators::ToNumeric, AttrCanThrow)
+HELPERCALLCHK(Op_ConvNumberInPlace, Js::JavascriptOperators::ToNumericInPlace, AttrCanThrow)
HELPERCALLCHK(Op_ConvNumber_Helper, Js::JavascriptConversion::ToNumber_Helper, 0)
HELPERCALLCHK(Op_ConvFloat_Helper, Js::JavascriptConversion::ToFloat_Helper, 0)
HELPERCALLCHK(Op_ConvNumber_FromPrimitive, Js::JavascriptConversion::ToNumber_FromPrimitive, 0)
@@ -134,7 +135,6 @@ HELPERCALL_MATH(Op_MaxInAnArray, Js::JavascriptMath::MaxInAnArray, AttrCanThrow)
HELPERCALL_MATH(Op_MinInAnArray, Js::JavascriptMath::MinInAnArray, AttrCanThrow)
HELPERCALLCHK(Op_ConvString, Js::JavascriptConversion::ToString, AttrCanThrow)
-HELPERCALLCHK(Op_ConvPropertyKey, Js::JavascriptOperators::OP_ToPropertyKey, AttrCanThrow)
HELPERCALLCHK(Op_CoerseString, Js::JavascriptConversion::CoerseString, AttrCanThrow)
HELPERCALLCHK(Op_CoerseRegex, (Js::JavascriptRegExp* (*) (Js::Var aValue, Js::Var options, Js::ScriptContext *scriptContext))Js::JavascriptRegExp::CreateRegEx, AttrCanThrow)
@@ -206,12 +206,6 @@ HELPERCALLCHK(Op_SetNativeIntElementI_Int32, Js::JavascriptOperators::OP_SetNati
HELPERCALLCHK(Op_SetNativeFloatElementI_Int32, Js::JavascriptOperators::OP_SetNativeFloatElementI_Int32, AttrCanThrow)
HELPERCALLCHK(Op_SetNativeIntElementI_UInt32, Js::JavascriptOperators::OP_SetNativeIntElementI_UInt32, AttrCanThrow)
HELPERCALLCHK(Op_SetNativeFloatElementI_UInt32, Js::JavascriptOperators::OP_SetNativeFloatElementI_UInt32, AttrCanThrow)
-HELPERCALLCHK(Op_SetNativeIntElementI_NoConvert, Js::JavascriptOperators::OP_SetNativeIntElementI_NoConvert, AttrCanThrow)
-HELPERCALLCHK(Op_SetNativeFloatElementI_NoConvert, Js::JavascriptOperators::OP_SetNativeFloatElementI_NoConvert, AttrCanThrow)
-HELPERCALLCHK(Op_SetNativeIntElementI_Int32_NoConvert, Js::JavascriptOperators::OP_SetNativeIntElementI_Int32_NoConvert, AttrCanThrow)
-HELPERCALLCHK(Op_SetNativeFloatElementI_Int32_NoConvert, Js::JavascriptOperators::OP_SetNativeFloatElementI_Int32_NoConvert, AttrCanThrow)
-HELPERCALLCHK(Op_SetNativeIntElementI_UInt32_NoConvert, Js::JavascriptOperators::OP_SetNativeIntElementI_UInt32_NoConvert, AttrCanThrow)
-HELPERCALLCHK(Op_SetNativeFloatElementI_UInt32_NoConvert, Js::JavascriptOperators::OP_SetNativeFloatElementI_UInt32_NoConvert, AttrCanThrow)
HELPERCALLCHK(ScrArr_SetNativeIntElementC, Js::JavascriptArray::OP_SetNativeIntElementC, AttrCanNotBeReentrant)
HELPERCALLCHK(ScrArr_SetNativeFloatElementC, Js::JavascriptArray::OP_SetNativeFloatElementC, AttrCanNotBeReentrant)
HELPERCALLCHK(Op_DeleteElementI, Js::JavascriptOperators::OP_DeleteElementI, AttrCanThrow)
@@ -490,8 +484,8 @@ HELPERCALL(String_ToLocaleUpperCase, Js::JavascriptString::EntryToLocaleUpperCas
HELPERCALL(String_ToLowerCase, Js::JavascriptString::EntryToLowerCase, 0)
HELPERCALL(String_ToUpperCase, Js::JavascriptString::EntryToUpperCase, 0)
HELPERCALL(String_Trim, Js::JavascriptString::EntryTrim, 0)
-HELPERCALL(String_TrimLeft, Js::JavascriptString::EntryTrimLeft, 0)
-HELPERCALL(String_TrimRight, Js::JavascriptString::EntryTrimRight, 0)
+HELPERCALL(String_TrimLeft, Js::JavascriptString::EntryTrimStart, 0)
+HELPERCALL(String_TrimRight, Js::JavascriptString::EntryTrimEnd, 0)
HELPERCALL(String_GetSz, Js::JavascriptString::GetSzHelper, 0)
HELPERCALL(String_PadStart, Js::JavascriptString::EntryPadStart, 0)
HELPERCALL(String_PadEnd, Js::JavascriptString::EntryPadEnd, 0)
@@ -518,6 +512,12 @@ HELPERCALL(EnsureFunctionProxyDeferredPrototypeType, &Js::FunctionProxy::EnsureF
HELPERCALL(SpreadArrayLiteral, Js::JavascriptArray::SpreadArrayArgs, 0)
HELPERCALL(SpreadCall, Js::JavascriptFunction::EntrySpreadCall, 0)
+HELPERCALL(SpreadObjectLiteral, Js::JavascriptObject::SpreadObjectLiteral, 0)
+HELPERCALL(Restify, Js::JavascriptObject::Restify, 0)
+HELPERCALL(NewPropIdArrForCompProps, Js::InterpreterStackFrame::OP_NewPropIdArrForCompProps, AttrCanNotBeReentrant)
+HELPERCALL(StPropIdArrFromVar, Js::InterpreterStackFrame::OP_StPropIdArrFromVar, 0)
+
+
HELPERCALLCHK(LdHomeObj, Js::JavascriptOperators::OP_LdHomeObj, AttrCanNotBeReentrant)
HELPERCALLCHK(LdFuncObj, Js::JavascriptOperators::OP_LdFuncObj, AttrCanNotBeReentrant)
HELPERCALLCHK(SetHomeObj, Js::JavascriptOperators::OP_SetHomeObj, AttrCanNotBeReentrant)
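// Review sketch: JnHelperMethodList.h is an X-macro list -- each includer defines
// HELPERCALL/HELPERCALLCHK before including the file. A minimal hypothetical
// consumer that expands the list into the IR::Helper* enum values used in
// Lower.cpp (illustrative; the real consumers also record addresses and attributes):
//
//   #define HELPERCALL(Name, Address, Attributes) Helper##Name,
//   #define HELPERCALLCHK(Name, Address, Attributes) HELPERCALL(Name, Address, Attributes)
//   enum JnHelperMethod {
//   #include "JnHelperMethodList.h"
//   };
//   #undef HELPERCALLCHK
//   #undef HELPERCALL
//
// The Attributes column is the contract being edited above: e.g. the new
// Op_EnsureCanDeclGloFunc entry is declared throwing (AttrCanThrow) and
// non-reentrant (AttrCanNotBeReentrant).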
diff --git a/deps/chakrashim/core/lib/Backend/LinearScan.cpp b/deps/chakrashim/core/lib/Backend/LinearScan.cpp
index 0221ea1d410..d4e45b7dacb 100644
--- a/deps/chakrashim/core/lib/Backend/LinearScan.cpp
+++ b/deps/chakrashim/core/lib/Backend/LinearScan.cpp
@@ -1417,7 +1417,7 @@ LinearScan::FillBailOutRecord(IR::Instr * instr)
memset(state.registerSaveSyms, 0, sizeof(state.registerSaveSyms));
// Fill in the constants
- FOREACH_SLISTBASE_ENTRY_EDITING(ConstantStackSymValue, value, &bailOutInfo->usedCapturedValues.constantValues, constantValuesIterator)
+ FOREACH_SLISTBASE_ENTRY_EDITING(ConstantStackSymValue, value, &bailOutInfo->usedCapturedValues->constantValues, constantValuesIterator)
{
AssertMsg(bailOutInfo->bailOutRecord->bailOutKind != IR::BailOutForGeneratorYield, "constant prop syms unexpected for bail-in for generator yield");
StackSym * stackSym = value.Key();
@@ -1460,7 +1460,7 @@ LinearScan::FillBailOutRecord(IR::Instr * instr)
NEXT_SLISTBASE_ENTRY_EDITING;
// Fill in the copy prop syms
- FOREACH_SLISTBASE_ENTRY_EDITING(CopyPropSyms, copyPropSyms, &bailOutInfo->usedCapturedValues.copyPropSyms, copyPropSymsIter)
+ FOREACH_SLISTBASE_ENTRY_EDITING(CopyPropSyms, copyPropSyms, &bailOutInfo->usedCapturedValues->copyPropSyms, copyPropSymsIter)
{
AssertMsg(bailOutInfo->bailOutRecord->bailOutKind != IR::BailOutForGeneratorYield, "copy prop syms unexpected for bail-in for generator yield");
StackSym * stackSym = copyPropSyms.Key();
@@ -1513,9 +1513,9 @@ LinearScan::FillBailOutRecord(IR::Instr * instr)
}
NEXT_BITSET_IN_SPARSEBV;
- if (bailOutInfo->usedCapturedValues.argObjSyms)
+ if (bailOutInfo->usedCapturedValues->argObjSyms)
{
- FOREACH_BITSET_IN_SPARSEBV(id, bailOutInfo->usedCapturedValues.argObjSyms)
+ FOREACH_BITSET_IN_SPARSEBV(id, bailOutInfo->usedCapturedValues->argObjSyms)
{
StackSym * stackSym = this->func->m_symTable->FindStackSym(id);
Assert(stackSym != nullptr);
@@ -1705,7 +1705,7 @@ LinearScan::FillBailOutRecord(IR::Instr * instr)
uint outParamOffsetIndex = outParamStart + argSlot;
if (!sym->m_isBailOutReferenced && !sym->IsArgSlotSym())
{
- FOREACH_SLISTBASE_ENTRY_EDITING(ConstantStackSymValue, constantValue, &bailOutInfo->usedCapturedValues.constantValues, iterator)
+ FOREACH_SLISTBASE_ENTRY_EDITING(ConstantStackSymValue, constantValue, &bailOutInfo->usedCapturedValues->constantValues, iterator)
{
if (constantValue.Key()->m_id == sym->m_id)
{
@@ -1731,13 +1731,13 @@ LinearScan::FillBailOutRecord(IR::Instr * instr)
continue;
}
- FOREACH_SLISTBASE_ENTRY_EDITING(CopyPropSyms, copyPropSym, &bailOutInfo->usedCapturedValues.copyPropSyms, iter)
+ FOREACH_SLISTBASE_ENTRY_EDITING(CopyPropSyms, copyPropSym, &bailOutInfo->usedCapturedValues->copyPropSyms, iter)
{
if (copyPropSym.Key()->m_id == sym->m_id)
{
StackSym * copyStackSym = copyPropSym.Value();
- BVSparse<JitArenaAllocator>* argObjSyms = bailOutInfo->usedCapturedValues.argObjSyms;
+ BVSparse<JitArenaAllocator>* argObjSyms = bailOutInfo->usedCapturedValues->argObjSyms;
if (argObjSyms && argObjSyms->Test(copyStackSym->m_id))
{
outParamOffsets[outParamOffsetIndex] = BailOutRecord::GetArgumentsObjectOffset();
@@ -1845,7 +1845,7 @@ LinearScan::FillBailOutRecord(IR::Instr * instr)
Assert(LowererMD::IsAssign(instrDef));
}
- if (bailOutInfo->usedCapturedValues.argObjSyms && bailOutInfo->usedCapturedValues.argObjSyms->Test(sym->m_id))
+ if (bailOutInfo->usedCapturedValues->argObjSyms && bailOutInfo->usedCapturedValues->argObjSyms->Test(sym->m_id))
{
// foo.apply(this, arguments) case: we bail out when apply is overridden, so we need to restore the arguments object.
outParamOffsets[outParamOffsetIndex] = BailOutRecord::GetArgumentsObjectOffset();
diff --git a/deps/chakrashim/core/lib/Backend/Lower.cpp b/deps/chakrashim/core/lib/Backend/Lower.cpp
index a4f2bb45071..b561fc5e0af 100644
--- a/deps/chakrashim/core/lib/Backend/Lower.cpp
+++ b/deps/chakrashim/core/lib/Backend/Lower.cpp
@@ -468,6 +468,7 @@ Lowerer::LowerRange(IR::Instr *instrStart, IR::Instr *instrEnd, bool defaultDoFa
break;
case Js::OpCode::AdjustObjType:
+ case Js::OpCode::AdjustObjTypeReloadAuxSlotPtr:
this->LowerAdjustObjType(instr);
break;
@@ -987,10 +988,12 @@ Lowerer::LowerRange(IR::Instr *instrStart, IR::Instr *instrEnd, bool defaultDoFa
this->LowerUnaryHelperMem(instr, IR::HelperOp_ToSpreadedFunctionArgument);
break;
+ case Js::OpCode::Conv_Numeric:
case Js::OpCode::Conv_Num:
this->LowerConvNum(instr, noMathFastPath);
break;
+ case Js::OpCode::Incr_Num_A:
case Js::OpCode::Incr_A:
if (PHASE_OFF(Js::MathFastPathPhase, this->m_func) || noMathFastPath)
{
@@ -1005,6 +1008,7 @@ Lowerer::LowerRange(IR::Instr *instrStart, IR::Instr *instrEnd, bool defaultDoFa
}
break;
+ case Js::OpCode::Decr_Num_A:
case Js::OpCode::Decr_A:
if (PHASE_OFF(Js::MathFastPathPhase, this->m_func) || noMathFastPath)
{
@@ -2490,6 +2494,10 @@ Lowerer::LowerRange(IR::Instr *instrStart, IR::Instr *instrEnd, bool defaultDoFa
this->LowerElementUndefined(instr, IR::HelperOp_EnsureNoRootRedeclProperty);
break;
+ case Js::OpCode::EnsureCanDeclGloFunc:
+ this->LowerElementUndefined(instr, IR::HelperOp_EnsureCanDeclGloFunc);
+ break;
+
case Js::OpCode::ScopedEnsureNoRedeclFld:
this->LowerElementUndefinedScoped(instr, IR::HelperOp_EnsureNoRedeclPropertyScoped);
break;
@@ -2826,10 +2834,6 @@ Lowerer::LowerRange(IR::Instr *instrStart, IR::Instr *instrEnd, bool defaultDoFa
this->LowerConvPrimStr(instr);
break;
- case Js::OpCode::Conv_Prop:
- this->LowerConvPropertyKey(instr);
- break;
-
case Js::OpCode::ClearAttributes:
this->LowerBinaryHelper(instr, IR::HelperOP_ClearAttributes);
break;
@@ -3094,6 +3098,22 @@ Lowerer::LowerRange(IR::Instr *instrStart, IR::Instr *instrEnd, bool defaultDoFa
break;
}
+ case Js::OpCode::SpreadObjectLiteral:
+ this->LowerBinaryHelperMem(instr, IR::HelperSpreadObjectLiteral);
+ break;
+
+ case Js::OpCode::Restify:
+ instrPrev = this->LowerRestify(instr);
+ break;
+
+ case Js::OpCode::NewPropIdArrForCompProps:
+ this->LowerUnaryHelperMem(instr, IR::HelperNewPropIdArrForCompProps);
+ break;
+
+ case Js::OpCode::StPropIdArrFromVar:
+ instrPrev = this->LowerStPropIdArrFromVar(instr);
+ break;
+
default:
#ifdef ENABLE_WASM_SIMD
if (IsSimd128Opcode(instr->m_opcode))
@@ -3737,6 +3757,22 @@ Lowerer::GenerateProfiledNewScArrayFastPath(IR::Instr *instr, Js::ArrayCallSiteI
IR::RegOpnd *headOpnd;
uint32 i = length;
+ auto fillMissingItems = [&](IRType type, uint missingItemCount, uint offsetStart, uint itemSpacing)
+ {
+ IR::Opnd * missingItemOpnd = GetMissingItemOpnd(type, func);
+#if _M_ARM32_OR_ARM64
+ IR::Instr * move = this->InsertMove(IR::RegOpnd::New(type, instr->m_func), missingItemOpnd, instr);
+ missingItemOpnd = move->GetDst();
+#endif
+ const IR::AutoReuseOpnd autoReuseHeadOpnd(headOpnd, func);
+ const IR::AutoReuseOpnd autoReuseMissingItemOpnd(missingItemOpnd, func);
+
+ for (; i < missingItemCount; i++)
+ {
+ GenerateMemInit(headOpnd, offsetStart + i * itemSpacing, missingItemOpnd, instr, isZeroed);
+ }
+ };
+
if (instr->GetDst() && instr->GetDst()->GetValueType().IsLikelyNativeIntArray())
{
if (!IsSmallObject(length))
@@ -3746,14 +3782,10 @@ Lowerer::GenerateProfiledNewScArrayFastPath(IR::Instr *instr, Js::ArrayCallSiteI
GenerateArrayInfoIsNativeIntArrayTest(instr, arrayInfo, arrayInfoAddr, helperLabel);
Assert(Js::JavascriptNativeIntArray::GetOffsetOfArrayFlags() + sizeof(uint16) == Js::JavascriptNativeIntArray::GetOffsetOfArrayCallSiteIndex());
headOpnd = GenerateArrayLiteralsAlloc(instr, &size, arrayInfo, &isZeroed);
- const IR::AutoReuseOpnd autoReuseHeadOpnd(headOpnd, func);
GenerateMemInit(dstOpnd, Js::JavascriptNativeIntArray::GetOffsetOfWeakFuncRef(), IR::AddrOpnd::New(weakFuncRef, IR::AddrOpndKindDynamicFunctionBodyWeakRef, m_func), instr, isZeroed);
- for (; i < size; i++)
- {
- GenerateMemInit(headOpnd, sizeof(Js::SparseArraySegmentBase) + i * sizeof(int32),
- Js::JavascriptNativeIntArray::MissingItem, instr, isZeroed);
- }
+
+ fillMissingItems(TyInt32, size, sizeof(Js::SparseArraySegmentBase), sizeof(int32));
}
else if (instr->GetDst() && instr->GetDst()->GetValueType().IsLikelyNativeFloatArray())
{
@@ -3764,18 +3796,14 @@ Lowerer::GenerateProfiledNewScArrayFastPath(IR::Instr *instr, Js::ArrayCallSiteI
GenerateArrayInfoIsNativeFloatAndNotIntArrayTest(instr, arrayInfo, arrayInfoAddr, helperLabel);
Assert(Js::JavascriptNativeFloatArray::GetOffsetOfArrayFlags() + sizeof(uint16) == Js::JavascriptNativeFloatArray::GetOffsetOfArrayCallSiteIndex());
headOpnd = GenerateArrayLiteralsAlloc(instr, &size, arrayInfo, &isZeroed);
- const IR::AutoReuseOpnd autoReuseHeadOpnd(headOpnd, func);
GenerateMemInit(dstOpnd, Js::JavascriptNativeFloatArray::GetOffsetOfWeakFuncRef(), IR::AddrOpnd::New(weakFuncRef, IR::AddrOpndKindDynamicFunctionBodyWeakRef, m_func), instr, isZeroed);
- // Js::JavascriptArray::MissingItem is a Var, so it may be 32-bit or 64 bit.
+
uint const offsetStart = sizeof(Js::SparseArraySegmentBase);
- for (; i < size; i++)
- {
- GenerateMemInit(
- headOpnd, offsetStart + i * sizeof(double),
- GetMissingItemOpndForAssignment(TyFloat64, m_func),
- instr, isZeroed);
- }
+ uint const missingItemCount = size * sizeof(double) / sizeof(Js::JavascriptArray::MissingItem);
+ i = i * sizeof(double) / sizeof(Js::JavascriptArray::MissingItem);
+
+ fillMissingItems(TyVar, missingItemCount, offsetStart, sizeof(Js::JavascriptArray::MissingItem));
}
else
{
@@ -3783,16 +3811,9 @@ Lowerer::GenerateProfiledNewScArrayFastPath(IR::Instr *instr, Js::ArrayCallSiteI
{
return false;
}
- uint const offsetStart = sizeof(Js::SparseArraySegmentBase);
+
headOpnd = GenerateArrayLiteralsAlloc(instr, &size, arrayInfo, &isZeroed);
- const IR::AutoReuseOpnd autoReuseHeadOpnd(headOpnd, func);
- for (; i < size; i++)
- {
- GenerateMemInit(
- headOpnd, offsetStart + i * sizeof(Js::Var),
- GetMissingItemOpndForAssignment(TyVar, m_func),
- instr, isZeroed);
- }
+ fillMissingItems(TyVar, size, sizeof(Js::SparseArraySegmentBase), sizeof(Js::Var));
}
// Skip past the helper call
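// Review note on the MissingItem scaling in this hunk and the next: MissingItem is
// written as a Var-sized pattern, while native float arrays store 8-byte doubles,
// so counts and strides are rescaled between the two units. A worked illustration
// (assumes sizeof(double) is a whole multiple of sizeof(Js::Var), which holds on
// all supported targets):
//
//   // x64:          sizeof(double) == 8, sizeof(Js::Var) == 8  -> factor 1
//   // x86 / ARM32:  sizeof(double) == 8, sizeof(Js::Var) == 4  -> factor 2
//   uint factor = sizeof(double) / sizeof(Js::JavascriptArray::MissingItem);
//   uint missingItemCount = sizeInDoubles * factor;   // matches the code above
//
// The GenerateProfiledNewScObjArrayFastPath hunk below names the same quantity
// sizeFactor.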
@@ -4113,11 +4134,12 @@ Lowerer::GenerateProfiledNewScObjArrayFastPath(IR::Instr *instr, Js::ArrayCallSi
// Js::JavascriptArray::MissingItem is a Var, so it may be 32-bit or 64 bit.
uint const offsetStart = sizeof(Js::SparseArraySegmentBase);
- for (uint i = 0; i < size; i++)
+ uint const missingItemCount = size * sizeof(double) / sizeof(Js::JavascriptArray::MissingItem);
+ for (uint i = 0; i < missingItemCount; i++)
{
GenerateMemInit(
- headOpnd, offsetStart + i * sizeof(double),
- GetMissingItemOpndForAssignment(TyFloat64, m_func),
+ headOpnd, offsetStart + i * sizeof(Js::JavascriptArray::MissingItem),
+ IR::AddrOpnd::New(Js::JavascriptArray::MissingItem, IR::AddrOpndKindConstantAddress, m_func, true),
instr, isZeroed);
}
}
@@ -4127,9 +4149,9 @@ Lowerer::GenerateProfiledNewScObjArrayFastPath(IR::Instr *instr, Js::ArrayCallSi
headOpnd = GenerateArrayObjectsAlloc(instr, &size, arrayInfo, &isZeroed, isNoArgs);
for (uint i = 0; i < size; i++)
{
- GenerateMemInit(
+ GenerateMemInit(
headOpnd, offsetStart + i * sizeof(Js::Var),
- GetMissingItemOpndForAssignment(TyVar, m_func),
+ IR::AddrOpnd::New(Js::JavascriptArray::MissingItem, IR::AddrOpndKindConstantAddress, m_func, true),
instr, isZeroed);
}
}
@@ -4160,8 +4182,8 @@ Lowerer::GenerateProfiledNewScObjArrayFastPath(IR::Instr *instr, Js::ArrayCallSi
uint allocationBucketsCount = ArrayType::AllocationBucketsCount;
uint(*allocationBuckets)[Js::JavascriptArray::AllocationBucketsInfoSize];
allocationBuckets = ArrayType::allocationBuckets;
-
- IRType missingItemType = (arrayInfo ? arrayInfo->IsNativeIntArray() ? IRType::TyInt32 : arrayInfo->IsNativeFloatArray() ? IRType::TyFloat64 : IRType::TyVar : IRType::TyVar);
+ uint sizeFactor = 1;
+ IRType missingItemType = (arrayInfo && arrayInfo->IsNativeIntArray()) ? IRType::TyInt32 : IRType::TyVar;
IR::LabelInstr * arrayInitDone = IR::LabelInstr::New(Js::OpCode::Label, func);
bool isNativeArray = arrayInfo && (arrayInfo->IsNativeIntArray() || arrayInfo->IsNativeFloatArray());
@@ -4173,7 +4195,9 @@ Lowerer::GenerateProfiledNewScObjArrayFastPath(IR::Instr *instr, Js::ArrayCallSi
}
else if (arrayInfo && arrayInfo->IsNativeFloatArray())
{
- sizeOfElement = sizeof(double);
+ // Js::JavascriptArray::MissingItem is a Var, so it may be 32-bit or 64 bit.
+ sizeFactor = sizeof(double) / sizeof(Js::JavascriptArray::MissingItem);
+ sizeOfElement = sizeof(Js::JavascriptArray::MissingItem);
GenerateArrayInfoIsNativeFloatAndNotIntArrayTest(instr, arrayInfo, arrayInfoAddr, helperLabel);
}
else
@@ -4203,7 +4227,7 @@ Lowerer::GenerateProfiledNewScObjArrayFastPath(IR::Instr *instr, Js::ArrayCallSi
for (uint8 i = 0;i < allocationBucketsCount;i++)
{
- missingItemCount = allocationBuckets[i][Js::JavascriptArray::MissingElementsCountIndex];
+ missingItemCount = allocationBuckets[i][Js::JavascriptArray::MissingElementsCountIndex] * sizeFactor;
if (i > 0)
{
@@ -4234,7 +4258,7 @@ Lowerer::GenerateProfiledNewScObjArrayFastPath(IR::Instr *instr, Js::ArrayCallSi
// Ensure the number of missingItems written is the same
Assert(missingItemIndex == missingItemInitializedSoFar);
// Ensure the number of missingItems matches what's present in allocationBuckets
- Assert(missingItemIndex == allocationBuckets[allocationBucketsCount - 1][Js::JavascriptArray::MissingElementsCountIndex]);
+ Assert(missingItemIndex == allocationBuckets[allocationBucketsCount - 1][Js::JavascriptArray::MissingElementsCountIndex] * sizeFactor);
instr->InsertBefore(arrayInitDone);
@@ -4362,11 +4386,11 @@ Lowerer::GenerateProfiledNewScFloatArrayFastPath(IR::Instr *instr, Js::ArrayCall
// Js::JavascriptArray::MissingItem is a Var, so it may be 32-bit or 64 bit.
uint const offsetStart = sizeof(Js::SparseArraySegmentBase) + doubles->count * sizeof(double);
- uint const missingItem = (size - doubles->count);
+ uint const missingItem = (size - doubles->count) * sizeof(double) / sizeof(Js::JavascriptArray::MissingItem);
for (uint i = 0; i < missingItem; i++)
{
- GenerateMemInit(headOpnd, offsetStart + i * sizeof(double),
- GetMissingItemOpndForAssignment(TyFloat64, m_func), instr, isHeadSegmentZeroed);
+ GenerateMemInit(headOpnd, offsetStart + i * sizeof(Js::JavascriptArray::MissingItem),
+ IR::AddrOpnd::New(Js::JavascriptArray::MissingItem, IR::AddrOpndKindConstantAddress, m_func, true), instr, isHeadSegmentZeroed);
}
// Skip past the helper call
IR::LabelInstr * doneLabel = IR::LabelInstr::New(Js::OpCode::Label, func);
@@ -4605,40 +4629,18 @@ Lowerer::LowerNewScObject(IR::Instr *newObjInstr, bool callCtor, bool hasArgs, b
{
Assert(!newObjDst->CanStoreTemp());
// createObjDst = NewScObject...(ctorOpnd)
+ newScHelper = !callCtor ?
+ (isBaseClassConstructorNewScObject ?
+ (hasArgs ? IR::HelperNewScObjectNoCtorFull : IR::HelperNewScObjectNoArgNoCtorFull) :
+ (hasArgs ? IR::HelperNewScObjectNoCtor : IR::HelperNewScObjectNoArgNoCtor)) :
+ (hasArgs || usedFixedCtorCache ? IR::HelperNewScObjectNoCtor : IR::HelperNewScObjectNoArg);
LoadScriptContext(newObjInstr);
+ m_lowererMD.LoadHelperArgument(newObjInstr, newObjInstr->GetSrc1());
- if (callCtor)
- {
- newScHelper = (hasArgs || usedFixedCtorCache ? IR::HelperNewScObjectNoCtor : IR::HelperNewScObjectNoArg);
-
- m_lowererMD.LoadHelperArgument(newObjInstr, newObjInstr->GetSrc1());
-
- newScObjCall = IR::Instr::New(Js::OpCode::Call, createObjDst, IR::HelperCallOpnd::New(newScHelper, func), func);
- newObjInstr->InsertBefore(newScObjCall);
- m_lowererMD.LowerCall(newScObjCall, 0);
- }
- else
- {
- newScHelper =
- (isBaseClassConstructorNewScObject ?
- (hasArgs ? IR::HelperNewScObjectNoCtorFull : IR::HelperNewScObjectNoArgNoCtorFull) :
- (hasArgs ? IR::HelperNewScObjectNoCtor : IR::HelperNewScObjectNoArgNoCtor));
-
- // Branch around the helper call to execute the inlined ctor.
- Assert(callCtorLabel != nullptr);
- newObjInstr->InsertAfter(callCtorLabel);
-
- // Change the NewScObject* to a helper call on the spot. This generates implicit call bailout for us if we need one.
- m_lowererMD.LoadHelperArgument(newObjInstr, newObjInstr->UnlinkSrc1());
- m_lowererMD.ChangeToHelperCall(newObjInstr, newScHelper);
-
- // Then we're done.
- Assert(createObjDst == newObjDst);
-
- // Return the first instruction above the region we've just lowered.
- return RemoveLoweredRegionStartMarker(startMarkerInstr);
- }
+ newScObjCall = IR::Instr::New(Js::OpCode::Call, createObjDst, IR::HelperCallOpnd::New(newScHelper, func), func);
+ newObjInstr->InsertBefore(newScObjCall);
+ m_lowererMD.LowerCall(newScObjCall, 0);
}
}
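// Review sketch: the folded conditional above selects among five helpers; spelled
// out as a table (derived directly from the expression, no new behavior):
//
//   callCtor | isBaseClassCtorNewScObject | condition                     -> helper
//   ---------+----------------------------+----------------------------------------
//   false    | true                       | hasArgs                       -> NewScObjectNoCtorFull
//   false    | true                       | !hasArgs                      -> NewScObjectNoArgNoCtorFull
//   false    | false                      | hasArgs                       -> NewScObjectNoCtor
//   false    | false                      | !hasArgs                      -> NewScObjectNoArgNoCtor
//   true     | (any)                      | hasArgs || usedFixedCtorCache -> NewScObjectNoCtor
//   true     | (any)                      | otherwise                     -> NewScObjectNoArg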
@@ -4883,6 +4885,9 @@ bool Lowerer::TryLowerNewScObjectWithFixedCtorCache(IR::Instr* newObjInstr, IR::
skipNewScObj = false;
returnNewScObj = false;
+ AssertMsg(!PHASE_OFF(Js::ObjTypeSpecNewObjPhase, this->m_func) || !newObjInstr->HasBailOutInfo(),
+ "Why do we have bailout on NewScObject when ObjTypeSpecNewObj is off?");
+
if (PHASE_OFF(Js::FixedNewObjPhase, newObjInstr->m_func) && PHASE_OFF(Js::ObjTypeSpecNewObjPhase, this->m_func))
{
return false;
@@ -4890,10 +4895,11 @@ bool Lowerer::TryLowerNewScObjectWithFixedCtorCache(IR::Instr* newObjInstr, IR::
JITTimeConstructorCache * ctorCache;
- if (newObjInstr->HasBailOutInfo() && (newObjInstr->GetBailOutKind() & ~IR::BailOutKindBits) == IR::BailOutFailedCtorGuardCheck)
+ if (newObjInstr->HasBailOutInfo())
{
Assert(newObjInstr->IsNewScObjectInstr());
Assert(newObjInstr->IsProfiledInstr());
+ Assert(newObjInstr->GetBailOutKind() == IR::BailOutFailedCtorGuardCheck);
emitBailOut = true;
@@ -6019,7 +6025,7 @@ Lowerer::GenerateFastLdMethodFromFlags(IR::Instr * instrLdFld)
IR::PropertySymOpnd * propertySymOpnd = opndSrc->AsPropertySymOpnd();
- Assert(!instrLdFld->DoStackArgsOpt(this->m_func));
+ Assert(!instrLdFld->DoStackArgsOpt());
if (propertySymOpnd->IsTypeCheckSeqCandidate())
{
@@ -6240,12 +6246,12 @@ Lowerer::GenerateLdFldWithCachedType(IR::Instr * instrLdFld, bool* continueAsHel
}
else
{
- opndSlotArray = this->LoadSlotArrayWithCachedLocalType(instrLdFld, propertySymOpnd, propertySymOpnd->IsTypeChecked() || emitTypeCheck);
+ opndSlotArray = this->LoadSlotArrayWithCachedLocalType(instrLdFld, propertySymOpnd);
}
// Load the value from the slot, getting the slot ID from the cache.
uint16 index = propertySymOpnd->GetSlotIndex();
- AssertOrFailFast(index != (uint16)-1);
+ Assert(index != -1);
if (opndSlotArray->IsRegOpnd())
{
@@ -6455,6 +6461,10 @@ Lowerer::GenerateCheckFixedFld(IR::Instr * instrChkFld)
{
Assert(labelBailOut == nullptr);
AssertMsg(!instrChkFld->HasBailOutInfo(), "Why does a direct fixed field check have bailout?");
+ if (propertySymOpnd->ProducesAuxSlotPtr())
+ {
+ this->GenerateAuxSlotPtrLoad(propertySymOpnd, instrChkFld);
+ }
instrChkFld->Remove();
return true;
}
@@ -6467,6 +6477,11 @@ Lowerer::GenerateCheckFixedFld(IR::Instr * instrChkFld)
instrChkFld->InsertBefore(labelBailOut);
instrChkFld->InsertAfter(labelDone);
+ if (propertySymOpnd->ProducesAuxSlotPtr())
+ {
+ this->GenerateAuxSlotPtrLoad(propertySymOpnd, labelDone->m_next);
+ }
+
// Convert the original instruction to a bailout.
Assert(instrChkFld->HasBailOutInfo());
@@ -6519,6 +6534,11 @@ Lowerer::GenerateCheckObjType(IR::Instr * instrChkObjType)
instrChkObjType->InsertBefore(labelBailOut);
instrChkObjType->InsertAfter(labelDone);
+ if (propertySymOpnd->ProducesAuxSlotPtr())
+ {
+ this->GenerateAuxSlotPtrLoad(propertySymOpnd, labelDone->m_next);
+ }
+
// Convert the original instruction to a bailout.
Assert(instrChkObjType->HasBailOutInfo());
@@ -6544,17 +6564,18 @@ Lowerer::LowerAdjustObjType(IR::Instr * instrAdjustObjType)
bool adjusted = this->GenerateAdjustBaseSlots(
instrAdjustObjType, baseOpnd, JITTypeHolder((JITType*)initialTypeOpnd->m_metadata), JITTypeHolder((JITType*)finalTypeOpnd->m_metadata));
- if (adjusted)
+ if (instrAdjustObjType->m_opcode == Js::OpCode::AdjustObjTypeReloadAuxSlotPtr)
{
+ Assert(adjusted);
+
// We reallocated the aux slots, so reload them if necessary.
StackSym * auxSlotPtrSym = baseOpnd->m_sym->GetAuxSlotPtrSym();
- if (auxSlotPtrSym)
- {
- IR::Opnd *opndIndir = IR::IndirOpnd::New(baseOpnd, Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachReg, this->m_func);
- IR::RegOpnd *regOpnd = IR::RegOpnd::New(auxSlotPtrSym, TyMachReg, this->m_func);
- regOpnd->SetIsJITOptimizedReg(true);
- Lowerer::InsertMove(regOpnd, opndIndir, instrAdjustObjType);
- }
+ Assert(auxSlotPtrSym);
+
+ IR::Opnd *opndIndir = IR::IndirOpnd::New(baseOpnd, Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachReg, this->m_func);
+ IR::RegOpnd *regOpnd = IR::RegOpnd::New(auxSlotPtrSym, TyMachReg, this->m_func);
+ regOpnd->SetIsJITOptimizedReg(true);
+ Lowerer::InsertMove(regOpnd, opndIndir, instrAdjustObjType);
}
this->m_func->PinTypeRef((JITType*)finalTypeOpnd->m_metadata);
@@ -6896,6 +6917,30 @@ Lowerer::LowerNewScGenFuncHomeObj(IR::Instr * newScFuncInstr)
return newScFuncInstr;
}
+IR::Instr *
+Lowerer::LowerStPropIdArrFromVar(IR::Instr * stPropIdInstr)
+{
+ IR::HelperCallOpnd *helperOpnd = IR::HelperCallOpnd::New(IR::HelperStPropIdArrFromVar, this->m_func);
+
+ IR::Opnd * src1 = stPropIdInstr->UnlinkSrc1();
+ stPropIdInstr->SetSrc1(helperOpnd);
+ stPropIdInstr->SetSrc2(src1);
+
+ return m_lowererMD.LowerCallHelper(stPropIdInstr);
+}
+
+IR::Instr *
+Lowerer::LowerRestify(IR::Instr * newRestInstr)
+{
+ IR::HelperCallOpnd *helperOpnd = IR::HelperCallOpnd::New(IR::HelperRestify, this->m_func);
+
+ IR::Opnd * src1 = newRestInstr->UnlinkSrc1();
+ newRestInstr->SetSrc1(helperOpnd);
+ newRestInstr->SetSrc2(src1);
+
+ return m_lowererMD.LowerCallHelper(newRestInstr);
+}
+
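// Review sketch: both wrappers above use the same repurposing pattern -- keep dst,
// move the helper method into src1, shift the original operand to src2, then let
// LowererMD::LowerCallHelper materialize the call. Roughly, in IR terms
// (illustrative shape; any script-context argument is assumed to be appended by
// the shared helper-call machinery, not by these wrappers):
//
//   before:  dst = Restify src1
//   after:   dst = CALL HelperRestify(src1)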
///----------------------------------------------------------------------------
///
/// Lowerer::LowerScopedLdFld
@@ -7222,11 +7267,11 @@ Lowerer::GenerateDirectFieldStore(IR::Instr* instrStFld, IR::PropertySymOpnd* pr
{
Func* func = instrStFld->m_func;
- IR::Opnd *opndSlotArray = this->LoadSlotArrayWithCachedLocalType(instrStFld, propertySymOpnd, propertySymOpnd->IsTypeChecked() || instrStFld->HasTypeCheckBailOut());
+ IR::Opnd *opndSlotArray = this->LoadSlotArrayWithCachedLocalType(instrStFld, propertySymOpnd);
// Store the value to the slot, getting the slot index from the cache.
uint16 index = propertySymOpnd->GetSlotIndex();
- AssertOrFailFast(index != (uint16)-1);
+ Assert(index != -1);
#if defined(RECYCLER_WRITE_BARRIER_JIT) && (defined(_M_IX86) || defined(_M_AMD64))
if (opndSlotArray->IsRegOpnd())
@@ -7375,19 +7420,6 @@ Lowerer::GenerateStFldWithCachedType(IR::Instr *instrStFld, bool* continueAsHelp
{
Assert(labelTypeCheckFailed == nullptr && labelBothTypeChecksFailed == nullptr);
AssertMsg(!instrStFld->HasBailOutInfo(), "Why does a direct field store have bailout?");
-
- if (propertySymOpnd->HasInitialType() && propertySymOpnd->HasFinalType())
- {
- bool isPrototypeTypeHandler = propertySymOpnd->GetInitialType()->GetTypeHandler()->IsPrototype();
- if (isPrototypeTypeHandler)
- {
- LoadScriptContext(instrStFld);
- m_lowererMD.LoadHelperArgument(instrStFld, IR::IntConstOpnd::New(propertySymOpnd->GetPropertyId(), TyInt32, m_func, true));
- IR::Instr * invalidateCallInstr = IR::Instr::New(Js::OpCode::Call, m_func);
- instrStFld->InsertBefore(invalidateCallInstr);
- m_lowererMD.ChangeToHelperCall(invalidateCallInstr, IR::HelperInvalidateProtoCaches);
- }
- }
instrStFld->Remove();
return true;
}
@@ -7420,6 +7452,9 @@ Lowerer::GenerateStFldWithCachedType(IR::Instr *instrStFld, bool* continueAsHelp
if (hasTypeCheckBailout)
{
+ AssertMsg(PHASE_ON1(Js::ObjTypeSpecIsolatedFldOpsWithBailOutPhase) || !PHASE_ON(Js::DeadStoreTypeChecksOnStoresPhase, this->m_func) || !propertySymOpnd->IsTypeDead() || propertySymOpnd->TypeCheckRequired(),
+ "Why does a field store have a type check bailout, if its type is dead?");
+
if (instrStFld->GetBailOutInfo()->bailOutInstr != instrStFld)
{
// Set the cache index in the bailout info so that the generated code will write it into the
@@ -7479,7 +7514,7 @@ Lowerer::GenerateCachedTypeCheck(IR::Instr *instrChk, IR::PropertySymOpnd *prope
// cache and no type check bailout. In the latter case, we can wind up doing expensive failed equivalence checks
// repeatedly and never rejit.
bool doEquivTypeCheck =
- (instrChk->HasEquivalentTypeCheckBailOut() && (propertySymOpnd->TypeCheckRequired() || propertySymOpnd == instrChk->GetDst())) ||
+ instrChk->HasEquivalentTypeCheckBailOut() ||
(propertySymOpnd->HasEquivalentTypeSet() &&
!(propertySymOpnd->HasFinalType() && propertySymOpnd->HasInitialType()) &&
!propertySymOpnd->MustDoMonoCheck() &&
@@ -7631,11 +7666,6 @@ Lowerer::GenerateCachedTypeCheck(IR::Instr *instrChk, IR::PropertySymOpnd *prope
InsertObjectPoison(regOpnd, branchInstr, instrChk, false);
}
- if (propertySymOpnd->NeedsAuxSlotPtrSymLoad())
- {
- propertySymOpnd->GenerateAuxSlotPtrSymLoad(instrChk);
- }
-
// Don't pin the type for polymorphic operations. The code can successfully execute even if this type is no longer referenced by any objects,
// as long as there are other objects with types equivalent on the properties referenced by this code. The type is kept alive until entry point
// installation by the JIT transfer data, and after that by the equivalent type cache, so it will stay alive unless or until it gets evicted
@@ -8190,11 +8220,6 @@ Lowerer::GenerateFieldStoreWithTypeChange(IR::Instr * instrStFld, IR::PropertySy
// Adjust instance slots, if necessary.
this->GenerateAdjustSlots(instrStFld, propertySymOpnd, initialType, finalType);
- if (propertySymOpnd->NeedsAuxSlotPtrSymLoad())
- {
- propertySymOpnd->GenerateAuxSlotPtrSymLoad(instrStFld);
- }
-
// We should never add properties to objects of static types.
Assert(Js::DynamicType::Is(finalType->GetTypeId()));
@@ -8209,16 +8234,6 @@ Lowerer::GenerateFieldStoreWithTypeChange(IR::Instr * instrStFld, IR::PropertySy
// Now do the store.
GenerateDirectFieldStore(instrStFld, propertySymOpnd);
-
- bool isPrototypeTypeHandler = initialType->GetTypeHandler()->IsPrototype();
- if (isPrototypeTypeHandler)
- {
- LoadScriptContext(instrStFld);
- m_lowererMD.LoadHelperArgument(instrStFld, IR::IntConstOpnd::New(propertySymOpnd->GetPropertyId(), TyInt32, m_func, true));
- IR::Instr * invalidateCallInstr = IR::Instr::New(Js::OpCode::Call, m_func);
- instrStFld->InsertBefore(invalidateCallInstr);
- m_lowererMD.ChangeToHelperCall(invalidateCallInstr, IR::HelperInvalidateProtoCaches);
- }
}
bool
@@ -8660,8 +8675,9 @@ Lowerer::LowerBinaryHelper(IR::Instr *instr, IR::JnHelperMethod helperMethod)
// instrPrev.
IR::Instr *instrPrev = nullptr;
- AssertMsg((Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg1Unsigned1 && !instr->GetDst()) ||
+ AssertMsg((Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg1Unsigned1) ||
Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg3 ||
+ Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg2 ||
Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg2Int1 ||
Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::ElementU ||
instr->m_opcode == Js::OpCode::InvalCachedScope, "Expected a binary instruction...");
@@ -8685,6 +8701,7 @@ Lowerer::LowerBinaryHelperMem(IR::Instr *instr, IR::JnHelperMethod helperMethod)
IR::Instr *instrPrev;
AssertMsg(Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg3 ||
+ Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg2 ||
Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg2Int1 ||
Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg1Unsigned1, "Expected a binary instruction...");
@@ -9002,8 +9019,6 @@ Lowerer::LowerStElemI(IR::Instr * instr, Js::PropertyOperationFlags flags, bool
AssertMsg(dst->IsIndirOpnd(), "Expected indirOpnd on StElementI");
- bool allowConvert = dst->AsIndirOpnd()->ConversionAllowed();
-
#if !FLOATVAR
if (dst->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsLikelyOptimizedTypedArray() && src1->IsRegOpnd())
{
@@ -9088,17 +9103,15 @@ Lowerer::LowerStElemI(IR::Instr * instr, Js::PropertyOperationFlags flags, bool
{
helperMethod =
srcType == TyVar ? IR::HelperOp_SetElementI_Int32 :
- srcType == TyInt32 ?
- (allowConvert ? IR::HelperOp_SetNativeIntElementI_Int32 : IR::HelperOp_SetNativeIntElementI_Int32_NoConvert) :
- (allowConvert ? IR::HelperOp_SetNativeFloatElementI_Int32 : IR::HelperOp_SetNativeFloatElementI_Int32_NoConvert) ;
+ srcType == TyInt32 ? IR::HelperOp_SetNativeIntElementI_Int32 :
+ IR::HelperOp_SetNativeFloatElementI_Int32;
}
else if (indexOpnd->GetType() == TyUint32)
{
helperMethod =
srcType == TyVar ? IR::HelperOp_SetElementI_UInt32 :
- srcType == TyInt32 ?
- (allowConvert ? IR::HelperOp_SetNativeIntElementI_UInt32 : IR::HelperOp_SetNativeIntElementI_UInt32_NoConvert) :
- (allowConvert ? IR::HelperOp_SetNativeFloatElementI_UInt32 : IR::HelperOp_SetNativeFloatElementI_UInt32_NoConvert) ;
+ srcType == TyInt32 ? IR::HelperOp_SetNativeIntElementI_UInt32 :
+ IR::HelperOp_SetNativeFloatElementI_UInt32;
}
else
{
@@ -9116,9 +9129,8 @@ Lowerer::LowerStElemI(IR::Instr * instr, Js::PropertyOperationFlags flags, bool
if (srcType != TyVar)
{
- helperMethod = srcType == TyInt32 ?
- (allowConvert ? IR::HelperOp_SetNativeIntElementI : IR::HelperOp_SetNativeIntElementI_NoConvert) :
- (allowConvert ? IR::HelperOp_SetNativeFloatElementI : IR::HelperOp_SetNativeFloatElementI_NoConvert);
+ helperMethod =
+ srcType == TyInt32 ? IR::HelperOp_SetNativeIntElementI : IR::HelperOp_SetNativeFloatElementI;
}
}
@@ -9167,7 +9179,7 @@ Lowerer::LowerLdElemI(IR::Instr * instr, IR::JnHelperMethod helperMethod, bool i
return instrPrev;
}
- if (!isHelper && instr->DoStackArgsOpt(this->m_func))
+ if (!isHelper && instr->DoStackArgsOpt())
{
IR::LabelInstr * labelLdElem = IR::LabelInstr::New(Js::OpCode::Label, instr->m_func);
// Pass in null for labelFallThru to only generate the LdHeapArgument call
@@ -17672,12 +17684,12 @@ Lowerer::GenerateFastLdElemI(IR::Instr *& ldElem, bool *instrIsInHelperBlockRef)
labelFallThru = ldElem->GetOrCreateContinueLabel();
labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true);
// If we know for sure (based on flow graph) we're loading from the arguments object, then ignore the (path-based) profile info.
- bool isNativeArrayLoad = !ldElem->DoStackArgsOpt(this->m_func) && indirOpnd->GetBaseOpnd()->GetValueType().IsLikelyNativeArray();
+ bool isNativeArrayLoad = !ldElem->DoStackArgsOpt() && indirOpnd->GetBaseOpnd()->GetValueType().IsLikelyNativeArray();
bool needMissingValueCheck = true;
bool emittedFastPath = false;
bool emitBailout = false;
- if (ldElem->DoStackArgsOpt(this->m_func))
+ if (ldElem->DoStackArgsOpt())
{
emittedFastPath = GenerateFastArgumentsLdElemI(ldElem, labelFallThru);
emitBailout = true;
@@ -18968,7 +18980,7 @@ Lowerer::GenerateFastLdLen(IR::Instr *ldLen, bool *instrIsInHelperBlockRef)
IR::LabelInstr *const labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true);
- if (ldLen->DoStackArgsOpt(this->m_func))
+ if (ldLen->DoStackArgsOpt())
{
GenerateFastArgumentsLdLen(ldLen, ldLen->GetOrCreateContinueLabel());
ldLen->Remove();
@@ -21672,7 +21684,7 @@ Lowerer::GenerateFastArgumentsLdElemI(IR::Instr* ldElem, IR::LabelInstr *labelFa
//labelCreateHeapArgs:
// ---Bail out to create Heap Arguments object
- Assert(ldElem->DoStackArgsOpt(this->m_func));
+ Assert(ldElem->DoStackArgsOpt());
IR::IndirOpnd *indirOpnd = ldElem->GetSrc1()->AsIndirOpnd();
bool isInlinee = ldElem->m_func->IsInlinee();
@@ -21803,7 +21815,7 @@ Lowerer::GenerateFastArgumentsLdLen(IR::Instr *ldLen, IR::LabelInstr* labelFallT
// JMP $fallthrough
//$helper:
- Assert(ldLen->DoStackArgsOpt(this->m_func));
+ Assert(ldLen->DoStackArgsOpt());
if(ldLen->m_func->IsInlinee())
{
@@ -22004,7 +22016,7 @@ Lowerer::GenerateFastLdFld(IR::Instr * const instrLdFld, IR::JnHelperMethod help
IR::Opnd * opndSrc = instrLdFld->GetSrc1();
AssertMsg(opndSrc->IsSymOpnd() && opndSrc->AsSymOpnd()->IsPropertySymOpnd() && opndSrc->AsSymOpnd()->m_sym->IsPropertySym(), "Expected PropertySym as src of LdFld");
- Assert(!instrLdFld->DoStackArgsOpt(this->m_func));
+ Assert(!instrLdFld->DoStackArgsOpt());
IR::PropertySymOpnd * propertySymOpnd = opndSrc->AsPropertySymOpnd();
PropertySym * propertySym = propertySymOpnd->m_sym->AsPropertySym();
@@ -24837,7 +24849,7 @@ void
Lowerer::GenerateJavascriptOperatorsIsConstructorGotoElse(IR::Instr *instrInsert, IR::RegOpnd *instanceRegOpnd, IR::LabelInstr *labelReturnTrue, IR::LabelInstr *labelReturnFalse)
{
// $ProxyLoop:
- // // if (!RecyclableObject::Is(instance)) { goto $ReturnFalse }; // omitted: RecyclableObject::Is(instance) always true
+ // // if (!VarIs<RecyclableObject>(instance)) { goto $ReturnFalse }; // omitted: VarIs<RecyclableObject>(instance) always true
// MOV s0, instance->type
// MOV s1, s0->typeId
// CMP s1, TypeIds_Proxy
@@ -25018,7 +25030,7 @@ Lowerer::GenerateLdHomeObjProto(IR::Instr* instr)
// TEST instance, instance
// JZ $Done
//
- // if (!RecyclableObject::Is(instance)) goto $Done
+ // if (!VarIs<RecyclableObject>(instance)) goto $Done
// MOV type, [instance+Offset(type)]
// MOV typeId, [type+Offset(typeId)]
// CMP typeId, TypeIds_Null
@@ -25033,7 +25045,7 @@ Lowerer::GenerateLdHomeObjProto(IR::Instr* instr)
// instance = ((RecyclableObject*)instance)->GetPrototype();
// if (instance == nullptr) goto $Done;
//
- // if (!RecyclableObject::Is(instance)) goto $Done
+ // if (!VarIs<RecyclableObject>(instance)) goto $Done
//
// MOV dst, instance
// $Done:
@@ -25475,12 +25487,6 @@ Lowerer::GenerateGetImmutableOrScriptUnreferencedString(IR::RegOpnd * strOpnd, I
return dstOpnd;
}
-void
-Lowerer::LowerConvPropertyKey(IR::Instr* instr)
-{
- LowerConvStrCommon(IR::HelperOp_ConvPropertyKey, instr);
-}
-
void
Lowerer::LowerConvStrCommon(IR::JnHelperMethod helper, IR::Instr * instr)
{
@@ -27689,23 +27695,31 @@ Lowerer::LowerConvNum(IR::Instr *instrLoad, bool noMathFastPath)
}
IR::Opnd *
-Lowerer::LoadSlotArrayWithCachedLocalType(IR::Instr * instrInsert, IR::PropertySymOpnd *propertySymOpnd, bool canReuseAuxSlotPtr)
+Lowerer::LoadSlotArrayWithCachedLocalType(IR::Instr * instrInsert, IR::PropertySymOpnd *propertySymOpnd)
{
IR::RegOpnd *opndBase = propertySymOpnd->CreatePropertyOwnerOpnd(m_func);
if (propertySymOpnd->UsesAuxSlot())
{
// If we use the auxiliary slot array, load it and return it
- if (canReuseAuxSlotPtr)
+ IR::RegOpnd * opndSlotArray;
+ if (propertySymOpnd->IsAuxSlotPtrSymAvailable() || propertySymOpnd->ProducesAuxSlotPtr())
{
+ // We want to reload and/or reuse the shared aux slot ptr sym
StackSym * auxSlotPtrSym = propertySymOpnd->GetAuxSlotPtrSym();
- if (auxSlotPtrSym != nullptr)
+ Assert(auxSlotPtrSym != nullptr);
+
+ opndSlotArray = IR::RegOpnd::New(auxSlotPtrSym, TyMachReg, this->m_func);
+ opndSlotArray->SetIsJITOptimizedReg(true);
+ if (!propertySymOpnd->ProducesAuxSlotPtr())
{
- IR::RegOpnd * opndAuxSlotPtr = IR::RegOpnd::New(auxSlotPtrSym, TyMachReg, this->m_func);
- opndAuxSlotPtr->SetIsJITOptimizedReg(true);
- return opndAuxSlotPtr;
+ // No need to reload
+ return opndSlotArray;
}
}
- IR::RegOpnd * opndSlotArray = IR::RegOpnd::New(TyMachReg, this->m_func);
+ else
+ {
+ opndSlotArray = IR::RegOpnd::New(TyMachReg, this->m_func);
+ }
IR::Opnd *opndIndir = IR::IndirOpnd::New(opndBase, Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachReg, this->m_func);
Lowerer::InsertMove(opndSlotArray, opndIndir, instrInsert);
@@ -28268,6 +28282,19 @@ Lowerer::AddBailoutToHelperCallInstr(IR::Instr * helperCallInstr, BailOutInfo *
return helperCallInstr;
}
+void
+Lowerer::GenerateAuxSlotPtrLoad(IR::PropertySymOpnd *propertySymOpnd, IR::Instr * instrInsert)
+{
+ StackSym * auxSlotPtrSym = propertySymOpnd->GetAuxSlotPtrSym();
+ Assert(auxSlotPtrSym);
+ Func * func = instrInsert->m_func;
+
+ IR::Opnd *opndIndir = IR::IndirOpnd::New(propertySymOpnd->CreatePropertyOwnerOpnd(func), Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachReg, func);
+ IR::RegOpnd *regOpnd = IR::RegOpnd::New(auxSlotPtrSym, TyMachReg, func);
+ regOpnd->SetIsJITOptimizedReg(true);
+ InsertMove(regOpnd, opndIndir, instrInsert);
+}
+
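// Review sketch: GenerateAuxSlotPtrLoad emits a single reload of the auxiliary
// slot array pointer into the shared auxSlotPtrSym -- roughly this move
// (illustrative pseudo-assembly):
//
//   MOV auxSlotPtr, [propertyOwner + DynamicObject::GetOffsetOfAuxSlots()]
//
// The GenerateCheckFixedFld / GenerateCheckObjType hunks earlier in this file's
// diff call it whenever the property operand ProducesAuxSlotPtr(), so downstream
// slot accesses (see LoadSlotArrayWithCachedLocalType above) can use the cached
// pointer instead of re-reading it from the object.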
void
Lowerer::InsertAndLegalize(IR::Instr * instr, IR::Instr* insertBeforeInstr)
{
diff --git a/deps/chakrashim/core/lib/Backend/Lower.h b/deps/chakrashim/core/lib/Backend/Lower.h
index 569ab75c371..4625dc5a057 100644
--- a/deps/chakrashim/core/lib/Backend/Lower.h
+++ b/deps/chakrashim/core/lib/Backend/Lower.h
@@ -144,6 +144,8 @@ class Lowerer
IR::Instr * LowerNewScGenFunc(IR::Instr *instr);
IR::Instr * LowerNewScFuncHomeObj(IR::Instr *instr);
IR::Instr * LowerNewScGenFuncHomeObj(IR::Instr *instr);
+ IR::Instr * LowerStPropIdArrFromVar(IR::Instr *instr);
+ IR::Instr * LowerRestify(IR::Instr *instr);
IR::Instr* GenerateCompleteStFld(IR::Instr* instr, bool emitFastPath, IR::JnHelperMethod monoHelperAfterFastPath, IR::JnHelperMethod polyHelperAfterFastPath,
IR::JnHelperMethod monoHelperWithoutFastPath, IR::JnHelperMethod polyHelperWithoutFastPath, bool withPutFlags, Js::PropertyOperationFlags flags);
bool GenerateStFldWithCachedType(IR::Instr * instrStFld, bool* continueAsHelperOut, IR::LabelInstr** labelHelperOut, IR::RegOpnd** typeOpndOut);
@@ -374,11 +376,11 @@ class Lowerer
void GenerateFastBrBReturn(IR::Instr * instr);
public:
- static IR::Instr *Lowerer::HoistIndirOffset(IR::Instr* instr, IR::IndirOpnd *indirOpnd, RegNum regNum);
- static IR::Instr *Lowerer::HoistIndirOffsetAsAdd(IR::Instr* instr, IR::IndirOpnd *orgOpnd, IR::Opnd *baseOpnd, int offset, RegNum regNum);
- static IR::Instr *Lowerer::HoistIndirIndexOpndAsAdd(IR::Instr* instr, IR::IndirOpnd *orgOpnd, IR::Opnd *baseOpnd, IR::Opnd *indexOpnd, RegNum regNum);
- static IR::Instr *Lowerer::HoistSymOffset(IR::Instr *instr, IR::SymOpnd *symOpnd, RegNum baseReg, uint32 offset, RegNum regNum);
- static IR::Instr *Lowerer::HoistSymOffsetAsAdd(IR::Instr* instr, IR::SymOpnd *orgOpnd, IR::Opnd *baseOpnd, int offset, RegNum regNum);
+ static IR::Instr *HoistIndirOffset(IR::Instr* instr, IR::IndirOpnd *indirOpnd, RegNum regNum);
+ static IR::Instr *HoistIndirOffsetAsAdd(IR::Instr* instr, IR::IndirOpnd *orgOpnd, IR::Opnd *baseOpnd, int offset, RegNum regNum);
+ static IR::Instr *HoistIndirIndexOpndAsAdd(IR::Instr* instr, IR::IndirOpnd *orgOpnd, IR::Opnd *baseOpnd, IR::Opnd *indexOpnd, RegNum regNum);
+ static IR::Instr *HoistSymOffset(IR::Instr *instr, IR::SymOpnd *symOpnd, RegNum baseReg, uint32 offset, RegNum regNum);
+ static IR::Instr *HoistSymOffsetAsAdd(IR::Instr* instr, IR::SymOpnd *orgOpnd, IR::Opnd *baseOpnd, int offset, RegNum regNum);
static IR::LabelInstr * InsertLabel(const bool isHelper, IR::Instr *const insertBeforeInstr);
@@ -624,7 +626,7 @@ class Lowerer
bool GenerateFastArgumentsLdElemI(IR::Instr* ldElem, IR::LabelInstr *labelFallThru);
bool GenerateFastRealStackArgumentsLdLen(IR::Instr *ldLen);
bool GenerateFastArgumentsLdLen(IR::Instr *ldLen, IR::LabelInstr* labelFallThru);
- static const uint16 GetFormalParamOffset() { /*formal start after frame pointer, return address, function object, callInfo*/ return 4;};
+ static uint16 GetFormalParamOffset() { /*formal start after frame pointer, return address, function object, callInfo*/ return 4;};
IR::RegOpnd* GenerateFunctionTypeFromFixedFunctionObject(IR::Instr *callInstr, IR::Opnd* functionObjOpnd);
@@ -634,6 +636,7 @@ class Lowerer
void GenerateSetObjectTypeFromInlineCache(IR::Instr * instrToInsertBefore, IR::RegOpnd * opndBase, IR::RegOpnd * opndInlineCache, bool isTypeTagged);
bool GenerateFastStFld(IR::Instr * const instrStFld, IR::JnHelperMethod helperMethod, IR::JnHelperMethod polymorphicHelperMethod,
IR::LabelInstr ** labelBailOut, IR::RegOpnd* typeOpnd, bool* pIsHelper, IR::LabelInstr** pLabelHelper, bool withPutFlags = false, Js::PropertyOperationFlags flags = Js::PropertyOperation_None);
+ void GenerateAuxSlotPtrLoad(IR::PropertySymOpnd *propertySymOpnd, IR::Instr *insertInstr);
bool GenerateFastStFldForCustomProperty(IR::Instr *const instr, IR::LabelInstr * *const labelHelperRef);
@@ -670,8 +673,6 @@ class Lowerer
void LowerConvPrimStr(IR::Instr * instr);
void LowerConvStrCommon(IR::JnHelperMethod helper, IR::Instr * instr);
- void LowerConvPropertyKey(IR::Instr* instr);
-
void GenerateRecyclerAlloc(IR::JnHelperMethod allocHelper, size_t allocSize, IR::RegOpnd* newObjDst, IR::Instr* insertionPointInstr, bool inOpHelper = false);
template
@@ -750,7 +751,7 @@ class Lowerer
static IR::RegOpnd * LoadGeneratorArgsPtr(IR::Instr *instrInsert);
static IR::Instr * LoadGeneratorObject(IR::Instr *instrInsert);
- IR::Opnd * LoadSlotArrayWithCachedLocalType(IR::Instr * instrInsert, IR::PropertySymOpnd *propertySymOpnd, bool canReuseAuxSlotPtr);
+ IR::Opnd * LoadSlotArrayWithCachedLocalType(IR::Instr * instrInsert, IR::PropertySymOpnd *propertySymOpnd);
IR::Opnd * LoadSlotArrayWithCachedProtoType(IR::Instr * instrInsert, IR::PropertySymOpnd *propertySymOpnd);
IR::Instr * LowerLdAsmJsEnv(IR::Instr *instr);
IR::Instr * LowerLdEnv(IR::Instr *instr);
diff --git a/deps/chakrashim/core/lib/Backend/LowerMDShared.cpp b/deps/chakrashim/core/lib/Backend/LowerMDShared.cpp
index a5d128e8eb4..85ae2db01ed 100644
--- a/deps/chakrashim/core/lib/Backend/LowerMDShared.cpp
+++ b/deps/chakrashim/core/lib/Backend/LowerMDShared.cpp
@@ -191,12 +191,18 @@ LowererMD::LowerCallHelper(IR::Instr *instrCall)
IR::JnHelperMethod helperMethod = instrCall->GetSrc1()->AsHelperCallOpnd()->m_fnHelper;
instrCall->FreeSrc1();
-
+
#ifndef _M_X64
+ bool callHasDst = instrCall->GetDst() != nullptr;
prevInstr = ChangeToHelperCall(instrCall, helperMethod);
-#endif
-
+ if (callHasDst)
+ {
+ prevInstr = prevInstr->m_prev;
+ }
+ Assert(prevInstr->GetSrc1()->IsHelperCallOpnd() && prevInstr->GetSrc1()->AsHelperCallOpnd()->m_fnHelper == helperMethod);
+#else
prevInstr = instrCall;
+#endif
while (argOpnd)
{
@@ -206,11 +212,14 @@ LowererMD::LowerCallHelper(IR::Instr *instrCall)
Assert(regArg->m_sym->m_isSingleDef);
IR::Instr *instrArg = regArg->m_sym->m_instrDef;
- Assert(instrArg->m_opcode == Js::OpCode::ArgOut_A ||
- (helperMethod == IR::JnHelperMethod::HelperOP_InitCachedScope && instrArg->m_opcode == Js::OpCode::ExtendArg_A) ||
- (helperMethod == IR::JnHelperMethod::HelperScrFunc_OP_NewScFuncHomeObj && instrArg->m_opcode == Js::OpCode::ExtendArg_A) ||
- (helperMethod == IR::JnHelperMethod::HelperScrFunc_OP_NewScGenFuncHomeObj && instrArg->m_opcode == Js::OpCode::ExtendArg_A)
- );
+ Assert(instrArg->m_opcode == Js::OpCode::ArgOut_A || instrArg->m_opcode == Js::OpCode::ExtendArg_A &&
+ (
+ helperMethod == IR::JnHelperMethod::HelperOP_InitCachedScope ||
+ helperMethod == IR::JnHelperMethod::HelperScrFunc_OP_NewScFuncHomeObj ||
+ helperMethod == IR::JnHelperMethod::HelperScrFunc_OP_NewScGenFuncHomeObj ||
+ helperMethod == IR::JnHelperMethod::HelperRestify ||
+ helperMethod == IR::JnHelperMethod::HelperStPropIdArrFromVar
+ ));
prevInstr = LoadHelperArgument(prevInstr, instrArg->GetSrc1());
argOpnd = instrArg->GetSrc2();
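The rewritten assert relies on && binding tighter than ||, so it groups as `ArgOut_A || (ExtendArg_A && helperMethod is one of the listed helpers)`, which is the intended reading. A one-line check of that precedence:

    static_assert((true || false && false) == true, "parsed as true || (false && false)");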
@@ -1284,7 +1293,7 @@ void LowererMD::ChangeToIMul(IR::Instr *const instr, bool hasOverflowCheck)
EmitInt4Instr(instr); // IMUL2
}
-const uint16
+uint16
LowererMD::GetFormalParamOffset()
{
//In x86\x64 formal params were offset from EBP by the EBP chain, return address, and the 2 non-user params
@@ -2644,9 +2653,7 @@ void LowererMD::GenerateFastCmXx(IR::Instr *instr)
bool isFloatSrc = src1->IsFloat();
bool isInt64Src = src1->IsInt64();
Assert(!isFloatSrc || src2->IsFloat());
- Assert(!isFloatSrc || isIntDst);
Assert(!isInt64Src || src2->IsInt64());
- Assert(!isInt64Src || isIntDst);
Assert(!isFloatSrc || AutoSystemInfo::Data.SSE2Available());
IR::Opnd *opnd;
IR::Instr *newInstr;
@@ -2675,8 +2682,11 @@ void LowererMD::GenerateFastCmXx(IR::Instr *instr)
done = instr;
}
+ bool isNegOpt = instr->m_opcode == Js::OpCode::CmNeq_A || instr->m_opcode == Js::OpCode::CmSrNeq_A;
+ bool initDstToFalse = true;
if (isIntDst)
{
+ // Fast path for int src with destination type specialized to int
// reg = MOV 0 will get peeped to XOR reg, reg which sets the flags.
// Put the MOV before the CMP, but use a tmp if dst == src1/src2
if (dst->IsEqual(src1) || dst->IsEqual(src2))
@@ -2684,7 +2694,7 @@ void LowererMD::GenerateFastCmXx(IR::Instr *instr)
tmp = IR::RegOpnd::New(dst->GetType(), this->m_func);
}
// dst = MOV 0
- if (isFloatSrc && instr->m_opcode == Js::OpCode::CmNeq_A)
+ if (isFloatSrc && isNegOpt)
{
opnd = IR::IntConstOpnd::New(1, TyInt32, this->m_func);
}
@@ -2694,6 +2704,22 @@ void LowererMD::GenerateFastCmXx(IR::Instr *instr)
}
m_lowerer->InsertMove(tmp, opnd, done);
}
+ else if (isFloatSrc)
+ {
+ // Fast path for float src when destination is a var
+ // Assign a default value to the destination in case either src is NaN
+ Assert(dst->IsVar());
+ if (isNegOpt)
+ {
+ opnd = this->m_lowerer->LoadLibraryValueOpnd(instr, LibraryValue::ValueTrue);
+ }
+ else
+ {
+ opnd = this->m_lowerer->LoadLibraryValueOpnd(instr, LibraryValue::ValueFalse);
+ initDstToFalse = false;
+ }
+ Lowerer::InsertMove(tmp, opnd, done);
+ }
Js::OpCode cmpOp;
if (isFloatSrc)
@@ -2724,7 +2750,9 @@ void LowererMD::GenerateFastCmXx(IR::Instr *instr)
done->InsertBefore(newInstr);
}
- if (!isIntDst)
+ // Skip emitting the False value here when it was already assigned to the
+ // destination in the float-source block above.
+ if (!isIntDst && initDstToFalse)
{
opnd = this->m_lowerer->LoadLibraryValueOpnd(instr, LibraryValue::ValueFalse);
Lowerer::InsertMove(tmp, opnd, done);
@@ -2735,11 +2763,13 @@ void LowererMD::GenerateFastCmXx(IR::Instr *instr)
{
case Js::OpCode::CmEq_I4:
case Js::OpCode::CmEq_A:
+ case Js::OpCode::CmSrEq_A:
useCC = isIntDst ? Js::OpCode::SETE : Js::OpCode::CMOVE;
break;
case Js::OpCode::CmNeq_I4:
case Js::OpCode::CmNeq_A:
+ case Js::OpCode::CmSrNeq_A:
useCC = isIntDst ? Js::OpCode::SETNE : Js::OpCode::CMOVNE;
break;
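In rough C terms, the int-destination path zeroes the register and SETccs into it, while the new float-source/Var-destination path preloads the boolean that an unordered (NaN) compare should produce and lets CMOVcc overwrite it otherwise. A behavioral sketch under those assumptions, not the emitted code:

    int cm_lt_int(double a, double b) {
        int r = 0;                 // dst = MOV 0 (peeped to XOR reg, reg)
        if (a < b) r = 1;          // SETcc after the compare
        return r;
    }
    bool cm_neq_var(double a, double b) {
        bool r = true;             // preload True for CmNeq/CmSrNeq (isNegOpt) ...
        if (a == b) r = false;     // ... NaN makes a == b false, so the preload survives
        return r;                  // CmEq-style ops preload False instead
    }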
@@ -3823,7 +3853,7 @@ LowererMD::GenerateFastLdMethodFromFlags(IR::Instr * instrLdFld)
IR::PropertySymOpnd * propertySymOpnd = opndSrc->AsPropertySymOpnd();
- Assert(!instrLdFld->DoStackArgsOpt(this->m_func));
+ Assert(!instrLdFld->DoStackArgsOpt());
if (propertySymOpnd->IsTypeCheckSeqCandidate())
{
@@ -7780,12 +7810,25 @@ void LowererMD::GenerateFastInlineBuiltInCall(IR::Instr* instr, IR::JnHelperMeth
switch (instr->m_opcode)
{
case Js::OpCode::InlineMathSqrt:
- // Sqrt maps directly to the SSE2 instruction.
- // src and dst should already be XMM registers, all we need is just change the opcode.
- Assert(helperMethod == (IR::JnHelperMethod)0);
- Assert(instr->GetSrc2() == nullptr);
- instr->m_opcode = instr->GetSrc1()->IsFloat64() ? Js::OpCode::SQRTSD : Js::OpCode::SQRTSS;
- break;
+ {
+ // Sqrt maps directly to the SSE2 instruction.
+ // src and dst should already be XMM registers; all we need to do is change the opcode.
+ Assert(helperMethod == (IR::JnHelperMethod)0);
+ Assert(instr->GetSrc2() == nullptr);
+ instr->m_opcode = instr->GetSrc1()->IsFloat64() ? Js::OpCode::SQRTSD : Js::OpCode::SQRTSS;
+
+ IR::Opnd *src = instr->GetSrc1();
+ IR::Opnd *dst = instr->GetDst();
+ if (!src->IsEqual(dst))
+ {
+ Assert(src->IsRegOpnd() && dst->IsRegOpnd());
+ // Force the source to be the same as the destination to break a false dependency on the register
+ Lowerer::InsertMove(dst, src, instr, false /* generateWriteBarrier */);
+ instr->ReplaceSrc1(dst);
+ }
+
+ break;
+ }
case Js::OpCode::InlineMathAbs:
Assert(helperMethod == (IR::JnHelperMethod)0);
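The inserted MOV works around a classic partial-register hazard: SQRTSD/SQRTSS write only the low lane of the destination, so the instruction carries a false dependency on the destination's stale upper bits. A sketch of the fix with SSE2 intrinsics (assuming an x64 target):

    #include <emmintrin.h>
    __m128d sqrt_no_false_dep(__m128d dst, __m128d src) {
        dst = src;                     // MOVAPS dst, src -- breaks the dependency on dst's old value
        return _mm_sqrt_sd(dst, dst);  // SQRTSD dst, dst
    }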
@@ -8233,7 +8276,7 @@ void LowererMD::GenerateFastInlineBuiltInCall(IR::Instr* instr, IR::JnHelperMeth
// CMP src1, src2
if(dst->IsInt32())
- {
+ {
if(min)
{
// JLT $continueLabel
@@ -8698,7 +8741,6 @@ LowererMD::InsertCmovCC(const Js::OpCode opCode, IR::Opnd * dst, IR::Opnd* src1,
return instr;
}
-
IR::BranchInstr*
LowererMD::InsertMissingItemCompareBranch(IR::Opnd* compareSrc, IR::Opnd* missingItemOpnd, Js::OpCode opcode, IR::LabelInstr* target, IR::Instr* insertBeforeInstr)
{
diff --git a/deps/chakrashim/core/lib/Backend/LowerMDShared.h b/deps/chakrashim/core/lib/Backend/LowerMDShared.h
index 2b57f71de31..8e8762e8a87 100644
--- a/deps/chakrashim/core/lib/Backend/LowerMDShared.h
+++ b/deps/chakrashim/core/lib/Backend/LowerMDShared.h
@@ -63,7 +63,7 @@ class LowererMD
static void ChangeToSub(IR::Instr *const instr, const bool needFlags);
static void ChangeToShift(IR::Instr *const instr, const bool needFlags);
static void ChangeToIMul(IR::Instr *const instr, const bool hasOverflowCheck = false);
- static const uint16 GetFormalParamOffset();
+ static uint16 GetFormalParamOffset();
static const Js::OpCode MDUncondBranchOpcode;
static const Js::OpCode MDMultiBranchOpcode;
static const Js::OpCode MDExtend32Opcode;
diff --git a/deps/chakrashim/core/lib/Backend/LowerMDSharedSimd128.cpp b/deps/chakrashim/core/lib/Backend/LowerMDSharedSimd128.cpp
index f2a8c5a9d4b..894468ef8ae 100644
--- a/deps/chakrashim/core/lib/Backend/LowerMDSharedSimd128.cpp
+++ b/deps/chakrashim/core/lib/Backend/LowerMDSharedSimd128.cpp
@@ -1162,7 +1162,7 @@ IR::Instr* LowererMD::Simd128LowerShift(IR::Instr *instr)
IR::RegOpnd *tmp1 = IR::RegOpnd::New(src1->GetType(), m_func);
IR::RegOpnd *tmp2 = IR::RegOpnd::New(src1->GetType(), m_func);
- //Shift amount: The shift amout is masked by [ElementSize] * 8
+ //Shift amount: The shift amount is masked by [ElementSize] * 8
//The masked Shift amount is moved to xmm register
//AND shamt, shmask, shamt
//MOVD tmp0, shamt
@@ -3306,4 +3306,4 @@ BYTE LowererMD::Simd128GetTypedArrBytesPerElem(ValueType arrType)
return (1 << Lowerer::GetArrayIndirScale(arrType));
}
-#endif
\ No newline at end of file
+#endif
diff --git a/deps/chakrashim/core/lib/Backend/NativeCodeGenerator.cpp b/deps/chakrashim/core/lib/Backend/NativeCodeGenerator.cpp
index 8925d809cbe..62a65e6f1e3 100644
--- a/deps/chakrashim/core/lib/Backend/NativeCodeGenerator.cpp
+++ b/deps/chakrashim/core/lib/Backend/NativeCodeGenerator.cpp
@@ -401,7 +401,7 @@ void NativeCodeGenerator::Jit_TransitionFromSimpleJit(void *const framePointer)
{
JIT_HELPER_NOT_REENTRANT_NOLOCK_HEADER(TransitionFromSimpleJit);
TransitionFromSimpleJit(
- Js::ScriptFunction::FromVar(Js::JavascriptCallStackLayout::FromFramePointer(framePointer)->functionObject));
+ Js::VarTo<Js::ScriptFunction>(Js::JavascriptCallStackLayout::FromFramePointer(framePointer)->functionObject));
JIT_HELPER_END(TransitionFromSimpleJit);
}
@@ -740,7 +740,7 @@ NativeCodeGenerator::IsValidVar(const Js::Var var, Recycler *const recycler)
}
#endif
- RecyclableObject *const recyclableObject = RecyclableObject::UnsafeFromVar(var);
+ RecyclableObject *const recyclableObject = UnsafeVarTo<RecyclableObject>(var);
if(!recycler->IsValidObject(recyclableObject, sizeof(*recyclableObject)))
{
return false;
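Throughout this file the static T::Is/T::FromVar/T::UnsafeFromVar helpers migrate to the templated VarIs/VarTo/UnsafeVarTo casts. A minimal sketch of that shape, using simplified stand-ins rather than the real runtime definitions:

    #include <cassert>
    using Var = void*;                        // stand-in for Js::Var
    template <typename T> bool VarIs(Var v);  // tag + type-id test; definition elided
    template <typename T> T* VarTo(Var v) {
        assert(VarIs<T>(v));                  // UnsafeVarTo<T> is the same cast minus the check
        return static_cast<T*>(v);
    }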
@@ -968,7 +968,7 @@ NativeCodeGenerator::CodeGen(PageAllocator * pageAllocator, CodeGenWorkItem* wor
throw Js::OperationAbortedException();
}
-
+
#if ENABLE_OOP_NATIVE_CODEGEN
if (JITManager::GetJITManager()->IsOOPJITEnabled())
{
@@ -1024,7 +1024,7 @@ NativeCodeGenerator::CodeGen(PageAllocator * pageAllocator, CodeGenWorkItem* wor
Output::Flush();
}
- epInfo->GetNativeEntryPointData()->SetFrameHeight(jitWriteData.frameHeight);
+ epInfo->GetNativeEntryPointData()->SetFrameHeight(jitWriteData.frameHeight);
if (workItem->Type() == JsFunctionType)
{
@@ -1234,10 +1234,6 @@ NativeCodeGenerator::CodeGen(PageAllocator * pageAllocator, CodeGenWorkItem* wor
{
body->GetAnyDynamicProfileInfo()->DisableTrackCompoundedIntOverflow();
}
- if (jitWriteData.disableMemOp)
- {
- body->GetAnyDynamicProfileInfo()->DisableMemOp();
- }
}
if (jitWriteData.disableInlineApply)
@@ -2309,7 +2305,7 @@ NativeCodeGenerator::GatherCodeGenData(
{
// TODO: For now, we create the native entry point data and the jit transfer data when we queue up
// the entry point for code gen, but do not clear/free them when the work item gets knocked off the queue
- // without code gen happening.
+ // without code gen happening.
nativeEntryPointData = entryPoint->EnsureNativeEntryPointData();
nativeEntryPointData->EnsureJitTransferData(recycler);
@@ -2429,11 +2425,11 @@ NativeCodeGenerator::GatherCodeGenData(
{
Js::InlineCache *inlineCache = nullptr;
- if(function && Js::ScriptFunctionWithInlineCache::Is(function))
+ if(function && Js::VarIs<Js::ScriptFunctionWithInlineCache>(function))
{
- if (Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCaches() != nullptr)
+ if (Js::VarTo<Js::ScriptFunctionWithInlineCache>(function)->GetInlineCaches() != nullptr)
{
- inlineCache = Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCache(i);
+ inlineCache = Js::VarTo<Js::ScriptFunctionWithInlineCache>(function)->GetInlineCache(i);
}
}
else
@@ -2565,11 +2561,11 @@ NativeCodeGenerator::GatherCodeGenData(
}
}
// Even if the FldInfo says that the field access may be polymorphic, be optimistic that if the function object has inline caches, they'll be monomorphic
- else if(function && Js::ScriptFunctionWithInlineCache::Is(function) && (cacheType & Js::FldInfo_InlineCandidate || !polymorphicCacheOnFunctionBody))
+ else if(function && Js::VarIs<Js::ScriptFunctionWithInlineCache>(function) && (cacheType & Js::FldInfo_InlineCandidate || !polymorphicCacheOnFunctionBody))
{
- if (Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCaches() != nullptr)
+ if (Js::VarTo<Js::ScriptFunctionWithInlineCache>(function)->GetInlineCaches() != nullptr)
{
- Js::InlineCache *inlineCache = Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCache(i);
+ Js::InlineCache *inlineCache = Js::VarTo<Js::ScriptFunctionWithInlineCache>(function)->GetInlineCache(i);
ObjTypeSpecFldInfo* objTypeSpecFldInfo = nullptr;
if(!PHASE_OFF(Js::ObjTypeSpecPhase, functionBody) || !PHASE_OFF(Js::FixedMethodsPhase, functionBody))
@@ -2702,8 +2698,8 @@ NativeCodeGenerator::GatherCodeGenData(
// Clone polymorphic inline caches for runtime usage in this inlinee. The JIT should only use the pointers to
// the inline caches, as their cached data is not guaranteed to be stable while jitting.
Js::InlineCache *const inlineCache =
- function && Js::ScriptFunctionWithInlineCache::Is(function)
- ? (Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCaches() != nullptr ? Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCache(i) : nullptr)
+ function && Js::VarIs<Js::ScriptFunctionWithInlineCache>(function)
+ ? (Js::VarTo<Js::ScriptFunctionWithInlineCache>(function)->GetInlineCaches() != nullptr ? Js::VarTo<Js::ScriptFunctionWithInlineCache>(function)->GetInlineCache(i) : nullptr)
: functionBody->GetInlineCache(i);
if (inlineCache != nullptr)
@@ -2832,11 +2828,11 @@ NativeCodeGenerator::GatherCodeGenData(
Js::InlineCache * inlineCache = nullptr;
if ((ldFldInlineCacheIndex != Js::Constants::NoInlineCacheIndex) && (ldFldInlineCacheIndex < functionBody->GetInlineCacheCount()))
{
- if(function && Js::ScriptFunctionWithInlineCache::Is(function))
+ if(function && Js::VarIs<Js::ScriptFunctionWithInlineCache>(function))
{
- if (Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCaches() != nullptr)
+ if (Js::VarTo<Js::ScriptFunctionWithInlineCache>(function)->GetInlineCaches() != nullptr)
{
- inlineCache = Js::ScriptFunctionWithInlineCache::FromVar(function)->GetInlineCache(ldFldInlineCacheIndex);
+ inlineCache = Js::VarTo<Js::ScriptFunctionWithInlineCache>(function)->GetInlineCache(ldFldInlineCacheIndex);
}
}
else
@@ -3133,7 +3129,7 @@ NativeCodeGenerator::GatherCodeGenData(Js::FunctionBody *const topFunctionBody,
topFunctionBody->GetDisplayName(), topFunctionBody->GetDebugNumberSet(debugStringBuffer), functionBody->GetDisplayName(), functionBody->GetDebugNumberSet(debugStringBuffer2));
}
#endif
- GatherCodeGenData(recycler, topFunctionBody, functionBody, entryPoint, inliningDecider, objTypeSpecFldInfoList, jitTimeData, nullptr, function ? Js::JavascriptFunction::FromVar(function) : nullptr, 0);
+ GatherCodeGenData(recycler, topFunctionBody, functionBody, entryPoint, inliningDecider, objTypeSpecFldInfoList, jitTimeData, nullptr, function ? Js::VarTo<Js::JavascriptFunction>(function) : nullptr, 0);
jitTimeData->sharedPropertyGuards = entryPoint->GetNativeEntryPointData()->GetSharedPropertyGuards(recycler, jitTimeData->sharedPropertyGuardCount);
@@ -3283,7 +3279,7 @@ void
FreeNativeCodeGenAllocation(Js::ScriptContext *scriptContext, Js::JavascriptMethod codeAddress, Js::JavascriptMethod thunkAddress)
{
if (!scriptContext->GetNativeCodeGenerator())
- {
+ {
return;
}
diff --git a/deps/chakrashim/core/lib/Backend/NativeEntryPointData.cpp b/deps/chakrashim/core/lib/Backend/NativeEntryPointData.cpp
index 660df2a2782..2f85b292c69 100644
--- a/deps/chakrashim/core/lib/Backend/NativeEntryPointData.cpp
+++ b/deps/chakrashim/core/lib/Backend/NativeEntryPointData.cpp
@@ -321,21 +321,20 @@ NativeEntryPointData::CleanupXDataInfo()
{
if (this->xdataInfo != nullptr)
{
- XDataAllocator::Unregister(this->xdataInfo);
#ifdef _WIN32
- if (AutoSystemInfo::Data.IsWin8OrLater())
+ if (this->xdataInfo->functionTable
+ && !DelayDeletingFunctionTable::AddEntry(this->xdataInfo->functionTable))
{
- // transfers ownership of xdataInfo object
- if(!DelayDeletingFunctionTable::AddEntry(this->xdataInfo))
- {
- DelayDeletingFunctionTable::DeleteFunctionTable(this->xdataInfo);
- }
+ DelayDeletingFunctionTable::DeleteFunctionTable(this->xdataInfo->functionTable);
}
- else
+#endif
+ XDataAllocator::Unregister(this->xdataInfo);
+#if defined(_M_ARM)
+ if (JITManager::GetJITManager()->IsOOPJITEnabled())
+#endif
{
HeapDelete(this->xdataInfo);
}
-#endif
this->xdataInfo = nullptr;
}
}
@@ -567,4 +566,4 @@ OOPNativeEntryPointData::DeleteNativeDataBuffer(char * nativeDataBuffer)
midl_user_free(buffer);
}
-#endif
\ No newline at end of file
+#endif
diff --git a/deps/chakrashim/core/lib/Backend/NativeEntryPointData.h b/deps/chakrashim/core/lib/Backend/NativeEntryPointData.h
index c9f017a9460..915cfa80d8a 100644
--- a/deps/chakrashim/core/lib/Backend/NativeEntryPointData.h
+++ b/deps/chakrashim/core/lib/Backend/NativeEntryPointData.h
@@ -71,8 +71,7 @@ class NativeEntryPointData
#if PDATA_ENABLED
XDataAllocation* GetXDataInfo() { return this->xdataInfo; }
- void CleanupXDataInfo();
- void SetXDataInfo(XDataAllocation* xdataInfo) { this->xdataInfo = xdataInfo; }
+ void SetXDataInfo(XDataAllocation* xdataInfo) { this->xdataInfo = xdataInfo; }
#endif
private:
void RegisterEquivalentTypeCaches(Js::ScriptContext * scriptContext, Js::EntryPointInfo * entryPointInfo);
@@ -80,6 +79,9 @@ class NativeEntryPointData
void FreePropertyGuards();
void FreeNativeCode(Js::ScriptContext * scriptContext, bool isShutdown);
+#if PDATA_ENABLED
+ void CleanupXDataInfo();
+#endif
FieldNoBarrier(Js::JavascriptMethod) nativeAddress;
FieldNoBarrier(Js::JavascriptMethod) thunkAddress;
@@ -212,4 +214,4 @@ class OOPNativeEntryPointData : public NativeEntryPointData
#endif
};
-#endif
\ No newline at end of file
+#endif
diff --git a/deps/chakrashim/core/lib/Backend/ObjTypeSpecFldInfo.cpp b/deps/chakrashim/core/lib/Backend/ObjTypeSpecFldInfo.cpp
index 86386733ba5..a4436fb4b97 100644
--- a/deps/chakrashim/core/lib/Backend/ObjTypeSpecFldInfo.cpp
+++ b/deps/chakrashim/core/lib/Backend/ObjTypeSpecFldInfo.cpp
@@ -45,7 +45,7 @@ ObjTypeSpecFldInfo::NeedsDepolymorphication() const
return m_data.polymorphicInfoArray != nullptr;
}
-void
+void
ObjTypeSpecFldInfo::TryDepolymorphication(JITTypeHolder type, uint16 slotIndex, bool usesAuxSlot, uint16 * pNewSlotIndex, bool * pNewUsesAuxSlot, uint16 * checkedTypeSetIndex) const
{
Assert(NeedsDepolymorphication());
@@ -505,7 +505,7 @@ ObjTypeSpecFldInfo* ObjTypeSpecFldInfo::CreateFrom(uint id, Js::InlineCache* cac
propertyGuard = entryPoint->GetNativeEntryPointData()->RegisterSharedPropertyGuard(propertyId, scriptContext);
}
- if (fixedProperty != nullptr && Js::JavascriptFunction::Is(fixedProperty))
+ if (fixedProperty != nullptr && Js::VarIs<Js::JavascriptFunction>(fixedProperty))
{
functionObject = (Js::JavascriptFunction *)fixedProperty;
if (PHASE_VERBOSE_TRACE(Js::FixedMethodsPhase, functionBody))
@@ -742,7 +742,7 @@ ObjTypeSpecFldInfo* ObjTypeSpecFldInfo::CreateFrom(uint id, Js::PolymorphicInlin
uint16 firstNonEmptyCacheIndex = UINT16_MAX;
uint16 slotIndex = 0;
bool areEquivalent = true;
- bool canDepolymorphize = topFunctionBody != functionBody && PHASE_ON(Js::DepolymorphizeInlineesPhase, topFunctionBody);
+ bool canDepolymorphize = topFunctionBody != functionBody && !PHASE_OFF(Js::DepolymorphizeInlineesPhase, topFunctionBody);
bool usesAuxSlot = false;
bool isProto = false;
bool isAccessor = false;
@@ -860,7 +860,7 @@ ObjTypeSpecFldInfo* ObjTypeSpecFldInfo::CreateFrom(uint id, Js::PolymorphicInlin
{
areEquivalent = false;
}
- if (!isAccessor || isGetterAccessor != inlineCache.IsGetterAccessor() || !isAccessorOnProto || !inlineCache.u.accessor.isOnProto ||
+ if (!isAccessor || isGetterAccessor != inlineCache.IsGetterAccessor() || !isAccessorOnProto || !inlineCache.u.accessor.isOnProto ||
accessorOwnerObject != inlineCache.u.accessor.object || typeId != TypeWithoutAuxSlotTag(inlineCache.u.accessor.type)->GetTypeId())
{
areEquivalent = false;
diff --git a/deps/chakrashim/core/lib/Backend/ObjTypeSpecFldInfo.h b/deps/chakrashim/core/lib/Backend/ObjTypeSpecFldInfo.h
index 6e7d99935b4..df130e6117e 100644
--- a/deps/chakrashim/core/lib/Backend/ObjTypeSpecFldInfo.h
+++ b/deps/chakrashim/core/lib/Backend/ObjTypeSpecFldInfo.h
@@ -191,8 +191,8 @@ class ObjTypeSpecFldInfo
if (PHASE_OFF1(Js::ObjTypeSpecPhase)) return nullptr; // TODO: (lei)remove this after obj type spec for OOPJIT implemented
- return m_data.fixedFieldInfoArray[0].fieldValue != 0 && Js::JavascriptFunction::Is((Js::Var)m_data.fixedFieldInfoArray[0].fieldValue) ?
- Js::JavascriptFunction::FromVar((Js::Var)m_data.fixedFieldInfoArray[0].fieldValue) : nullptr;
+ return m_data.fixedFieldInfoArray[0].fieldValue != 0 && Js::VarIs<Js::JavascriptFunction>((Js::Var)m_data.fixedFieldInfoArray[0].fieldValue) ?
+ Js::VarTo<Js::JavascriptFunction>((Js::Var)m_data.fixedFieldInfoArray[0].fieldValue) : nullptr;
}
Js::TypeId GetTypeId() const;
diff --git a/deps/chakrashim/core/lib/Backend/Opnd.cpp b/deps/chakrashim/core/lib/Backend/Opnd.cpp
index 35594cade16..d56410e41b8 100644
--- a/deps/chakrashim/core/lib/Backend/Opnd.cpp
+++ b/deps/chakrashim/core/lib/Backend/Opnd.cpp
@@ -962,8 +962,7 @@ PropertySymOpnd::IsObjectHeaderInlined() const
bool
PropertySymOpnd::ChangesObjectLayout() const
{
- JITTypeHolder cachedType = this->HasInitialType() ? this->GetInitialType() :
- this->IsMono() ? this->GetType() : this->GetFirstEquivalentType();
+ JITTypeHolder cachedType = this->IsMono() ? this->GetType() : this->GetFirstEquivalentType();
JITTypeHolder finalType = this->GetFinalType();
@@ -988,11 +987,13 @@ PropertySymOpnd::ChangesObjectLayout() const
// This is the case where the type transition actually occurs. (This is the only case that's detectable
// during the loop pre-pass, since final types are not in place yet.)
+ Assert(cachedType != nullptr && Js::DynamicType::Is(cachedType->GetTypeId()));
+
+ const JITTypeHandler * cachedTypeHandler = cachedType->GetTypeHandler();
const JITTypeHandler * initialTypeHandler = initialType->GetTypeHandler();
- // If no final type has been set in the forward pass, then we have no way of knowing how the object shape will evolve here.
- // If the initial type is object-header-inlined, assume that the layout may change.
- return initialTypeHandler->IsObjectHeaderInlinedTypeHandler();
+ return cachedTypeHandler->GetInlineSlotCapacity() != initialTypeHandler->GetInlineSlotCapacity() ||
+ cachedTypeHandler->GetOffsetOfInlineSlots() != initialTypeHandler->GetOffsetOfInlineSlots();
}
return false;
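The new return statement compares concrete layout parameters instead of conservatively assuming that any object-header-inlined initial type may change layout. A sketch of that test (field names assumed for illustration):

    struct TypeHandlerDesc { int inlineSlotCapacity; int offsetOfInlineSlots; };
    // Two type handlers describe the same object shape only if both the inline slot
    // capacity and the offset where inline slots start agree.
    bool LayoutChanges(const TypeHandlerDesc& cached, const TypeHandlerDesc& initial) {
        return cached.inlineSlotCapacity != initial.inlineSlotCapacity ||
               cached.offsetOfInlineSlots != initial.offsetOfInlineSlots;
    }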
@@ -1015,7 +1016,7 @@ PropertySymOpnd::UpdateSlotForFinalType()
return;
}
- // TODO: OOP JIT: should assert about runtime type handler addr
+ // TODO: OOP JIT: should assert about runtime type handler addr
Assert(cachedType->GetTypeHandler() != finalType->GetTypeHandler());
if (cachedType->GetTypeHandler()->GetInlineSlotCapacity() == finalType->GetTypeHandler()->GetInlineSlotCapacity() &&
@@ -1055,24 +1056,6 @@ bool PropertySymOpnd::HasFinalType() const
return this->finalType != nullptr;
}
-bool PropertySymOpnd::NeedsAuxSlotPtrSymLoad() const
-{
- // Consider: reload based on guarded prop ops' use of aux slots
- return this->GetAuxSlotPtrSym() != nullptr;
-}
-
-void PropertySymOpnd::GenerateAuxSlotPtrSymLoad(IR::Instr * instrInsert)
-{
- StackSym * auxSlotPtrSym = GetAuxSlotPtrSym();
- Assert(auxSlotPtrSym);
- Func * func = instrInsert->m_func;
-
- IR::Opnd *opndIndir = IR::IndirOpnd::New(this->CreatePropertyOwnerOpnd(func), Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachReg, func);
- IR::RegOpnd *regOpnd = IR::RegOpnd::New(auxSlotPtrSym, TyMachReg, func);
- regOpnd->SetIsJITOptimizedReg(true);
- Lowerer::InsertMove(regOpnd, opndIndir, instrInsert);
-}
-
PropertySymOpnd *
PropertySymOpnd::CloneDefInternalSub(Func *func)
{
@@ -1085,7 +1068,7 @@ PropertySymOpnd::CloneUseInternalSub(Func *func)
return this->CopyInternalSub(func);
}
-bool
+bool
PropertySymOpnd::ShouldUsePolyEquivTypeGuard(Func *const func) const
{
return this->IsPoly() && this->m_polyCacheUtil >= PolymorphicInlineCacheUtilizationThreshold && !PHASE_OFF(Js::PolyEquivTypeGuardPhase, func);
@@ -2537,7 +2520,6 @@ IndirOpnd::New(RegOpnd *baseOpnd, int32 offset, IRType type, Func *func, bool do
indirOpnd->m_type = type;
indirOpnd->SetIsJITOptimizedReg(false);
- indirOpnd->m_conversionAllowed = false;
indirOpnd->m_kind = OpndKindIndir;
@@ -2596,7 +2578,6 @@ IndirOpnd::CopyInternal(Func *func)
newOpnd->canStoreTemp = this->canStoreTemp;
newOpnd->SetOffset(m_offset, m_dontEncode);
newOpnd->SetIsJITOptimizedReg(this->GetIsJITOptimizedReg());
- newOpnd->m_conversionAllowed = this->m_conversionAllowed;
#if DBG_DUMP
newOpnd->m_addrKind = m_addrKind;
@@ -3251,7 +3232,7 @@ Opnd::Dump(IRDumpFlags flags, Func *func)
Output::Print(_u("%s"), func->GetInProcThreadContext()->GetPropertyRecord(propertyOpInfo->GetPropertyId())->GetBuffer(), propertyOpId);
}
Output::Print(_u("(%u)"), propertyOpId);
-
+
if (propertyOpInfo->IsLoadedFromProto())
{
Output::Print(_u("~"));
@@ -3646,13 +3627,13 @@ Opnd::GetAddrDescription(__out_ecount(count) char16 *const description, const si
}
else
{
- switch (Js::RecyclableObject::FromVar(address)->GetTypeId())
+ switch (Js::VarTo<Js::RecyclableObject>(address)->GetTypeId())
{
case Js::TypeIds_Boolean:
- WriteToBuffer(&buffer, &n, Js::JavascriptBoolean::FromVar(address)->GetValue() ? _u(" (true)") : _u(" (false)"));
+ WriteToBuffer(&buffer, &n, Js::VarTo<Js::JavascriptBoolean>(address)->GetValue() ? _u(" (true)") : _u(" (false)"));
break;
case Js::TypeIds_String:
- WriteToBuffer(&buffer, &n, _u(" (\"%s\")"), Js::JavascriptString::FromVar(address)->GetSz());
+ WriteToBuffer(&buffer, &n, _u(" (\"%s\")"), Js::VarTo<Js::JavascriptString>(address)->GetSz());
break;
case Js::TypeIds_Number:
WriteToBuffer(&buffer, &n, _u(" (value: %f)"), Js::JavascriptNumber::GetValue(address));
@@ -3849,9 +3830,9 @@ Opnd::GetAddrDescription(__out_ecount(count) char16 *const description, const si
DumpAddress(address, printToConsole, skipMaskedAddress);
{
Js::RecyclableObject * dynamicObject = (Js::RecyclableObject *)((intptr_t)address - Js::RecyclableObject::GetOffsetOfType());
- if (!func->IsOOPJIT() && Js::JavascriptFunction::Is(dynamicObject))
+ if (!func->IsOOPJIT() && Js::VarIs<Js::JavascriptFunction>(dynamicObject))
{
- DumpFunctionInfo(&buffer, &n, Js::JavascriptFunction::FromVar((void *)((intptr_t)address - Js::RecyclableObject::GetOffsetOfType()))->GetFunctionInfo(),
+ DumpFunctionInfo(&buffer, &n, Js::VarTo<Js::JavascriptFunction>((void *)((intptr_t)address - Js::RecyclableObject::GetOffsetOfType()))->GetFunctionInfo(),
printToConsole, _u("FunctionObjectTypeRef"));
}
else
diff --git a/deps/chakrashim/core/lib/Backend/Opnd.h b/deps/chakrashim/core/lib/Backend/Opnd.h
index 492eb03bead..44f3925a65e 100644
--- a/deps/chakrashim/core/lib/Backend/Opnd.h
+++ b/deps/chakrashim/core/lib/Backend/Opnd.h
@@ -636,6 +636,8 @@ class PropertySymOpnd sealed : public SymOpnd
// Note that even usesFixedValue cannot live on ObjTypeSpecFldInfo, because we may share a cache between
// e.g. Object.prototype and new Object(), and only the latter actually uses the fixed value, even though both have it.
bool usesFixedValue: 1;
+ bool auxSlotPtrSymAvailable:1;
+ bool producesAuxSlotPtr:1;
union
{
@@ -799,17 +801,9 @@ class PropertySymOpnd sealed : public SymOpnd
return this->monoGuardType;
}
- bool SetMonoGuardType(JITTypeHolder type)
+ void SetMonoGuardType(JITTypeHolder type)
{
- if (!(this->monoGuardType == nullptr || this->monoGuardType == type) ||
- !((HasEquivalentTypeSet() && GetEquivalentTypeSet()->Contains(type)) ||
- (!HasEquivalentTypeSet() && GetType() == type)))
- {
- // Required type is not in the available set, or we already set the type to something else. Inform the caller.
- return false;
- }
this->monoGuardType = type;
- return true;
}
bool NeedsMonoCheck() const
@@ -972,6 +966,28 @@ class PropertySymOpnd sealed : public SymOpnd
this->typeDead = value;
}
+ bool IsAuxSlotPtrSymAvailable() const
+ {
+ return this->auxSlotPtrSymAvailable;
+ }
+
+ void SetAuxSlotPtrSymAvailable(bool value)
+ {
+ Assert(IsTypeCheckSeqCandidate());
+ this->auxSlotPtrSymAvailable = value;
+ }
+
+ bool ProducesAuxSlotPtr() const
+ {
+ return this->producesAuxSlotPtr;
+ }
+
+ void SetProducesAuxSlotPtr(bool value)
+ {
+ Assert(IsTypeCheckSeqCandidate());
+ this->producesAuxSlotPtr = value;
+ }
+
void SetTypeDeadIfTypeCheckSeqCandidate(bool value)
{
if (IsTypeCheckSeqCandidate())
@@ -1146,8 +1162,7 @@ class PropertySymOpnd sealed : public SymOpnd
// fall back on live cache. Similarly, for fixed method checks.
bool MayHaveImplicitCall() const
{
- return !IsRootObjectNonConfigurableFieldLoad() && !UsesFixedValue() && (!IsTypeCheckSeqCandidate() || !IsTypeCheckProtected()
- || (IsLoadedFromProto() && NeedsWriteGuardTypeCheck()));
+ return !IsRootObjectNonConfigurableFieldLoad() && !UsesFixedValue() && (!IsTypeCheckSeqCandidate() || !IsTypeCheckProtected());
}
// Is the instruction involving this operand part of a type check sequence? This is different from IsObjTypeSpecOptimized
@@ -1177,9 +1192,6 @@ class PropertySymOpnd sealed : public SymOpnd
this->finalType = JITTypeHolder(nullptr);
}
- bool NeedsAuxSlotPtrSymLoad() const;
- void GenerateAuxSlotPtrSymLoad(IR::Instr * instrInsert);
-
BVSparse<JitArenaAllocator>* GetGuardedPropOps()
{
return this->guardedPropOps;
@@ -1671,8 +1683,6 @@ class IndirOpnd: public Opnd
byte GetScale() const;
void SetScale(byte scale);
bool TryGetIntConstIndexValue(bool trySym, IntConstType *pValue, bool *pIsNotInt);
- void AllowConversion(bool value) { m_conversionAllowed = value; }
- bool ConversionAllowed() const { return m_conversionAllowed; }
#if DBG_DUMP || defined(ENABLE_IR_VIEWER)
const char16 * GetDescription();
IR::AddrOpndKind GetAddrKind() const;
@@ -1689,7 +1699,6 @@ class IndirOpnd: public Opnd
RegOpnd * m_indexOpnd;
int32 m_offset;
byte m_scale;
- bool m_conversionAllowed;
Func * m_func; // We need the allocator to copy the base and index...
#if DBG_DUMP || defined(ENABLE_IR_VIEWER)
diff --git a/deps/chakrashim/core/lib/Backend/PDataManager.cpp b/deps/chakrashim/core/lib/Backend/PDataManager.cpp
index 8a1514d68db..1b9e44a0d85 100644
--- a/deps/chakrashim/core/lib/Backend/PDataManager.cpp
+++ b/deps/chakrashim/core/lib/Backend/PDataManager.cpp
@@ -51,7 +51,8 @@ void PDataManager::UnregisterPdata(RUNTIME_FUNCTION* pdata)
{
if (AutoSystemInfo::Data.IsWin8OrLater())
{
- NtdllLibrary::Instance->DeleteGrowableFunctionTable(pdata);
+ // TODO: need to move to background?
+ DelayDeletingFunctionTable::DeleteFunctionTable(pdata);
}
else
{
diff --git a/deps/chakrashim/core/lib/Backend/SccLiveness.cpp b/deps/chakrashim/core/lib/Backend/SccLiveness.cpp
index 43373bb57e7..bc533f03c47 100644
--- a/deps/chakrashim/core/lib/Backend/SccLiveness.cpp
+++ b/deps/chakrashim/core/lib/Backend/SccLiveness.cpp
@@ -463,7 +463,7 @@ SCCLiveness::ProcessBailOutUses(IR::Instr * instr)
}
NEXT_BITSET_IN_SPARSEBV;
- FOREACH_SLISTBASE_ENTRY(CopyPropSyms, copyPropSyms, &bailOutInfo->usedCapturedValues.copyPropSyms)
+ FOREACH_SLISTBASE_ENTRY(CopyPropSyms, copyPropSyms, &bailOutInfo->usedCapturedValues->copyPropSyms)
{
ProcessStackSymUse(copyPropSyms.Value(), instr);
}
diff --git a/deps/chakrashim/core/lib/Backend/ServerScriptContext.cpp b/deps/chakrashim/core/lib/Backend/ServerScriptContext.cpp
index b3ce241b054..ac270a2c480 100644
--- a/deps/chakrashim/core/lib/Backend/ServerScriptContext.cpp
+++ b/deps/chakrashim/core/lib/Backend/ServerScriptContext.cpp
@@ -312,7 +312,6 @@ ServerScriptContext::IsClosed() const
void
ServerScriptContext::AddToDOMFastPathHelperMap(intptr_t funcInfoAddr, IR::JnHelperMethod helper)
{
- AutoCriticalSection cs(&m_cs);
m_domFastPathHelperMap->Add(funcInfoAddr, helper);
}
@@ -328,7 +327,7 @@ ServerScriptContext::DecommitEmitBufferManager(bool asmJsManager)
GetEmitBufferManager(asmJsManager)->Decommit();
}
-OOPEmitBufferManagerWithLock *
+OOPEmitBufferManager *
ServerScriptContext::GetEmitBufferManager(bool asmJsManager)
{
if (asmJsManager)
@@ -344,11 +343,11 @@ ServerScriptContext::GetEmitBufferManager(bool asmJsManager)
IR::JnHelperMethod
ServerScriptContext::GetDOMFastPathHelper(intptr_t funcInfoAddr)
{
- AutoCriticalSection cs(&m_cs);
-
IR::JnHelperMethod helper = IR::HelperInvalid;
+ m_domFastPathHelperMap->LockResize();
m_domFastPathHelperMap->TryGetValue(funcInfoAddr, &helper);
+ m_domFastPathHelperMap->UnlockResize();
return helper;
}
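GetDOMFastPathHelper now brackets the lookup with the map's own LockResize/UnlockResize instead of taking a script-context-wide critical section: the reader only needs the bucket array to stay stable across TryGetValue. A hypothetical RAII wrapper for that pairing (the guard type below is illustrative, not part of the codebase):

    template <typename Map>
    struct ResizeLockGuard {
        explicit ResizeLockGuard(Map& m) : map(m) { map.LockResize(); }
        ~ResizeLockGuard() { map.UnlockResize(); }
        ResizeLockGuard(const ResizeLockGuard&) = delete;
        ResizeLockGuard& operator=(const ResizeLockGuard&) = delete;
        Map& map;
    };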
@@ -393,7 +392,6 @@ ServerScriptContext::GetCodeGenAllocators()
Field(Js::Var)*
ServerScriptContext::GetModuleExportSlotArrayAddress(uint moduleIndex, uint slotIndex)
{
- AutoCriticalSection cs(&m_cs);
AssertOrFailFast(m_moduleRecords.ContainsKey(moduleIndex));
auto record = m_moduleRecords.Item(moduleIndex);
return record->localExportSlotsAddr;
@@ -408,7 +406,6 @@ ServerScriptContext::SetIsPRNGSeeded(bool value)
void
ServerScriptContext::AddModuleRecordInfo(unsigned int moduleId, __int64 localExportSlotsAddr)
{
- AutoCriticalSection cs(&m_cs);
Js::ServerSourceTextModuleRecord* record = HeapNewStructZ(Js::ServerSourceTextModuleRecord);
record->moduleId = moduleId;
record->localExportSlotsAddr = (Field(Js::Var)*)localExportSlotsAddr;
diff --git a/deps/chakrashim/core/lib/Backend/ServerScriptContext.h b/deps/chakrashim/core/lib/Backend/ServerScriptContext.h
index 4d324461174..b20c406c974 100644
--- a/deps/chakrashim/core/lib/Backend/ServerScriptContext.h
+++ b/deps/chakrashim/core/lib/Backend/ServerScriptContext.h
@@ -80,7 +80,7 @@ class ServerScriptContext : public ScriptContextInfo
void SetIsPRNGSeeded(bool value);
void AddModuleRecordInfo(unsigned int moduleId, __int64 localExportSlotsAddr);
void UpdateGlobalObjectThisAddr(intptr_t globalThis);
- OOPEmitBufferManagerWithLock * GetEmitBufferManager(bool asmJsManager);
+ OOPEmitBufferManager * GetEmitBufferManager(bool asmJsManager);
void DecommitEmitBufferManager(bool asmJsManager);
#ifdef PROFILE_EXEC
Js::ScriptContextProfiler* GetCodeGenProfiler(_In_ PageAllocator* pageAllocator);
@@ -100,11 +100,10 @@ class ServerScriptContext : public ScriptContextInfo
Js::ScriptContextProfiler * codeGenProfiler;
CriticalSection profilerCS;
#endif
- CriticalSection m_cs;
ArenaAllocator m_sourceCodeArena;
- OOPEmitBufferManagerWithLock m_interpreterThunkBufferManager;
- OOPEmitBufferManagerWithLock m_asmJsInterpreterThunkBufferManager;
+ OOPEmitBufferManager m_interpreterThunkBufferManager;
+ OOPEmitBufferManager m_asmJsInterpreterThunkBufferManager;
ScriptContextDataIDL m_contextData;
intptr_t m_globalThisAddr;
diff --git a/deps/chakrashim/core/lib/Backend/SimpleJitProfilingHelpers.cpp b/deps/chakrashim/core/lib/Backend/SimpleJitProfilingHelpers.cpp
index 6a1fa9ca66f..756cdff2de0 100644
--- a/deps/chakrashim/core/lib/Backend/SimpleJitProfilingHelpers.cpp
+++ b/deps/chakrashim/core/lib/Backend/SimpleJitProfilingHelpers.cpp
@@ -69,7 +69,7 @@ using namespace Js;
DynamicProfileInfo * dynamicProfileInfo = callerFunctionBody->GetDynamicProfileInfo();
JavascriptFunction *const calleeFunction =
- JavascriptFunction::Is(callee) ? JavascriptFunction::FromVar(callee) : nullptr;
+ VarIs<JavascriptFunction>(callee) ? VarTo<JavascriptFunction>(callee) : nullptr;
FunctionInfo* calleeFunctionInfo = calleeFunction ? calleeFunction->GetFunctionInfo() : nullptr;
auto const ctor = !!(info.Flags & CallFlags_New);
diff --git a/deps/chakrashim/core/lib/Backend/SwitchIRBuilder.cpp b/deps/chakrashim/core/lib/Backend/SwitchIRBuilder.cpp
index b44b887536c..bef48576587 100644
--- a/deps/chakrashim/core/lib/Backend/SwitchIRBuilder.cpp
+++ b/deps/chakrashim/core/lib/Backend/SwitchIRBuilder.cpp
@@ -163,7 +163,7 @@ SwitchIRBuilder::BeginSwitch()
void
SwitchIRBuilder::EndSwitch(uint32 offset, uint32 targetOffset)
{
- FlushCases(offset);
+ FlushCases(targetOffset);
AssertMsg(m_caseNodes->Count() == 0, "Not all switch case nodes built by end of switch");
// only generate the final unconditional jump at the end of the switch
@@ -260,29 +260,24 @@ SwitchIRBuilder::OnCase(IR::RegOpnd * src1Opnd, IR::Opnd * src2Opnd, uint32 offs
//
// For optimizing, the Load instruction corresponding to the switch instruction is profiled in the interpreter.
// Based on the dynamic profile data, optimization technique is decided.
-
- // TODO: support switch opt when breaking out of loops
- if (!m_func->IsLoopBody() || (targetOffset < m_func->m_workItem->GetLoopHeader()->endOffset && targetOffset >= m_func->m_workItem->GetLoopHeader()->startOffset))
+ if (m_switchIntDynProfile && isIntConst && GlobOpt::IsSwitchOptEnabledForIntTypeSpec(m_func->GetTopFunc()))
{
- if (m_switchIntDynProfile && isIntConst && GlobOpt::IsSwitchOptEnabledForIntTypeSpec(m_func->GetTopFunc()))
- {
- CaseNode* caseNode = JitAnew(m_tempAlloc, CaseNode, branchInstr, offset, targetOffset, src2Opnd);
- m_caseNodes->Add(caseNode);
- return;
- }
- else if (m_switchStrDynProfile && isStrConst && GlobOpt::IsSwitchOptEnabled(m_func->GetTopFunc()))
- {
- CaseNode* caseNode = JitAnew(m_tempAlloc, CaseNode, branchInstr, offset, targetOffset, src2Opnd);
- m_caseNodes->Add(caseNode);
- m_seenOnlySingleCharStrCaseNodes = m_seenOnlySingleCharStrCaseNodes && caseNode->GetUpperBoundStringConstLocal()->GetLength() == 1;
- return;
- }
+ CaseNode* caseNode = JitAnew(m_tempAlloc, CaseNode, branchInstr, offset, targetOffset, src2Opnd);
+ m_caseNodes->Add(caseNode);
+ }
+ else if (m_switchStrDynProfile && isStrConst && GlobOpt::IsSwitchOptEnabled(m_func->GetTopFunc()))
+ {
+ CaseNode* caseNode = JitAnew(m_tempAlloc, CaseNode, branchInstr, offset, targetOffset, src2Opnd);
+ m_caseNodes->Add(caseNode);
+ m_seenOnlySingleCharStrCaseNodes = m_seenOnlySingleCharStrCaseNodes && caseNode->GetUpperBoundStringConstLocal()->GetLength() == 1;
+ }
+ else
+ {
+ // Otherwise, there are no optimizations to defer, so add the branch for
+ // this case instruction now
+ FlushCases(offset);
+ m_adapter->AddBranchInstr(branchInstr, offset, targetOffset);
}
-
- // Otherwise, there are no optimizations to defer, so add the branch for
- // this case instruction now
- FlushCases(offset);
- m_adapter->AddBranchInstr(branchInstr, offset, targetOffset);
}
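The reshaped OnCase keeps the same deferral scheme: profilable constant cases are queued as CaseNodes and lowered together later (e.g., as a jump table or binary search), while anything else flushes the queue and emits its branch immediately. A toy model of that control flow (names are illustrative, not the real SwitchIRBuilder API):

    #include <vector>
    struct CaseNode { int target; };
    struct SwitchBuilder {
        std::vector<CaseNode> pending;
        void Flush() { /* lower the queued cases together */ pending.clear(); }
        void OnCase(bool optimizable, int target) {
            if (optimizable) pending.push_back({target});   // defer
            else { Flush(); /* AddBranchInstr(target) */ }  // emit now
        }
    };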
diff --git a/deps/chakrashim/core/lib/Backend/amd64/EncoderMD.cpp b/deps/chakrashim/core/lib/Backend/amd64/EncoderMD.cpp
index 2a79a16ae1a..ad4a7261bab 100644
--- a/deps/chakrashim/core/lib/Backend/amd64/EncoderMD.cpp
+++ b/deps/chakrashim/core/lib/Backend/amd64/EncoderMD.cpp
@@ -103,7 +103,7 @@ EncoderMD::Init(Encoder *encoder)
///
///----------------------------------------------------------------------------
-const BYTE
+BYTE
EncoderMD::GetOpcodeByte2(IR::Instr *instr)
{
return OpcodeByte2[instr->m_opcode - (Js::OpCode::MDStart+1)];
@@ -161,13 +161,13 @@ EncoderMD::GetOpbyte(IR::Instr *instr)
///
///----------------------------------------------------------------------------
-const BYTE
+BYTE
EncoderMD::GetRegEncode(IR::RegOpnd *regOpnd)
{
return this->GetRegEncode(regOpnd->GetReg());
}
-const BYTE
+BYTE
EncoderMD::GetRegEncode(RegNum reg)
{
AssertMsg(reg != RegNOREG, "should have valid reg in encoder");
@@ -189,7 +189,7 @@ EncoderMD::GetRegEncode(RegNum reg)
///
///----------------------------------------------------------------------------
-const uint32
+uint32
EncoderMD::GetOpdope(IR::Instr *instr)
{
return Opdope[instr->m_opcode - (Js::OpCode::MDStart+1)];
@@ -203,7 +203,7 @@ EncoderMD::GetOpdope(IR::Instr *instr)
///
///----------------------------------------------------------------------------
-const uint32
+uint32
EncoderMD::GetLeadIn(IR::Instr * instr)
{
return OpcodeLeadIn[instr->m_opcode - (Js::OpCode::MDStart+1)];
diff --git a/deps/chakrashim/core/lib/Backend/amd64/EncoderMD.h b/deps/chakrashim/core/lib/Backend/amd64/EncoderMD.h
index 595afdd2c49..b1218854f5c 100644
--- a/deps/chakrashim/core/lib/Backend/amd64/EncoderMD.h
+++ b/deps/chakrashim/core/lib/Backend/amd64/EncoderMD.h
@@ -202,14 +202,14 @@ class EncoderMD
void AddLabelReloc(BYTE* relocAddress);
private:
- const BYTE GetOpcodeByte2(IR::Instr *instr);
+ BYTE GetOpcodeByte2(IR::Instr *instr);
static Forms GetInstrForm(IR::Instr *instr);
const BYTE * GetFormTemplate(IR::Instr *instr);
const BYTE * GetOpbyte(IR::Instr *instr);
- const BYTE GetRegEncode(IR::RegOpnd *regOpnd);
- const BYTE GetRegEncode(RegNum reg);
- static const uint32 GetOpdope(IR::Instr *instr);
- const uint32 GetLeadIn(IR::Instr * instr);
+ BYTE GetRegEncode(IR::RegOpnd *regOpnd);
+ BYTE GetRegEncode(RegNum reg);
+ static uint32 GetOpdope(IR::Instr *instr);
+ uint32 GetLeadIn(IR::Instr * instr);
BYTE EmitModRM(IR::Instr * instr, IR::Opnd *opnd, BYTE reg1);
void EmitConst(size_t val, int size, bool allowImm64 = false);
BYTE EmitImmed(IR::Opnd * opnd, int opSize, int sbit, bool allowImm64 = false);
diff --git a/deps/chakrashim/core/lib/Backend/amd64/LinearScanMdA.S b/deps/chakrashim/core/lib/Backend/amd64/LinearScanMdA.S
index 36017544bfa..71363a324fe 100644
--- a/deps/chakrashim/core/lib/Backend/amd64/LinearScanMdA.S
+++ b/deps/chakrashim/core/lib/Backend/amd64/LinearScanMdA.S
@@ -51,23 +51,23 @@ LEAF_ENTRY _ZN12LinearScanMD26SaveAllRegistersEP13BailOutRecord, _TEXT
mov [rax + 15 * 8], r15
// Save all XMM regs (full width)
- movups xmmword ptr [rax + 0x80], xmm0 // [rax + 16 * 8 + 0 * 16] = xmm0
- movups xmmword ptr [rax + 0x90], xmm1 // [rax + 16 * 8 + 1 * 16] = xmm1
- movups xmmword ptr [rax + 0x0a0], xmm2 // ...
- // movups xmmword ptr [rax + 0x0b0], xmm3 // xplat: WHY this one fails to compile...
+ movups xmmword ptr [rax + 80h], xmm0 // [rax + 16 * 8 + 0 * 16] = xmm0
+ movups xmmword ptr [rax + 90h], xmm1 // [rax + 16 * 8 + 1 * 16] = xmm1
+ movups xmmword ptr [rax + 0a0h], xmm2 // ...
+ // movups xmmword ptr [rax + 0b0h], xmm3 // xplat: WHY this one fails to compile...
movups xmmword ptr [rax + 11 * 16], xmm3
- movups xmmword ptr [rax + 0x0c0], xmm4
- movups xmmword ptr [rax + 0x0d0], xmm5
- movups xmmword ptr [rax + 0x0e0], xmm6
- movups xmmword ptr [rax + 0x0f0], xmm7
- movups xmmword ptr [rax + 0x100], xmm8
- movups xmmword ptr [rax + 0x110], xmm9
- movups xmmword ptr [rax + 0x120], xmm10
- movups xmmword ptr [rax + 0x130], xmm11
- movups xmmword ptr [rax + 0x140], xmm12
- movups xmmword ptr [rax + 0x150], xmm13
- movups xmmword ptr [rax + 0x160], xmm14
- movups xmmword ptr [rax + 0x170], xmm15 // [rax + 16 * 8 + 15 * 16] = xmm15
+ movups xmmword ptr [rax + 0c0h], xmm4
+ movups xmmword ptr [rax + 0d0h], xmm5
+ movups xmmword ptr [rax + 0e0h], xmm6
+ movups xmmword ptr [rax + 0f0h], xmm7
+ movups xmmword ptr [rax + 100h], xmm8
+ movups xmmword ptr [rax + 110h], xmm9
+ movups xmmword ptr [rax + 120h], xmm10
+ movups xmmword ptr [rax + 130h], xmm11
+ movups xmmword ptr [rax + 140h], xmm12
+ movups xmmword ptr [rax + 150h], xmm13
+ movups xmmword ptr [rax + 160h], xmm14
+ movups xmmword ptr [rax + 170h], xmm15 // [rax + 16 * 8 + 15 * 16] = xmm15
ret
@@ -91,12 +91,12 @@ NESTED_ENTRY _ZN12LinearScanMD26SaveAllRegistersAndBailOutEP13BailOutRecord, _TE
mov [rsp + 3 * 8], rsi
- sub rsp, 0x28 // use the same as Windows x64 so register locations are the same
+ sub rsp, 28h // use the same as Windows x64 so register locations are the same
.cfi_adjust_cfa_offset 0x28
call C_FUNC(_ZN12LinearScanMD26SaveAllRegistersEP13BailOutRecord)
- add rsp, 0x28 // deallocate stack space
+ add rsp, 28h // deallocate stack space
.cfi_adjust_cfa_offset -0x28
jmp C_FUNC(_ZN13BailOutRecord7BailOutEPKS_)
@@ -117,12 +117,12 @@ NESTED_ENTRY _ZN12LinearScanMD32SaveAllRegistersAndBranchBailOutEP19BranchBailOu
// rdi == bailOutRecord
// rsi == condition
- sub rsp, 0x28 // use the same as Windows x64 so register locations are the same
+ sub rsp, 28h // use the same as Windows x64 so register locations are the same
.cfi_adjust_cfa_offset 0x28
call C_FUNC(_ZN12LinearScanMD26SaveAllRegistersEP13BailOutRecord)
- add rsp, 0x28 // deallocate stack space
+ add rsp, 28h // deallocate stack space
.cfi_adjust_cfa_offset -0x28
jmp C_FUNC(_ZN19BranchBailOutRecord7BailOutEPKS_i)
diff --git a/deps/chakrashim/core/lib/Backend/amd64/Thunks.S b/deps/chakrashim/core/lib/Backend/amd64/Thunks.S
index c3c37ff7c2e..4a7a9ed7e10 100644
--- a/deps/chakrashim/core/lib/Backend/amd64/Thunks.S
+++ b/deps/chakrashim/core/lib/Backend/amd64/Thunks.S
@@ -63,24 +63,24 @@ NESTED_ENTRY _ZN19NativeCodeGenerator22CheckAsmJsCodeGenThunkEPN2Js16RecyclableO
push r8
push r9
- sub rsp, 0x40
+ sub rsp, 40h
// ----- TODO: potentially xmm0-xmm7 args
// spill potential floating point arguments to stack
- movaps xmmword ptr [rsp + 0x00], xmm0
- movaps xmmword ptr [rsp + 0x10], xmm1
- movaps xmmword ptr [rsp + 0x20], xmm2
- movaps xmmword ptr [rsp + 0x30], xmm3
+ movaps xmmword ptr [rsp + 00h], xmm0
+ movaps xmmword ptr [rsp + 10h], xmm1
+ movaps xmmword ptr [rsp + 20h], xmm2
+ movaps xmmword ptr [rsp + 30h], xmm3
call C_FUNC(_ZN19NativeCodeGenerator17CheckAsmJsCodeGenEPN2Js14ScriptFunctionE)
// restore potential floating point arguments from stack
- movaps xmm0, xmmword ptr [rsp + 0x00]
- movaps xmm1, xmmword ptr [rsp + 0x10]
- movaps xmm2, xmmword ptr [rsp + 0x20]
- movaps xmm3, xmmword ptr [rsp + 0x30]
+ movaps xmm0, xmmword ptr [rsp + 00h]
+ movaps xmm1, xmmword ptr [rsp + 10h]
+ movaps xmm2, xmmword ptr [rsp + 20h]
+ movaps xmm3, xmmword ptr [rsp + 30h]
- add rsp, 0x40
+ add rsp, 40h
pop r9
pop r8
diff --git a/deps/chakrashim/core/lib/Backend/amd64/machvalues.h b/deps/chakrashim/core/lib/Backend/amd64/machvalues.h
index a5676a0060d..99f772de69c 100644
--- a/deps/chakrashim/core/lib/Backend/amd64/machvalues.h
+++ b/deps/chakrashim/core/lib/Backend/amd64/machvalues.h
@@ -18,4 +18,4 @@ static const int MachStackAlignment = MachPtr;
static const int MachArgsSlotOffset = MachPtr;
static const int MachMaxInstrSize = 12;
static const unsigned __int64 MachSignBit = 0x8000000000000000;
-static const int MachSimd128 = 16;
\ No newline at end of file
+static const int MachSimd128 = 16;
diff --git a/deps/chakrashim/core/lib/Backend/arm/EncoderMD.cpp b/deps/chakrashim/core/lib/Backend/arm/EncoderMD.cpp
index fc77dbfe964..f2fc0f44234 100644
--- a/deps/chakrashim/core/lib/Backend/arm/EncoderMD.cpp
+++ b/deps/chakrashim/core/lib/Backend/arm/EncoderMD.cpp
@@ -34,19 +34,19 @@ EncoderMD::Init(Encoder *encoder)
///
///----------------------------------------------------------------------------
-const BYTE
+BYTE
EncoderMD::GetRegEncode(IR::RegOpnd *regOpnd)
{
return GetRegEncode(regOpnd->GetReg());
}
-const BYTE
+BYTE
EncoderMD::GetRegEncode(RegNum reg)
{
return RegEncode[reg];
}
-const BYTE
+BYTE
EncoderMD::GetFloatRegEncode(IR::RegOpnd *regOpnd)
{
//Each double register holds two single precision registers.
diff --git a/deps/chakrashim/core/lib/Backend/arm/EncoderMD.h b/deps/chakrashim/core/lib/Backend/arm/EncoderMD.h
index eaed327c271..5869e7092e1 100644
--- a/deps/chakrashim/core/lib/Backend/arm/EncoderMD.h
+++ b/deps/chakrashim/core/lib/Backend/arm/EncoderMD.h
@@ -58,9 +58,9 @@ class EncoderMD
void ApplyRelocs(uint32 codeBufferAddress, size_t codeSize, uint* bufferCRC, BOOL isBrShorteningSucceeded, bool isFinalBufferValidation = false);
static bool TryConstFold(IR::Instr *instr, IR::RegOpnd *regOpnd);
static bool TryFold(IR::Instr *instr, IR::RegOpnd *regOpnd);
- const BYTE GetRegEncode(IR::RegOpnd *regOpnd);
- const BYTE GetFloatRegEncode(IR::RegOpnd *regOpnd);
- static const BYTE GetRegEncode(RegNum reg);
+ BYTE GetRegEncode(IR::RegOpnd *regOpnd);
+ BYTE GetFloatRegEncode(IR::RegOpnd *regOpnd);
+ static BYTE GetRegEncode(RegNum reg);
static uint32 GetOpdope(IR::Instr *instr);
static uint32 GetOpdope(Js::OpCode op);
diff --git a/deps/chakrashim/core/lib/Backend/arm/LowerMD.cpp b/deps/chakrashim/core/lib/Backend/arm/LowerMD.cpp
index 76495ea4b2f..caafe723e45 100644
--- a/deps/chakrashim/core/lib/Backend/arm/LowerMD.cpp
+++ b/deps/chakrashim/core/lib/Backend/arm/LowerMD.cpp
@@ -209,10 +209,14 @@ LowererMD::LowerCallHelper(IR::Instr *instrCall)
Assert(regArg->m_sym->m_isSingleDef);
IR::Instr *instrArg = regArg->m_sym->m_instrDef;
- Assert(instrArg->m_opcode == Js::OpCode::ArgOut_A ||
- (helperMethod == IR::JnHelperMethod::HelperOP_InitCachedScope && instrArg->m_opcode == Js::OpCode::ExtendArg_A) ||
- (helperMethod == IR::JnHelperMethod::HelperScrFunc_OP_NewScFuncHomeObj && instrArg->m_opcode == Js::OpCode::ExtendArg_A) ||
- (helperMethod == IR::JnHelperMethod::HelperScrFunc_OP_NewScGenFuncHomeObj && instrArg->m_opcode == Js::OpCode::ExtendArg_A));
+ Assert(instrArg->m_opcode == Js::OpCode::ArgOut_A || instrArg->m_opcode == Js::OpCode::ExtendArg_A &&
+ (
+ helperMethod == IR::JnHelperMethod::HelperOP_InitCachedScope ||
+ helperMethod == IR::JnHelperMethod::HelperScrFunc_OP_NewScFuncHomeObj ||
+ helperMethod == IR::JnHelperMethod::HelperScrFunc_OP_NewScGenFuncHomeObj ||
+ helperMethod == IR::JnHelperMethod::HelperRestify ||
+ helperMethod == IR::JnHelperMethod::HelperStPropIdArrFromVar
+ ));
prevInstr = this->LoadHelperArgument(prevInstr, instrArg->GetSrc1());
argOpnd = instrArg->GetSrc2();
@@ -2870,9 +2874,161 @@ LowererMD::GenerateFastCmSrEqConst(IR::Instr *instr)
return false;
}
-bool LowererMD::GenerateFastCmXxI4(IR::Instr *instr)
+void LowererMD::GenerateFastCmXxI4(IR::Instr *instr)
{
- return this->GenerateFastCmXxTaggedInt(instr);
+ this->GenerateFastCmXx(instr);
+}
+
+void LowererMD::GenerateFastCmXxR8(IR::Instr * instr)
+{
+ this->GenerateFastCmXx(instr);
+}
+
+void LowererMD::GenerateFastCmXx(IR::Instr *instr)
+{
+ // For float src:
+ // LDIMM dst, trueResult
+ // FCMP src1, src2
+ // - BVS $done (NaN check iff B.cond is BNE)
+ // B.cond $done
+ // LDIMM dst, falseResult
+ // $done
+
+ // For Int src:
+ // LDIMM dst, trueResult
+ // CMP src1, src2
+ // B.cond $done
+ // LDIMM dst, falseResult
+ // $done:
+
+ IR::Opnd * src1 = instr->UnlinkSrc1();
+ IR::Opnd * src2 = instr->UnlinkSrc2();
+ IR::Opnd * dst = instr->UnlinkDst();
+ bool isIntDst = dst->AsRegOpnd()->m_sym->IsInt32();
+ bool isFloatSrc = src1->IsFloat();
+ Assert(!isFloatSrc || src2->IsFloat());
+ Assert(!src1->IsInt64() || src2->IsInt64());
+ Assert(!isFloatSrc || AutoSystemInfo::Data.SSE2Available());
+ Assert(src1->IsRegOpnd());
+ IR::Opnd * opndTrue;
+ IR::Opnd * opndFalse;
+ IR::Instr * newInstr;
+ IR::LabelInstr * done = IR::LabelInstr::New(Js::OpCode::Label, m_func);
+
+ if (dst->IsEqual(src1))
+ {
+ IR::RegOpnd *newSrc1 = IR::RegOpnd::New(src1->GetType(), m_func);
+ Lowerer::InsertMove(newSrc1, src1, instr);
+ src1 = newSrc1;
+ }
+
+ if (dst->IsEqual(src2))
+ {
+ IR::RegOpnd *newSrc2 = IR::RegOpnd::New(src1->GetType(), m_func);
+ Lowerer::InsertMove(newSrc2, src2, instr);
+ src2 = newSrc2;
+ }
+
+ if (isIntDst)
+ {
+ opndTrue = IR::IntConstOpnd::New(1, TyInt32, this->m_func);
+ opndFalse = IR::IntConstOpnd::New(0, TyInt32, this->m_func);
+ }
+ else
+ {
+ opndTrue = this->m_lowerer->LoadLibraryValueOpnd(instr, LibraryValue::ValueTrue);
+ opndFalse = this->m_lowerer->LoadLibraryValueOpnd(instr, LibraryValue::ValueFalse);
+ }
+
+ Lowerer::InsertMove(dst, opndTrue, instr);
+
+ // CMP src1, src2
+ newInstr = IR::Instr::New(isFloatSrc ? Js::OpCode::VCMPF64 : Js::OpCode::CMP, this->m_func);
+ newInstr->SetSrc1(src1);
+ newInstr->SetSrc2(src2);
+ instr->InsertBefore(newInstr);
+ LowererMD::Legalize(newInstr);
+
+ if (isFloatSrc)
+ {
+ instr->InsertBefore(IR::Instr::New(Js::OpCode::VMRS, this->m_func));
+ }
+
+ bool addNaNCheck = false;
+ Js::OpCode opcode = Js::OpCode::InvalidOpCode;
+
+ switch (instr->m_opcode)
+ {
+ case Js::OpCode::CmEq_A:
+ case Js::OpCode::CmSrEq_A:
+ case Js::OpCode::CmEq_I4:
+ opcode = Js::OpCode::BEQ;
+ break;
+
+ case Js::OpCode::CmNeq_A:
+ case Js::OpCode::CmSrNeq_A:
+ case Js::OpCode::CmNeq_I4:
+ opcode = Js::OpCode::BNE;
+ addNaNCheck = isFloatSrc;
+ break;
+
+ case Js::OpCode::CmGt_A:
+ case Js::OpCode::CmGt_I4:
+ opcode = Js::OpCode::BGT;
+ break;
+
+ case Js::OpCode::CmGe_A:
+ case Js::OpCode::CmGe_I4:
+ opcode = Js::OpCode::BGE;
+ break;
+
+ case Js::OpCode::CmLt_A:
+ case Js::OpCode::CmLt_I4:
+ //Can't use BLT as it is set when the operands are unordered (NaN).
+ opcode = isFloatSrc ? Js::OpCode::BCC : Js::OpCode::BLT;
+ break;
+
+ case Js::OpCode::CmLe_A:
+ case Js::OpCode::CmLe_I4:
+ //Can't use BLE as it is set when the operands are unordered (NaN).
+ opcode = isFloatSrc ? Js::OpCode::BLS : Js::OpCode::BLE;
+ break;
+
+ case Js::OpCode::CmUnGt_A:
+ case Js::OpCode::CmUnGt_I4:
+ opcode = Js::OpCode::BHI;
+ break;
+
+ case Js::OpCode::CmUnGe_A:
+ case Js::OpCode::CmUnGe_I4:
+ opcode = Js::OpCode::BCS;
+ break;
+
+ case Js::OpCode::CmUnLt_A:
+ case Js::OpCode::CmUnLt_I4:
+ opcode = Js::OpCode::BCC;
+ break;
+
+ case Js::OpCode::CmUnLe_A:
+ case Js::OpCode::CmUnLe_I4:
+ opcode = Js::OpCode::BLS;
+ break;
+
+ default: Assert(false);
+ }
+
+ if (addNaNCheck)
+ {
+ newInstr = IR::BranchInstr::New(Js::OpCode::BVS, done, m_func);
+ instr->InsertBefore(newInstr);
+ }
+
+ newInstr = IR::BranchInstr::New(opcode, done, m_func);
+ instr->InsertBefore(newInstr);
+
+ Lowerer::InsertMove(dst, opndFalse, instr);
+ instr->InsertBefore(done);
+ instr->Remove();
}
///----------------------------------------------------------------------------
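A C-level model of the ARM sequence sketched in the comments above (a behavioral sketch, not the emitted code); the extra BVS branch is what makes an unordered (NaN) compare keep the preloaded result for the not-equal family:

    #include <cmath>
    bool cm_neq_arm(double a, double b) {
        bool dst = true;                                // LDIMM dst, trueResult
        if (std::isnan(a) || std::isnan(b)) return dst; // FCMP + BVS $done
        if (a != b) return dst;                         // B.NE $done
        dst = false;                                    // LDIMM dst, falseResult
        return dst;                                     // $done:
    }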
diff --git a/deps/chakrashim/core/lib/Backend/arm/LowerMD.h b/deps/chakrashim/core/lib/Backend/arm/LowerMD.h
index 28ee28d0a2d..0eb5bdcfd3d 100644
--- a/deps/chakrashim/core/lib/Backend/arm/LowerMD.h
+++ b/deps/chakrashim/core/lib/Backend/arm/LowerMD.h
@@ -94,8 +94,9 @@ class LowererMD
void GenerateObjectPairTest(IR::Opnd * opndSrc1, IR::Opnd * opndSrc2, IR::Instr * insertInstr, IR::LabelInstr * labelTarget);
bool GenerateObjectTest(IR::Opnd * opndSrc, IR::Instr * insertInstr, IR::LabelInstr * labelTarget, bool fContinueLabel = false);
bool GenerateFastCmSrEqConst(IR::Instr *instr);
- bool GenerateFastCmXxI4(IR::Instr *instr);
- bool GenerateFastCmXxR8(IR::Instr *instr) { Assert(UNREACHED); return nullptr; }
+ void GenerateFastCmXxI4(IR::Instr *instr);
+ void GenerateFastCmXxR8(IR::Instr *instr);
+ void GenerateFastCmXx(IR::Instr *instr);
bool GenerateFastCmXxTaggedInt(IR::Instr *instr, bool isInHelper = false);
IR::Instr * GenerateConvBool(IR::Instr *instr);
void GenerateClz(IR::Instr * instr);
diff --git a/deps/chakrashim/core/lib/Backend/arm/machvalues.h b/deps/chakrashim/core/lib/Backend/arm/machvalues.h
index eb770e33fc9..3d25f59d149 100644
--- a/deps/chakrashim/core/lib/Backend/arm/machvalues.h
+++ b/deps/chakrashim/core/lib/Backend/arm/machvalues.h
@@ -16,4 +16,4 @@ static const int MachPtr = 4;
static const int MachDouble = 8;
static const int MachRegDouble = 8;
static const int MachArgsSlotOffset = MachPtr;
-static const int MachStackAlignment = MachDouble;
\ No newline at end of file
+static const int MachStackAlignment = MachDouble;
diff --git a/deps/chakrashim/core/lib/Backend/arm64/ARM64NeonEncoder.h b/deps/chakrashim/core/lib/Backend/arm64/ARM64NeonEncoder.h
index cf21f6e1544..3424aad4292 100644
--- a/deps/chakrashim/core/lib/Backend/arm64/ARM64NeonEncoder.h
+++ b/deps/chakrashim/core/lib/Backend/arm64/ARM64NeonEncoder.h
@@ -88,7 +88,7 @@ class NeonRegisterParam
)
{
UNREFERENCED_PARAMETER(Reg);
- NT_ASSERT(Reg >= NEONREG_FIRST && Reg <= NEONREG_LAST);
+ Assert(Reg >= NEONREG_FIRST && Reg <= NEONREG_LAST);
}
static
@@ -98,7 +98,7 @@ class NeonRegisterParam
)
{
UNREFERENCED_PARAMETER(Size);
- NT_ASSERT(Size == 4 || Size == 8 || Size == 16);
+ Assert(Size == 4 || Size == 8 || Size == 16);
}
ULONG m_Encoded;
@@ -133,27 +133,27 @@ NeonSize(
switch (ElementSizeInBytes)
{
case 1:
- NT_ASSERT(NumElements == 1 || NumElements == 8 || NumElements == 16);
+ Assert(NumElements == 1 || NumElements == 8 || NumElements == 16);
return (NumElements == 1) ? SIZE_1B : (NumElements == 8) ? SIZE_8B : SIZE_16B;
case 2:
- NT_ASSERT(NumElements == 1 || NumElements == 4 || NumElements == 8);
+ Assert(NumElements == 1 || NumElements == 4 || NumElements == 8);
return (NumElements == 1) ? SIZE_1H : (NumElements == 4) ? SIZE_4H : SIZE_8H;
case 4:
- NT_ASSERT(NumElements == 1 || NumElements == 2 || NumElements == 4);
+ Assert(NumElements == 1 || NumElements == 2 || NumElements == 4);
return (NumElements == 1) ? SIZE_1S : (NumElements == 2) ? SIZE_2S : SIZE_4S;
case 8:
- NT_ASSERT(NumElements == 1 || NumElements == 2);
+ Assert(NumElements == 1 || NumElements == 2);
return (NumElements == 1) ? SIZE_1D : SIZE_2D;
case 16:
- NT_ASSERT(NumElements == 1);
+ Assert(NumElements == 1);
return SIZE_1Q;
default:
- NT_ASSERT(!"Invalid element size passed to NeonSize.");
+ Assert(!"Invalid element size passed to NeonSize.");
return SIZE_1B;
}
}
@@ -257,10 +257,10 @@ EmitNeonBinaryCommon(
{
if (NeonSizeIsScalar(SrcSize)) {
- NT_ASSERT(ScalarOpcode != 0);
+ Assert(ScalarOpcode != 0);
return Emitter.EmitFourBytes(ScalarOpcode | ((SrcSize & 3) << 22) | (Src.RawRegister() << 5) | Dest.RawRegister());
} else {
- NT_ASSERT(VectorOpcode != 0);
+ Assert(VectorOpcode != 0);
return Emitter.EmitFourBytes(VectorOpcode | (((SrcSize >> 2) & 1) << 30) | ((SrcSize & 3) << 22) | (Src.RawRegister() << 5) | Dest.RawRegister());
}
}
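// Editor's note (illustrative sketch, not part of the patch):
// EmitNeonBinaryCommon above composes a fixed 32-bit A64 instruction word by
// OR-ing the element-size field (bits 23:22), the Q bit (bit 30, which selects
// the 128-bit form), and the register numbers into a base opcode. The helper
// below re-derives the vector NEG encoding from the 0x2e20b800 base used by
// EmitNeonNeg; the size/Q split mirrors the (SrcSize & 3) and
// ((SrcSize >> 2) & 1) usage above and is an assumption about the NEON_SIZE
// layout, not a copy of it.
//
// #include <cstdint>
// #include <cstdio>
//
// constexpr uint32_t EncodeVector2Src(uint32_t base, unsigned sizeLog2, bool q,
//                                     unsigned srcReg, unsigned destReg)
// {
//     return base | (uint32_t(q) << 30) | ((sizeLog2 & 3u) << 22)
//                 | ((srcReg & 31u) << 5) | (destReg & 31u);
// }
//
// int main()
// {
//     // NEG v0.4s, v1.4s : 4-byte lanes => sizeLog2 = 2, Q = 1
//     std::printf("%08x\n", (unsigned)EncodeVector2Src(0x2e20b800u, 2, true, 1, 0));
//     // prints 6ea0b820, the architectural encoding of NEG v0.4s, v1.4s
//     return 0;
// }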
@@ -274,7 +274,7 @@ EmitNeonAbs(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e20b800, 0x5e20b800);
}
@@ -287,7 +287,7 @@ EmitNeonAddp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(SrcSize == SIZE_1D);
+ Assert(SrcSize == SIZE_1D);
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0, 0x5e31b800);
}
@@ -300,7 +300,7 @@ EmitNeonAddv(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_4S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_4S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e31b800);
}
@@ -313,7 +313,7 @@ EmitNeonCls(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e204800);
}
@@ -326,7 +326,7 @@ EmitNeonClz(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e204800);
}
@@ -339,7 +339,7 @@ EmitNeonCmeq0(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e209800, 0x5e209800);
}
@@ -352,7 +352,7 @@ EmitNeonCmge0(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e208800, 0x7e208800);
}
@@ -365,7 +365,7 @@ EmitNeonCmgt0(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e208800, 0x5e208800);
}
@@ -378,7 +378,7 @@ EmitNeonCmle0(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e209800, 0x7e209800);
}
@@ -391,7 +391,7 @@ EmitNeonCmlt0(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e20a800, 0x5e20a800);
}
@@ -404,7 +404,7 @@ EmitNeonCnt(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e205800);
}
@@ -417,7 +417,7 @@ EmitNeonNeg(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e20b800, 0x7e20b800);
}
@@ -430,7 +430,7 @@ EmitNeonNot(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e205800);
}
@@ -443,7 +443,7 @@ EmitNeonRbit(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e605800);
}
@@ -456,7 +456,7 @@ EmitNeonRev16(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e201800);
}
@@ -469,7 +469,7 @@ EmitNeonRev32(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e200800);
}
@@ -482,7 +482,7 @@ EmitNeonRev64(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e200800);
}
@@ -495,7 +495,7 @@ EmitNeonSadalp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e206800);
}
@@ -508,7 +508,7 @@ EmitNeonSaddlp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e202800);
}
@@ -521,7 +521,7 @@ EmitNeonSaddlv(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_4S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_4S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e303800);
}
@@ -534,7 +534,7 @@ EmitNeonShll(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, NEON_SIZE(SrcSize & ~4), 0x2e213800);
}
@@ -547,7 +547,7 @@ EmitNeonShll2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_16B | VALID_8H | VALID_4S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_16B | VALID_8H | VALID_4S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, NEON_SIZE(SrcSize | 4), 0x2e213800);
}
@@ -560,7 +560,7 @@ EmitNeonSmaxv(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_4S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_4S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e30a800);
}
@@ -573,7 +573,7 @@ EmitNeonSminv(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_4S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_4S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e31a800);
}
@@ -586,7 +586,7 @@ EmitNeonSqabs(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e207800, 0x5e207800);
}
@@ -599,7 +599,7 @@ EmitNeonSqneg(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e207800, 0x7e207800);
}
@@ -612,7 +612,7 @@ EmitNeonSqxtn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, NEON_SIZE((SrcSize - 1) & ~4), 0x0e214800, 0x5e214800);
}
@@ -625,7 +625,7 @@ EmitNeonSqxtn2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, NEON_SIZE((SrcSize - 1) | 4), 0x0e214800, 0x5e214800);
}
@@ -638,7 +638,7 @@ EmitNeonSqxtun(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, NEON_SIZE((SrcSize - 1) & ~4), 0x2e212800, 0x7e212800);
}
@@ -651,7 +651,7 @@ EmitNeonSqxtun2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, NEON_SIZE((SrcSize - 1) | 4), 0x2e212800, 0x7e212800);
}
@@ -664,7 +664,7 @@ EmitNeonSuqadd(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e203800, 0x5e203800);
}
@@ -677,7 +677,7 @@ EmitNeonUadalp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e206800);
}
@@ -690,7 +690,7 @@ EmitNeonUaddlp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e202800);
}
@@ -703,7 +703,7 @@ EmitNeonUaddlv(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_4S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_4S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e303800);
}
@@ -716,7 +716,7 @@ EmitNeonUmaxv(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_4S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_4S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e30a800);
}
@@ -729,7 +729,7 @@ EmitNeonUminv(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_4S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_4S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e31a800);
}
@@ -742,7 +742,7 @@ EmitNeonUqxtn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, NEON_SIZE((SrcSize - 1) & ~4), 0x2e214800, 0x7e214800);
}
@@ -755,7 +755,7 @@ EmitNeonUqxtn2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, NEON_SIZE((SrcSize - 1) | 4), 0x2e214800, 0x7e214800);
}
@@ -768,7 +768,7 @@ EmitNeonUrecpe(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_24S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0ea1c800);
}
@@ -781,7 +781,7 @@ EmitNeonUrsqrte(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_24S));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2ea1c800);
}
@@ -794,7 +794,7 @@ EmitNeonUsqadd(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e203800, 0x7e203800);
}
@@ -807,7 +807,7 @@ EmitNeonXtn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, NEON_SIZE((SrcSize - 1) & ~4), 0x0e212800);
}
@@ -820,7 +820,7 @@ EmitNeonXtn2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
return EmitNeonBinaryCommon(Emitter, Dest, Src, NEON_SIZE((SrcSize - 1) | 4), 0x0e212800);
}
@@ -841,10 +841,10 @@ EmitNeonFloatBinaryCommon(
{
if (NeonSizeIsScalar(SrcSize)) {
- NT_ASSERT(ScalarOpcode != 0);
+ Assert(ScalarOpcode != 0);
return Emitter.EmitFourBytes(ScalarOpcode | ((SrcSize & 1) << 22) | (Src.RawRegister() << 5) | Dest.RawRegister());
} else {
- NT_ASSERT(VectorOpcode != 0);
+ Assert(VectorOpcode != 0);
return Emitter.EmitFourBytes(VectorOpcode | (((SrcSize >> 2) & 1) << 30) | ((SrcSize & 1) << 22) | (Src.RawRegister() << 5) | Dest.RawRegister());
}
}
@@ -858,7 +858,7 @@ EmitNeonFabs(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0ea0f800, 0x1e20c000);
}
@@ -871,7 +871,7 @@ EmitNeonFaddp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_2S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_2S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x7e30d800);
}
@@ -884,7 +884,7 @@ EmitNeonFcmeq0(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0ea0d800, 0x5ea0d800);
}
@@ -897,7 +897,7 @@ EmitNeonFcmge0(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2ea0c800, 0x7ea0c800);
}
@@ -910,7 +910,7 @@ EmitNeonFcmgt0(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0ea0c800, 0x5ea0c800);
}
@@ -923,7 +923,7 @@ EmitNeonFcmle0(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2ea0d800, 0x7ea0d800);
}
@@ -936,7 +936,7 @@ EmitNeonFcmlt0(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0ea0e800, 0x5ea0e800);
}
@@ -949,7 +949,7 @@ EmitNeonFcvtas(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e21c800, 0x5e21c800);
}
@@ -962,7 +962,7 @@ EmitNeonFcvtau(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e21c800, 0x7e21c800);
}
@@ -976,9 +976,9 @@ EmitNeonFcvt(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D));
- NT_ASSERT(NeonSizeIsValid(DestSize, VALID_1H | VALID_1S | VALID_1D));
- NT_ASSERT(SrcSize != DestSize);
+ Assert(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(DestSize, VALID_1H | VALID_1S | VALID_1D));
+ Assert(SrcSize != DestSize);
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0, 0x1ea34000 ^ ((SrcSize & 2) << 22) ^ ((DestSize & 3) << 15));
}
@@ -991,7 +991,7 @@ EmitNeonFcvtl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, NEON_SIZE((SrcSize + 1) & ~4), 0x0e217800);
}
@@ -1004,7 +1004,7 @@ EmitNeonFcvtl2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, NEON_SIZE((SrcSize + 1) | 4), 0x0e217800);
}
@@ -1017,7 +1017,7 @@ EmitNeonFcvtms(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e21b800, 0x5e21b800);
}
@@ -1030,7 +1030,7 @@ EmitNeonFcvtmu(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e21b800, 0x7e21b800);
}
@@ -1043,7 +1043,7 @@ EmitNeonFcvtn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_4S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, NEON_SIZE(SrcSize & ~4), 0x0e216800);
}
@@ -1056,7 +1056,7 @@ EmitNeonFcvtn2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_4S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, NEON_SIZE(SrcSize | 4), 0x4e216800);
}
@@ -1069,7 +1069,7 @@ EmitNeonFcvtns(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e21a800, 0x5e21a800);
}
@@ -1082,7 +1082,7 @@ EmitNeonFcvtnu(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e21a800, 0x7e21a800);
}
@@ -1095,7 +1095,7 @@ EmitNeonFcvtps(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0ea1a800, 0x5ea1a800);
}
@@ -1108,7 +1108,7 @@ EmitNeonFcvtpu(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2ea1a800, 0x7ea1a800);
}
@@ -1121,7 +1121,7 @@ EmitNeonFcvtxn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_4S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, NEON_SIZE(SrcSize & ~4), 0x2e216800, 0x7e216800);
}
@@ -1134,7 +1134,7 @@ EmitNeonFcvtxn2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_4S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, NEON_SIZE(SrcSize | 4), 0x2e216800, 0x7e216800);
}
@@ -1147,7 +1147,7 @@ EmitNeonFcvtzs(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0ea1b800, 0x5ea1b800);
}
@@ -1160,7 +1160,7 @@ EmitNeonFcvtzu(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2ea1b800, 0x7ea1b800);
}
@@ -1173,7 +1173,7 @@ EmitNeonFmaxnmp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_2S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_2S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x7e30c800);
}
@@ -1186,7 +1186,7 @@ EmitNeonFmaxnmv(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_4S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_4S));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e30c800);
}
@@ -1199,7 +1199,7 @@ EmitNeonFmaxp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_2S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_2S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x7e30f800);
}
@@ -1212,7 +1212,7 @@ EmitNeonFmaxv(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_4S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_4S));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e30f800);
}
@@ -1225,7 +1225,7 @@ EmitNeonFminnmp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_2S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_2S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x7eb0c800);
}
@@ -1238,7 +1238,7 @@ EmitNeonFminnmv(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_4S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_4S));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2eb0c800);
}
@@ -1251,7 +1251,7 @@ EmitNeonFminp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_2S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_2S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x7eb0f800);
}
@@ -1264,7 +1264,7 @@ EmitNeonFminv(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_4S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_4S));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2eb0f800);
}
@@ -1277,7 +1277,7 @@ EmitNeonFmov(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0, 0x1e204000);
}
@@ -1290,7 +1290,7 @@ EmitNeonFmovImmediate(
NEON_SIZE DestSize
)
{
- NT_ASSERT(NeonSizeIsValid(DestSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(DestSize, VALID_1S | VALID_1D));
return Emitter.EmitFourBytes(0x1e201000 | ((DestSize & 1) << 22) | (ULONG(Immediate) << 13) | Dest.RawRegister());
}
@@ -1303,7 +1303,7 @@ EmitNeonFneg(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2ea0f800, 0x1e214000);
}
@@ -1316,7 +1316,7 @@ EmitNeonFrecpe(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0ea1d800, 0x5ea1d800);
}
@@ -1329,7 +1329,7 @@ EmitNeonFrecpx(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0, 0x5ea1f800);
}
@@ -1342,7 +1342,7 @@ EmitNeonFrinta(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e218800, 0x1e264000);
}
@@ -1355,7 +1355,7 @@ EmitNeonFrinti(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2ea19800, 0x1e27c000);
}
@@ -1368,7 +1368,7 @@ EmitNeonFrintm(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e219800, 0x1e254000);
}
@@ -1381,7 +1381,7 @@ EmitNeonFrintn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e218800, 0x1e244000);
}
@@ -1394,7 +1394,7 @@ EmitNeonFrintp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0ea18800, 0x1e24c000);
}
@@ -1407,7 +1407,7 @@ EmitNeonFrintx(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e219800, 0x1e274000);
}
@@ -1420,7 +1420,7 @@ EmitNeonFrintz(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0ea19800, 0x1e25c000);
}
@@ -1433,7 +1433,7 @@ EmitNeonFrsqrte(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2ea1d800, 0x7ea1d800);
}
@@ -1446,7 +1446,7 @@ EmitNeonFsqrt(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2ea1f800, 0x1e21c000);
}
@@ -1459,7 +1459,7 @@ EmitNeonScvtf(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x0e21d800, 0x5e21d800);
}
@@ -1472,7 +1472,7 @@ EmitNeonUcvtf(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatBinaryCommon(Emitter, Dest, Src, SrcSize, 0x2e21d800, 0x7e21d800);
}
@@ -1494,10 +1494,10 @@ EmitNeonTrinaryCommon(
{
if (NeonSizeIsScalar(SrcSize)) {
- NT_ASSERT(ScalarOpcode != 0);
+ Assert(ScalarOpcode != 0);
return Emitter.EmitFourBytes(ScalarOpcode | ((SrcSize & 3) << 22) | (Src2.RawRegister() << 16) | (Src1.RawRegister() << 5) | Dest.RawRegister());
} else {
- NT_ASSERT(VectorOpcode != 0);
+ Assert(VectorOpcode != 0);
return Emitter.EmitFourBytes(VectorOpcode | (((SrcSize >> 2) & 1) << 30) | ((SrcSize & 3) << 22) | (Src2.RawRegister() << 16) | (Src1.RawRegister() << 5) | Dest.RawRegister());
}
}
@@ -1512,7 +1512,7 @@ EmitNeonAdd(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e208400, 0x5e208400);
}
@@ -1526,7 +1526,7 @@ EmitNeonAddhn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x0e204000);
}
@@ -1540,7 +1540,7 @@ EmitNeonAddhn2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x0e204000);
}
@@ -1554,7 +1554,7 @@ EmitNeonAddp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e20bc00);
}
@@ -1568,7 +1568,7 @@ EmitNeonAnd(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e201c00);
}
@@ -1582,7 +1582,7 @@ EmitNeonBicRegister(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e601c00);
}
@@ -1596,7 +1596,7 @@ EmitNeonBif(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2ee01c00);
}
@@ -1610,7 +1610,7 @@ EmitNeonBit(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2ea01c00);
}
@@ -1624,7 +1624,7 @@ EmitNeonBsl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e601c00);
}
@@ -1638,7 +1638,7 @@ EmitNeonCmeq(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e208c00, 0x7e208c00);
}
@@ -1652,7 +1652,7 @@ EmitNeonCmge(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e203c00, 0x5e203c00);
}
@@ -1666,7 +1666,7 @@ EmitNeonCmgt(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e203400, 0x5e203400);
}
@@ -1680,7 +1680,7 @@ EmitNeonCmhi(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e203400, 0x7e203400);
}
@@ -1694,7 +1694,7 @@ EmitNeonCmhs(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e203c00, 0x7e203c00);
}
@@ -1708,7 +1708,7 @@ EmitNeonCmtst(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e208c00, 0x5e208c00);
}
@@ -1722,7 +1722,7 @@ EmitNeonEor(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e201c00);
}
@@ -1736,7 +1736,7 @@ EmitNeonMla(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e209400);
}
@@ -1750,7 +1750,7 @@ EmitNeonMls(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e209400);
}
@@ -1763,7 +1763,7 @@ EmitNeonMov(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B));
return EmitNeonTrinaryCommon(Emitter, Dest, Src, Src, SrcSize, 0x0ea01c00);
}
@@ -1777,7 +1777,7 @@ EmitNeonMul(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e209c00);
}
@@ -1791,7 +1791,7 @@ EmitNeonOrn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0ee01c00);
}
@@ -1805,7 +1805,7 @@ EmitNeonOrrRegister(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0ea01c00);
}
@@ -1819,7 +1819,7 @@ EmitNeonPmul(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e209c00);
}
@@ -1833,7 +1833,7 @@ EmitNeonPmull(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_1D | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_1D | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x0e20e000, 0x0e20e000);
}
@@ -1847,7 +1847,7 @@ EmitNeonPmull2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_16B | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_16B | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x0e20e000, 0x0e20e000);
}
@@ -1861,7 +1861,7 @@ EmitNeonRaddhn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE((SrcSize - 1) & ~4), 0x2e204000);
}
@@ -1875,7 +1875,7 @@ EmitNeonRaddhn2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE((SrcSize - 1) | 4), 0x2e204000);
}
@@ -1889,7 +1889,7 @@ EmitNeonRsubhn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE((SrcSize - 1) & ~4), 0x2e206000);
}
@@ -1903,7 +1903,7 @@ EmitNeonRsubhn2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE((SrcSize - 1) | 4), 0x2e206000);
}
@@ -1917,7 +1917,7 @@ EmitNeonSaba(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e207c00);
}
@@ -1931,7 +1931,7 @@ EmitNeonSabal(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x0e205000);
}
@@ -1945,7 +1945,7 @@ EmitNeonSabal2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x0e205000);
}
@@ -1959,7 +1959,7 @@ EmitNeonSabd(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e207400);
}
@@ -1973,7 +1973,7 @@ EmitNeonSabdl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x0e207000);
}
@@ -1987,7 +1987,7 @@ EmitNeonSabdl2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x0e207000);
}
@@ -2001,7 +2001,7 @@ EmitNeonSaddl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x0e200000);
}
@@ -2015,7 +2015,7 @@ EmitNeonSaddl2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x0e200000);
}
@@ -2029,7 +2029,7 @@ EmitNeonSaddw(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x0e201000);
}
@@ -2043,7 +2043,7 @@ EmitNeonSaddw2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x0e201000);
}
@@ -2057,7 +2057,7 @@ EmitNeonShadd(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e200400);
}
@@ -2071,7 +2071,7 @@ EmitNeonShsub(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e202400);
}
@@ -2085,7 +2085,7 @@ EmitNeonSmax(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e206400);
}
@@ -2099,7 +2099,7 @@ EmitNeonSmaxp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e20a400);
}
@@ -2113,7 +2113,7 @@ EmitNeonSmin(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e206c00);
}
@@ -2127,7 +2127,7 @@ EmitNeonSminp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e20ac00);
}
@@ -2141,7 +2141,7 @@ EmitNeonSmlal(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x0e208000);
}
@@ -2155,7 +2155,7 @@ EmitNeonSmlal2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x0e208000);
}
@@ -2169,7 +2169,7 @@ EmitNeonSmlsl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x0e20a000);
}
@@ -2183,7 +2183,7 @@ EmitNeonSmlsl2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x0e20a000);
}
@@ -2197,7 +2197,7 @@ EmitNeonSmull(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x0e20c000);
}
@@ -2211,7 +2211,7 @@ EmitNeonSmull2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x0e20c000);
}
@@ -2225,7 +2225,7 @@ EmitNeonSqadd(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e200c00, 0x5e200c00);
}
@@ -2239,7 +2239,7 @@ EmitNeonSqdmlal(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x0e209000, 0x5e209000);
}
@@ -2253,7 +2253,7 @@ EmitNeonSqdmlal2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x0e209000);
}
@@ -2267,7 +2267,7 @@ EmitNeonSqdmlsl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_48H | VALID_24S));
    return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x0e20b000, 0x5e20b000);
}
@@ -2281,7 +2281,7 @@ EmitNeonSqdmlsl2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x0e20b000);
}
@@ -2295,7 +2295,7 @@ EmitNeonSqdmulh(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e20b400, 0x5e20b400);
}
@@ -2309,7 +2309,7 @@ EmitNeonSqdmull(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x0e20d000, 0x5e20d000);
}
@@ -2323,7 +2323,7 @@ EmitNeonSqdmull2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x0e20d000);
}
@@ -2337,7 +2337,7 @@ EmitNeonSqrdmulh(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e20b400, 0x7e20b400);
}
@@ -2351,7 +2351,7 @@ EmitNeonSqrshl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e205c00, 0x5e205c00);
}
@@ -2365,7 +2365,7 @@ EmitNeonSqshl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e204c00, 0x5e204c00);
}
@@ -2379,7 +2379,7 @@ EmitNeonSqsub(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e202c00, 0x5e202c00);
}
@@ -2393,7 +2393,7 @@ EmitNeonSrhadd(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e201400);
}
@@ -2407,7 +2407,7 @@ EmitNeonSrshl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e205400, 0x5e205400);
}
@@ -2421,7 +2421,7 @@ EmitNeonSshl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e204400, 0x5e204400);
}
@@ -2435,7 +2435,7 @@ EmitNeonSsubl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x0e202000);
}
@@ -2449,7 +2449,7 @@ EmitNeonSsubl2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x0e202000);
}
@@ -2463,7 +2463,7 @@ EmitNeonSsubw(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x0e203000);
}
@@ -2477,7 +2477,7 @@ EmitNeonSsubw2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x0e203000);
}
@@ -2491,7 +2491,7 @@ EmitNeonSub(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e208400, 0x7e208400);
}
@@ -2505,7 +2505,7 @@ EmitNeonSubhn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x0e206000);
}
@@ -2519,7 +2519,7 @@ EmitNeonSubhn2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x0e206000);
}
@@ -2533,7 +2533,7 @@ EmitNeonTrn1(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e002800);
}
@@ -2547,7 +2547,7 @@ EmitNeonTrn2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e006800);
}
@@ -2561,7 +2561,7 @@ EmitNeonUaba(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e207c00);
}
@@ -2575,7 +2575,7 @@ EmitNeonUabal(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x2e205000);
}
@@ -2589,7 +2589,7 @@ EmitNeonUabal2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x2e205000);
}
@@ -2603,7 +2603,7 @@ EmitNeonUabd(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e207400);
}
@@ -2617,7 +2617,7 @@ EmitNeonUabdl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x2e207000);
}
@@ -2631,7 +2631,7 @@ EmitNeonUabdl2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x2e207000);
}
@@ -2645,7 +2645,7 @@ EmitNeonUaddl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x2e200000);
}
@@ -2659,7 +2659,7 @@ EmitNeonUaddl2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x2e200000);
}
@@ -2673,7 +2673,7 @@ EmitNeonUaddw(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x2e201000);
}
@@ -2687,7 +2687,7 @@ EmitNeonUaddw2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x2e201000);
}
@@ -2701,7 +2701,7 @@ EmitNeonUhadd(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e200400);
}
@@ -2715,7 +2715,7 @@ EmitNeonUhsub(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e202400);
}
@@ -2729,7 +2729,7 @@ EmitNeonUmax(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e206400);
}
@@ -2743,7 +2743,7 @@ EmitNeonUmaxp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e20a400);
}
@@ -2757,7 +2757,7 @@ EmitNeonUmin(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e206c00);
}
@@ -2771,7 +2771,7 @@ EmitNeonUminp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e20ac00);
}
@@ -2785,7 +2785,7 @@ EmitNeonUmlal(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x2e208000);
}
@@ -2799,7 +2799,7 @@ EmitNeonUmlal2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x2e208000);
}
@@ -2813,7 +2813,7 @@ EmitNeonUmlsl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x2e20a000);
}
@@ -2827,7 +2827,7 @@ EmitNeonUmlsl2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x2e20a000);
}
@@ -2841,7 +2841,7 @@ EmitNeonUmull(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x2e20c000);
}
@@ -2855,7 +2855,7 @@ EmitNeonUmull2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x2e20c000);
}
@@ -2869,7 +2869,7 @@ EmitNeonUqadd(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e200c00, 0x7e200c00);
}
@@ -2883,7 +2883,7 @@ EmitNeonUqrshl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e205c00, 0x7e205c00);
}
@@ -2897,7 +2897,7 @@ EmitNeonUqshl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e204c00, 0x7e204c00);
}
@@ -2911,7 +2911,7 @@ EmitNeonUqsub(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e202c00, 0x7e202c00);
}
@@ -2925,7 +2925,7 @@ EmitNeonUrhadd(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e201400);
}
@@ -2939,7 +2939,7 @@ EmitNeonUrshl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e205400, 0x7e205400);
}
@@ -2953,7 +2953,7 @@ EmitNeonUshl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e204400, 0x7e204400);
}
@@ -2967,7 +2967,7 @@ EmitNeonUsubl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x2e202000);
}
@@ -2981,7 +2981,7 @@ EmitNeonUsubl2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x2e202000);
}
@@ -2995,7 +2995,7 @@ EmitNeonUsubw(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize & ~4), 0x2e203000);
}
@@ -3009,7 +3009,7 @@ EmitNeonUsubw2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, NEON_SIZE(SrcSize | 4), 0x2e203000);
}
@@ -3023,7 +3023,7 @@ EmitNeonUzp1(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e001800);
}
@@ -3037,7 +3037,7 @@ EmitNeonUzp2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e005800);
}
@@ -3051,7 +3051,7 @@ EmitNeonZip1(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e003800);
}
@@ -3065,7 +3065,7 @@ EmitNeonZip2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e007800);
}
@@ -3087,10 +3087,10 @@ EmitNeonFloatTrinaryCommon(
{
if (NeonSizeIsScalar(SrcSize)) {
- NT_ASSERT(ScalarOpcode != 0);
+ Assert(ScalarOpcode != 0);
return Emitter.EmitFourBytes(ScalarOpcode | ((SrcSize & 1) << 22) | (Src2.RawRegister() << 16) | (Src1.RawRegister() << 5) | Dest.RawRegister());
} else {
- NT_ASSERT(VectorOpcode != 0);
+ Assert(VectorOpcode != 0);
return Emitter.EmitFourBytes(VectorOpcode | (((SrcSize >> 2) & 1) << 30) | ((SrcSize & 1) << 22) | (Src2.RawRegister() << 16) | (Src1.RawRegister() << 5) | Dest.RawRegister());
}
}
@@ -3105,7 +3105,7 @@ EmitNeonFabd(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2ea0d400, 0x7ea0d400);
}
@@ -3119,7 +3119,7 @@ EmitNeonFacge(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e20ec00, 0x7e20ec00);
}
@@ -3133,7 +3133,7 @@ EmitNeonFacgt(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2ea0ec00, 0x7ea0ec00);
}
@@ -3147,7 +3147,7 @@ EmitNeonFadd(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e20d400, 0x1e202800);
}
@@ -3161,7 +3161,7 @@ EmitNeonFaddp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e20d400);
}
@@ -3179,7 +3179,7 @@ EmitNeonFcmeq(
// NaNs produce 0s (false)
//
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e20e400, 0x5e20e400);
}
@@ -3193,7 +3193,7 @@ EmitNeonFcmge(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e20e400, 0x7e20e400);
}
@@ -3207,7 +3207,7 @@ EmitNeonFcmgt(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2ea0e400, 0x7ea0e400);
}
@@ -3220,7 +3220,7 @@ EmitNeonFcmp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonFloatTrinaryCommon(Emitter, NEONREG_D0, Src1, Src2, SrcSize, 0, 0x1e202000);
}
@@ -3232,7 +3232,7 @@ EmitNeonFcmp0(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonFloatTrinaryCommon(Emitter, NEONREG_D0, Src1, NEONREG_D0, SrcSize, 0, 0x1e202008);
}
@@ -3245,7 +3245,7 @@ EmitNeonFcmpe(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonFloatTrinaryCommon(Emitter, NEONREG_D0, Src1, Src2, SrcSize, 0, 0x1e202010);
}
@@ -3257,7 +3257,7 @@ EmitNeonFcmpe0(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonFloatTrinaryCommon(Emitter, NEONREG_D0, Src1, NEONREG_D0, SrcSize, 0, 0x1e204018);
}
@@ -3271,7 +3271,7 @@ EmitNeonFdiv(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e20fc00, 0x1e201800);
}
@@ -3285,7 +3285,7 @@ EmitNeonFmax(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e20f400, 0x1e204800);
}
@@ -3299,7 +3299,7 @@ EmitNeonFmaxnm(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e20c400, 0x1e206800);
}
@@ -3313,7 +3313,7 @@ EmitNeonFmaxnmp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e20c400);
}
@@ -3327,7 +3327,7 @@ EmitNeonFmaxp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e20f400);
}
@@ -3341,7 +3341,7 @@ EmitNeonFmin(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0ea0f400, 0x1e205800);
}
@@ -3355,7 +3355,7 @@ EmitNeonFminnm(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0ea0c400, 0x1e207800);
}
@@ -3369,7 +3369,7 @@ EmitNeonFminnmp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2ea0c400);
}
@@ -3383,7 +3383,7 @@ EmitNeonFminp(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2ea0f400);
}
@@ -3397,7 +3397,7 @@ EmitNeonFmla(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e20cc00);
}
@@ -3411,7 +3411,7 @@ EmitNeonFmls(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0ea0cc00);
}
@@ -3425,7 +3425,7 @@ EmitNeonFmul(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x2e20dc00, 0x1e200800);
}
@@ -3439,7 +3439,7 @@ EmitNeonFmulx(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e20dc00, 0x5e20dc00);
}
@@ -3453,7 +3453,7 @@ EmitNeonFnmul(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0, 0x1e208800);
}
@@ -3467,7 +3467,7 @@ EmitNeonFrecps(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0e20fc00, 0x5e20fc00);
}
@@ -3481,7 +3481,7 @@ EmitNeonFrsqrts(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0ea0fc00, 0x5ea0fc00);
}
@@ -3495,7 +3495,7 @@ EmitNeonFsub(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D | VALID_24S | VALID_2D));
return EmitNeonFloatTrinaryCommon(Emitter, Dest, Src1, Src2, SrcSize, 0x0ea0d400, 0x1e203800);
}
@@ -3516,15 +3516,15 @@ EmitNeonShiftLeftImmediateCommon(
)
{
ULONG Size = SrcSize & 3;
- NT_ASSERT(Immediate < (8U << Size));
+ Assert(Immediate < (8U << Size));
ULONG EffShift = Immediate + (8 << Size);
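+    // Left shifts encode immh:immb as esize + shift, i.e. (8 << Size) + Immediate.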
if (NeonSizeIsScalar(SrcSize)) {
- NT_ASSERT(ScalarOpcode != 0);
+ Assert(ScalarOpcode != 0);
return Emitter.EmitFourBytes(ScalarOpcode | (EffShift << 16) | (Src.RawRegister() << 5) | Dest.RawRegister());
} else {
- NT_ASSERT(VectorOpcode != 0);
+ Assert(VectorOpcode != 0);
return Emitter.EmitFourBytes(VectorOpcode | (((SrcSize >> 2) & 1) << 30) | (EffShift << 16) | (Src.RawRegister() << 5) | Dest.RawRegister());
}
}
@@ -3542,15 +3542,15 @@ EmitNeonShiftRightImmediateCommon(
)
{
ULONG Size = SrcSize & 3;
- NT_ASSERT(Immediate <= (8U << Size));
+ Assert(Immediate <= (8U << Size));
ULONG EffShift = (16 << Size) - Immediate;
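+    // Right shifts encode immh:immb as 2*esize - shift, i.e. (16 << Size) - Immediate.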
if (NeonSizeIsScalar(SrcSize)) {
- NT_ASSERT(ScalarOpcode != 0);
+ Assert(ScalarOpcode != 0);
return Emitter.EmitFourBytes(ScalarOpcode | (EffShift << 16) | (Src.RawRegister() << 5) | Dest.RawRegister());
} else {
- NT_ASSERT(VectorOpcode != 0);
+ Assert(VectorOpcode != 0);
return Emitter.EmitFourBytes(VectorOpcode | (((SrcSize >> 2) & 1) << 30) | (EffShift << 16) | (Src.RawRegister() << 5) | Dest.RawRegister());
}
}
@@ -3565,7 +3565,7 @@ EmitNeonRshrn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE((SrcSize - 1) & ~4), 0x0f008c00);
}
@@ -3579,7 +3579,7 @@ EmitNeonRshrn2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE((SrcSize - 1) | 4), 0x0f008c00);
}
@@ -3593,7 +3593,7 @@ EmitNeonShl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftLeftImmediateCommon(Emitter, Dest, Src, Immediate, SrcSize, 0x0f005400, 0x5f005400);
}
@@ -3607,7 +3607,7 @@ EmitNeonShrn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE((SrcSize - 1) & ~4), 0x0f008400);
}
@@ -3621,7 +3621,7 @@ EmitNeonShrn2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE((SrcSize - 1) | 4), 0x0f008400);
}
@@ -3635,7 +3635,7 @@ EmitNeonSli(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftLeftImmediateCommon(Emitter, Dest, Src, Immediate, SrcSize, 0x2f005400, 0x7f005400);
}
@@ -3649,7 +3649,7 @@ EmitNeonSqrshrn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE((SrcSize - 1) & ~4), 0x0f009c00, 0x5f009c00);
}
@@ -3663,7 +3663,7 @@ EmitNeonSqrshrn2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE((SrcSize - 1) | 4), 0x0f009c00);
}
@@ -3677,7 +3677,7 @@ EmitNeonSqrshrun(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE((SrcSize - 1) & ~4), 0x2f008c00, 0x7f008c00);
}
@@ -3691,7 +3691,7 @@ EmitNeonSqrshrun2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE((SrcSize - 1) | 4), 0x2f008c00);
}
@@ -3705,7 +3705,7 @@ EmitNeonSqshl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftLeftImmediateCommon(Emitter, Dest, Src, Immediate, SrcSize, 0x0f007400, 0x5f007400);
}
@@ -3719,7 +3719,7 @@ EmitNeonSqshlu(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftLeftImmediateCommon(Emitter, Dest, Src, Immediate, SrcSize, 0x2f006400, 0x7f006400);
}
@@ -3733,7 +3733,7 @@ EmitNeonSqshrn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE((SrcSize - 1) & ~4), 0x0f009400, 0x5f009400);
}
@@ -3747,7 +3747,7 @@ EmitNeonSqshrn2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE((SrcSize - 1) | 4), 0x0f009400);
}
@@ -3761,7 +3761,7 @@ EmitNeonSri(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, SrcSize, 0x2f004400, 0x7f004400);
}
@@ -3775,7 +3775,7 @@ EmitNeonSrshr(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, SrcSize, 0x0f002400, 0x5f002400);
}
@@ -3789,7 +3789,7 @@ EmitNeonSrsra(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, SrcSize, 0x0f003400, 0x5f003400);
}
@@ -3803,7 +3803,7 @@ EmitNeonSshll(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftLeftImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE(SrcSize & ~4), 0x0f00a400);
}
@@ -3817,7 +3817,7 @@ EmitNeonSshll2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_16B | VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_16B | VALID_8H | VALID_4S | VALID_2D));
return EmitNeonShiftLeftImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE(SrcSize | 4), 0x0f00a400);
}
@@ -3831,7 +3831,7 @@ EmitNeonSshr(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, SrcSize, 0x0f000400, 0x5f000400);
}
@@ -3845,7 +3845,7 @@ EmitNeonSsra(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, SrcSize, 0x0f001400, 0x5f001400);
}
@@ -3858,7 +3858,7 @@ EmitNeonSxtl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftLeftImmediateCommon(Emitter, Dest, Src, 0, NEON_SIZE(SrcSize & ~4), 0x0f00a400);
}
@@ -3871,7 +3871,7 @@ EmitNeonSxtl2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_16B | VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_16B | VALID_8H | VALID_4S | VALID_2D));
return EmitNeonShiftLeftImmediateCommon(Emitter, Dest, Src, 0, NEON_SIZE(SrcSize | 4), 0x0f00a400);
}
@@ -3885,7 +3885,7 @@ EmitNeonUqrshrn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE((SrcSize - 1) & ~4), 0x2f009c00, 0x7f009c00);
}
@@ -3899,7 +3899,7 @@ EmitNeonUqrshrn2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE((SrcSize - 1) | 4), 0x2f009c00);
}
@@ -3913,7 +3913,7 @@ EmitNeonUqshl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftLeftImmediateCommon(Emitter, Dest, Src, Immediate, SrcSize, 0x2f007400, 0x7f007400);
}
@@ -3927,7 +3927,7 @@ EmitNeonUqshrn(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1H | VALID_1S | VALID_1D | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE((SrcSize - 1) & ~4), 0x2f009400, 0x7f009400);
}
@@ -3941,7 +3941,7 @@ EmitNeonUqshrn2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_8H | VALID_4S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE((SrcSize - 1) | 4), 0x2f009400);
}
@@ -3955,7 +3955,7 @@ EmitNeonUrshr(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, SrcSize, 0x2f002400, 0x7f002400);
}
@@ -3969,7 +3969,7 @@ EmitNeonUrsra(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, SrcSize, 0x2f003400, 0x7f003400);
}
@@ -3983,7 +3983,7 @@ EmitNeonUshll(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftLeftImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE(SrcSize & ~4), 0x2f00a400);
}
@@ -3997,7 +3997,7 @@ EmitNeonUshll2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_16B | VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_16B | VALID_8H | VALID_4S | VALID_2D));
return EmitNeonShiftLeftImmediateCommon(Emitter, Dest, Src, Immediate, NEON_SIZE(SrcSize | 4), 0x2f00a400);
}
@@ -4011,7 +4011,7 @@ EmitNeonUshr(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, SrcSize, 0x2f000400, 0x7f000400);
}
@@ -4025,7 +4025,7 @@ EmitNeonUsra(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftRightImmediateCommon(Emitter, Dest, Src, Immediate, SrcSize, 0x2f001400, 0x7f001400);
}
@@ -4038,7 +4038,7 @@ EmitNeonUxtl(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonShiftLeftImmediateCommon(Emitter, Dest, Src, 0, NEON_SIZE(SrcSize & ~4), 0x2f00a400);
}
@@ -4051,7 +4051,7 @@ EmitNeonUxtl2(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_16B | VALID_8H | VALID_4S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_16B | VALID_8H | VALID_4S | VALID_2D));
return EmitNeonShiftLeftImmediateCommon(Emitter, Dest, Src, 0, NEON_SIZE(SrcSize | 4), 0x2f00a400);
}
@@ -4081,7 +4081,7 @@ EmitNeonFcvtmsGen(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SrcSize, 0x1e300000);
}
@@ -4094,7 +4094,7 @@ EmitNeonFcvtmsGen64(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SrcSize, 0x9e300000);
}
@@ -4107,7 +4107,7 @@ EmitNeonFcvtmuGen(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SrcSize, 0x1e310000);
}
@@ -4120,7 +4120,7 @@ EmitNeonFcvtmuGen64(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SrcSize, 0x9e310000);
}
@@ -4133,7 +4133,7 @@ EmitNeonFcvtnsGen(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SrcSize, 0x1e200000);
}
@@ -4146,7 +4146,7 @@ EmitNeonFcvtnsGen64(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SrcSize, 0x9e200000);
}
@@ -4159,7 +4159,7 @@ EmitNeonFcvtnuGen(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SrcSize, 0x1e210000);
}
@@ -4172,7 +4172,7 @@ EmitNeonFcvtnuGen64(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SrcSize, 0x9e210000);
}
@@ -4185,7 +4185,7 @@ EmitNeonFcvtpsGen(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SrcSize, 0x1e280000);
}
@@ -4198,7 +4198,7 @@ EmitNeonFcvtpsGen64(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SrcSize, 0x9e280000);
}
@@ -4211,7 +4211,7 @@ EmitNeonFcvtpuGen(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SrcSize, 0x1e290000);
}
@@ -4224,7 +4224,7 @@ EmitNeonFcvtpuGen64(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SrcSize, 0x9e290000);
}
@@ -4237,7 +4237,7 @@ EmitNeonFcvtzsGen(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SrcSize, 0x1e380000);
}
@@ -4250,7 +4250,7 @@ EmitNeonFcvtzsGen64(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SrcSize, 0x9e380000);
}
@@ -4263,7 +4263,7 @@ EmitNeonFcvtzuGen(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SrcSize, 0x1e390000);
}
@@ -4276,7 +4276,7 @@ EmitNeonFcvtzuGen64(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SrcSize, 0x9e390000);
}
@@ -4290,7 +4290,7 @@ EmitNeonFmovToGeneral(
)
{
UNREFERENCED_PARAMETER(SrcSize);
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SIZE_1S, 0x1e260000);
}
@@ -4308,7 +4308,7 @@ EmitNeonFmovToGeneral64(
)
{
UNREFERENCED_PARAMETER(SrcSize);
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1D | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1D | VALID_2D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SIZE_1D, 0x9e260000);
}
@@ -4326,7 +4326,7 @@ EmitNeonFmovToGeneralHigh64(
)
{
UNREFERENCED_PARAMETER(SrcSize);
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_2D)); // TODO: Should this be VALID_1D?
+ Assert(NeonSizeIsValid(SrcSize, VALID_2D)); // TODO: Should this be VALID_1D?
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SIZE_1S /* SIZE_1D */, 0x9eae0000);
}
@@ -4357,7 +4357,7 @@ EmitNeonFmovFromGeneral(
)
{
UNREFERENCED_PARAMETER(DestSize);
- NT_ASSERT(NeonSizeIsValid(DestSize, VALID_1S));
+ Assert(NeonSizeIsValid(DestSize, VALID_1S));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SIZE_1S, 0x1e270000);
}
@@ -4375,7 +4375,7 @@ EmitNeonFmovFromGeneral64(
)
{
UNREFERENCED_PARAMETER(DestSize);
- NT_ASSERT(NeonSizeIsValid(DestSize, VALID_1D | VALID_2D));
+ Assert(NeonSizeIsValid(DestSize, VALID_1D | VALID_2D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SIZE_1D, 0x9e270000);
}
@@ -4393,7 +4393,7 @@ EmitNeonFmovFromGeneralHigh64(
)
{
UNREFERENCED_PARAMETER(DestSize);
- NT_ASSERT(NeonSizeIsValid(DestSize, VALID_2D));
+ Assert(NeonSizeIsValid(DestSize, VALID_2D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, SIZE_1S /* SIZE_1D */, 0x9eaf0000);
}
@@ -4406,7 +4406,7 @@ EmitNeonScvtf(
NEON_SIZE DstSize
)
{
- NT_ASSERT(NeonSizeIsValid(DstSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(DstSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, DstSize, 0x1e220000);
}
@@ -4419,7 +4419,7 @@ EmitNeonScvtf64(
NEON_SIZE DstSize
)
{
- NT_ASSERT(NeonSizeIsValid(DstSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(DstSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, DstSize, 0x9e220000);
}
@@ -4432,7 +4432,7 @@ EmitNeonUcvtf(
NEON_SIZE DstSize
)
{
- NT_ASSERT(NeonSizeIsValid(DstSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(DstSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, DstSize, 0x1e230000);
}
@@ -4445,7 +4445,7 @@ EmitNeonUcvtf64(
NEON_SIZE DstSize
)
{
- NT_ASSERT(NeonSizeIsValid(DstSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(DstSize, VALID_1S | VALID_1D));
return EmitNeonConvertScalarCommon(Emitter, Dest, Src, DstSize, 0x9e230000);
}
@@ -4465,7 +4465,7 @@ EmitNeonMovElementCommon(
)
{
ULONG Size = SrcSize & 3;
- NT_ASSERT((SrcIndex << Size) < 16);
+ Assert((SrcIndex << Size) < 16);
SrcIndex = ((SrcIndex << 1) | 1) << Size;
@@ -4482,7 +4482,7 @@ EmitNeonDupElement(
NEON_SIZE DestSize
)
{
- NT_ASSERT(NeonSizeIsValid(DestSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(DestSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonMovElementCommon(Emitter, Dest, Src, SrcIndex, DestSize, 0x0e000400 | (((DestSize >> 2) & 1) << 30));
}
@@ -4495,7 +4495,7 @@ EmitNeonDup(
NEON_SIZE DestSize
)
{
- NT_ASSERT(NeonSizeIsValid(DestSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(DestSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
return EmitNeonMovElementCommon(Emitter, Dest, NeonRegisterParam(NEONREG_D0 + Src.RawRegister()), 0, DestSize, 0x0e000c00 | (((DestSize >> 2) & 1) << 30));
}
@@ -4509,7 +4509,7 @@ EmitNeonIns(
NEON_SIZE DestSize
)
{
- NT_ASSERT(NeonSizeIsValid(DestSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(DestSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D));
return EmitNeonMovElementCommon(Emitter, Dest, NeonRegisterParam(NEONREG_D0 + Src.RawRegister()), DestIndex, DestSize, 0x4e001c00);
}
@@ -4523,7 +4523,7 @@ EmitNeonSmov(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H));
return EmitNeonMovElementCommon(Emitter, NeonRegisterParam(NEONREG_D0 + Dest.RawRegister()), Src, SrcIndex, SrcSize, 0x0e002c00);
}
@@ -4537,7 +4537,7 @@ EmitNeonSmov64(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S));
return EmitNeonMovElementCommon(Emitter, NeonRegisterParam(NEONREG_D0 + Dest.RawRegister()), Src, SrcIndex, SrcSize, 0x4e002c00);
}
@@ -4551,7 +4551,7 @@ EmitNeonUmov(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S));
return EmitNeonMovElementCommon(Emitter, NeonRegisterParam(NEONREG_D0 + Dest.RawRegister()), Src, SrcIndex, SrcSize, 0x0e003c00);
}
@@ -4565,7 +4565,7 @@ EmitNeonUmov64(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1B | VALID_1H | VALID_1S | VALID_1D));
return EmitNeonMovElementCommon(Emitter, NeonRegisterParam(NEONREG_D0 + Dest.RawRegister()), Src, SrcIndex, SrcSize, 0x4e003c00);
}
@@ -4584,11 +4584,11 @@ EmitNeonInsElement(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D));
ULONG Size = SrcSize & 3;
- NT_ASSERT((DestIndex << Size) < 16);
- NT_ASSERT((SrcIndex << Size) < 16);
+ Assert((DestIndex << Size) < 16);
+ Assert((SrcIndex << Size) < 16);
DestIndex = ((DestIndex << 1) | 1) << Size;
SrcIndex <<= Size;
@@ -4611,7 +4611,7 @@ EmitNeonFcsel(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
+ Assert(NeonSizeIsValid(SrcSize, VALID_1S | VALID_1D));
return Emitter.EmitFourBytes(0x1e200c00 | ((SrcSize & 1) << 22) | (Src2.RawRegister() << 16) | ((Condition & 15) << 12) | (Src1.RawRegister() << 5) | Dest.RawRegister());
}
@@ -4703,7 +4703,7 @@ ComputeNeonImmediate(
Op = 1;
}
- NT_ASSERT(EncImmediate < 256);
+ Assert(EncImmediate < 256);
return (Op << 29) | (((EncImmediate >> 5) & 7) << 16) | (Cmode << 12) | ((EncImmediate & 0x1f) << 5);
}
@@ -4716,7 +4716,7 @@ EmitNeonMovi(
NEON_SIZE DestSize
)
{
- NT_ASSERT(NeonSizeIsValid(DestSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D | VALID_1D));
+ Assert(NeonSizeIsValid(DestSize, VALID_816B | VALID_48H | VALID_24S | VALID_2D | VALID_1D));
ULONG EncImmediate = ComputeNeonImmediate(Immediate, DestSize);
if (EncImmediate != 1) {
@@ -4728,7 +4728,7 @@ EmitNeonMovi(
return Emitter.EmitFourBytes(0x2f000400 | (((DestSize >> 2) & 1) << 30) | EncImmediate | Dest.RawRegister());
}
- NT_ASSERT(false);
+ Assert(false);
return 0;
}
@@ -4753,7 +4753,7 @@ EmitNeonTbl(
} else {
- NT_ASSERT(Size == SIZE_16B);
+ Assert(Size == SIZE_16B);
return Emitter.EmitFourBytes(0x4e000000 | (Indices.RawRegister() << 16) | (Src.RawRegister() << 5) | Dest.RawRegister());
}
@@ -4774,8 +4774,8 @@ EmitNeonExt(
NEON_SIZE SrcSize
)
{
- NT_ASSERT(NeonSizeIsValid(SrcSize, VALID_816B));
- NT_ASSERT(((SrcSize == SIZE_8B) && (Immediate < 8)) ||
+ Assert(NeonSizeIsValid(SrcSize, VALID_816B));
+ Assert(((SrcSize == SIZE_8B) && (Immediate < 8)) ||
((SrcSize == SIZE_16B) && (Immediate < 16)));
return Emitter.EmitFourBytes(0x2e000000 | (((SrcSize >> 2) & 1) << 30) | (Src2.RawRegister() << 16) | (Immediate << 11) | (Src1.RawRegister() << 5) | Dest.RawRegister());
@@ -4797,7 +4797,7 @@ EmitNeonLdrStrOffsetCommon(
ULONG OpcodeUnscaled
)
{
- NT_ASSERT(NeonSizeIsScalar(SrcDestSize));
+ Assert(NeonSizeIsScalar(SrcDestSize));
ULONG SizeBits = ((SrcDestSize & 3) << 30) | ((SrcDestSize >> 2) << 23);
@@ -4857,7 +4857,7 @@ EmitNeonLdpStpOffsetCommon(
ULONG Opcode
)
{
- NT_ASSERT(NeonSizeIsValid(SrcDestSize, VALID_1S | VALID_1D | VALID_1Q));
+ Assert(NeonSizeIsValid(SrcDestSize, VALID_1S | VALID_1D | VALID_1Q));
ULONG Opc = (SrcDestSize - 2);
@@ -4917,7 +4917,7 @@ EmitNeonLd1St1Common(
QSSize |= 1;
}
- NT_ASSERT(QSSize < 16);
+ Assert(QSSize < 16);
ULONG Op = (SrcDestSize == SIZE_1B) ? 0 : (SrcDestSize == SIZE_1H) ? 2 : 4;
return Emitter.EmitFourBytes(Opcode | ((QSSize >> 3) << 30) | (Op << 13) | ((QSSize & 7) << 10) | (Addr.RawRegister() << 5) | SrcDest.RawRegister());
@@ -4965,7 +4965,7 @@ EmitNeonAesD(
UNREFERENCED_PARAMETER(SrcSize);
- NT_ASSERT(SrcSize == SIZE_16B);
+ Assert(SrcSize == SIZE_16B);
return Emitter.EmitFourBytes(0x4e285800 | (Src.RawRegister() << 5) | Dest.RawRegister());
}
@@ -4982,7 +4982,7 @@ EmitNeonAesE(
UNREFERENCED_PARAMETER(SrcSize);
- NT_ASSERT(SrcSize == SIZE_16B);
+ Assert(SrcSize == SIZE_16B);
return Emitter.EmitFourBytes(0x4e284800 | (Src.RawRegister() << 5) | Dest.RawRegister());
}
@@ -4999,7 +4999,7 @@ EmitNeonAesImc(
UNREFERENCED_PARAMETER(SrcSize);
- NT_ASSERT(SrcSize == SIZE_16B);
+ Assert(SrcSize == SIZE_16B);
return Emitter.EmitFourBytes(0x4e287800 | (Src.RawRegister() << 5) | Dest.RawRegister());
}
@@ -5016,7 +5016,7 @@ EmitNeonAesMc(
UNREFERENCED_PARAMETER(SrcSize);
- NT_ASSERT(SrcSize == SIZE_16B);
+ Assert(SrcSize == SIZE_16B);
return Emitter.EmitFourBytes(0x4e286800 | (Src.RawRegister() << 5) | Dest.RawRegister());
}
diff --git a/deps/chakrashim/core/lib/Backend/arm64/EncoderMD.cpp b/deps/chakrashim/core/lib/Backend/arm64/EncoderMD.cpp
index 4f04f86e365..a76abb04731 100644
--- a/deps/chakrashim/core/lib/Backend/arm64/EncoderMD.cpp
+++ b/deps/chakrashim/core/lib/Backend/arm64/EncoderMD.cpp
@@ -44,19 +44,19 @@ EncoderMD::Init(Encoder *encoder)
///
///----------------------------------------------------------------------------
-const BYTE
+BYTE
EncoderMD::GetRegEncode(IR::RegOpnd *regOpnd)
{
return GetRegEncode(regOpnd->GetReg());
}
-const BYTE
+BYTE
EncoderMD::GetRegEncode(RegNum reg)
{
return RegEncode[reg];
}
-const BYTE
+BYTE
EncoderMD::GetFloatRegEncode(IR::RegOpnd *regOpnd)
{
BYTE regEncode = GetRegEncode(regOpnd->GetReg());
diff --git a/deps/chakrashim/core/lib/Backend/arm64/EncoderMD.h b/deps/chakrashim/core/lib/Backend/arm64/EncoderMD.h
index aa5097c56ad..90a255aa4b3 100644
--- a/deps/chakrashim/core/lib/Backend/arm64/EncoderMD.h
+++ b/deps/chakrashim/core/lib/Backend/arm64/EncoderMD.h
@@ -161,9 +161,9 @@ class EncoderMD
void ApplyRelocs(size_t codeBufferAddress, size_t codeSize, uint* bufferCRC, BOOL isBrShorteningSucceeded, bool isFinalBufferValidation = false);
static bool TryConstFold(IR::Instr *instr, IR::RegOpnd *regOpnd);
static bool TryFold(IR::Instr *instr, IR::RegOpnd *regOpnd);
- const BYTE GetRegEncode(IR::RegOpnd *regOpnd);
- const BYTE GetFloatRegEncode(IR::RegOpnd *regOpnd);
- static const BYTE GetRegEncode(RegNum reg);
+ BYTE GetRegEncode(IR::RegOpnd *regOpnd);
+ BYTE GetFloatRegEncode(IR::RegOpnd *regOpnd);
+ static BYTE GetRegEncode(RegNum reg);
static uint32 GetOpdope(IR::Instr *instr);
static uint32 GetOpdope(Js::OpCode op);
diff --git a/deps/chakrashim/core/lib/Backend/arm64/LowerMD.cpp b/deps/chakrashim/core/lib/Backend/arm64/LowerMD.cpp
index e37b365943c..07f4972b10a 100644
--- a/deps/chakrashim/core/lib/Backend/arm64/LowerMD.cpp
+++ b/deps/chakrashim/core/lib/Backend/arm64/LowerMD.cpp
@@ -223,10 +223,14 @@ LowererMD::LowerCallHelper(IR::Instr *instrCall)
Assert(regArg->m_sym->m_isSingleDef);
IR::Instr *instrArg = regArg->m_sym->m_instrDef;
- Assert(instrArg->m_opcode == Js::OpCode::ArgOut_A ||
- (helperMethod == IR::JnHelperMethod::HelperOP_InitCachedScope && instrArg->m_opcode == Js::OpCode::ExtendArg_A) ||
- (helperMethod == IR::JnHelperMethod::HelperScrFunc_OP_NewScFuncHomeObj && instrArg->m_opcode == Js::OpCode::ExtendArg_A) ||
- (helperMethod == IR::JnHelperMethod::HelperScrFunc_OP_NewScGenFuncHomeObj && instrArg->m_opcode == Js::OpCode::ExtendArg_A));
+ Assert(instrArg->m_opcode == Js::OpCode::ArgOut_A || instrArg->m_opcode == Js::OpCode::ExtendArg_A &&
+ (
+ helperMethod == IR::JnHelperMethod::HelperOP_InitCachedScope ||
+ helperMethod == IR::JnHelperMethod::HelperScrFunc_OP_NewScFuncHomeObj ||
+ helperMethod == IR::JnHelperMethod::HelperScrFunc_OP_NewScGenFuncHomeObj ||
+ helperMethod == IR::JnHelperMethod::HelperRestify ||
+ helperMethod == IR::JnHelperMethod::HelperStPropIdArrFromVar
+ ));
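+    // ExtendArg_A is only expected when lowering helpers that build an ExtendArg chain
+    // (InitCachedScope, the home-object function helpers, Restify, and StPropIdArrFromVar).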
prevInstr = this->LoadHelperArgument(prevInstr, instrArg->GetSrc1());
argOpnd = instrArg->GetSrc2();
@@ -2673,9 +2677,156 @@ LowererMD::GenerateFastCmSrEqConst(IR::Instr *instr)
return false;
}
-bool LowererMD::GenerateFastCmXxI4(IR::Instr *instr)
+void LowererMD::GenerateFastCmXxI4(IR::Instr *instr)
{
- return this->GenerateFastCmXxTaggedInt(instr);
+ this->GenerateFastCmXx(instr);
+}
+
+void LowererMD::GenerateFastCmXxR8(IR::Instr *instr)
+{
+ this->GenerateFastCmXx(instr);
+}
+
+void LowererMD::GenerateFastCmXx(IR::Instr *instr)
+{
+ // For float src:
+ // LDIMM dst, trueResult
+ // FCMP src1, src2
+ // - BVS $done (NaN check iff B.cond is BNE)
+ // B.cond $done
+ // LDIMM dst, falseResult
+    //      $done:
+
+ // For Int src:
+ // LDIMM dst, trueResult
+ // CMP src1, src2
+ // B.cond $done
+ // LDIMM dst, falseResult
+ // $done:
+
+ IR::Opnd * src1 = instr->UnlinkSrc1();
+ IR::Opnd * src2 = instr->UnlinkSrc2();
+ IR::Opnd * dst = instr->UnlinkDst();
+ bool isIntDst = dst->AsRegOpnd()->m_sym->IsInt32();
+ bool isFloatSrc = src1->IsFloat();
+ Assert(!isFloatSrc || src2->IsFloat());
+ Assert(!src1->IsInt64() || src2->IsInt64());
+ Assert(!isFloatSrc || AutoSystemInfo::Data.SSE2Available());
+ Assert(src1->IsRegOpnd());
+ IR::Opnd * opndTrue;
+ IR::Opnd * opndFalse;
+ IR::Instr * newInstr;
+ IR::LabelInstr * done = IR::LabelInstr::New(Js::OpCode::Label, m_func);
+
+ if (dst->IsEqual(src1))
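+    // dst is written with the "true" result before the compare executes, so it must not
+    // alias either source; copy an aliased source into a fresh register first.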
+ {
+ IR::RegOpnd *newSrc1 = IR::RegOpnd::New(src1->GetType(), m_func);
+ Lowerer::InsertMove(newSrc1, src1, instr);
+ src1 = newSrc1;
+ }
+
+ if (dst->IsEqual(src2))
+ {
+ IR::RegOpnd *newSrc2 = IR::RegOpnd::New(src1->GetType(), m_func);
+ Lowerer::InsertMove(newSrc2, src2, instr);
+ src2 = newSrc2;
+ }
+
+ if (isIntDst)
+ {
+ opndTrue = IR::IntConstOpnd::New(1, TyInt32, this->m_func);
+ opndFalse = IR::IntConstOpnd::New(0, TyInt32, this->m_func);
+ }
+ else
+ {
+ opndTrue = this->m_lowerer->LoadLibraryValueOpnd(instr, LibraryValue::ValueTrue);
+ opndFalse = this->m_lowerer->LoadLibraryValueOpnd(instr, LibraryValue::ValueFalse);
+ }
+
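+    // Optimistically store the "true" result; the conditional branch below jumps past
+    // the "false" overwrite when the comparison holds.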
+ Lowerer::InsertMove(dst, opndTrue, instr);
+
+ // CMP src1, src2
+ newInstr = IR::Instr::New(isFloatSrc ? Js::OpCode::FCMP : Js::OpCode::CMP, this->m_func);
+ newInstr->SetSrc1(src1);
+ newInstr->SetSrc2(src2);
+ instr->InsertBefore(newInstr);
+ LowererMD::Legalize(newInstr);
+
+ bool addNaNCheck = false;
+ Js::OpCode opcode = Js::OpCode::InvalidOpCode;
+
+ switch (instr->m_opcode)
+ {
+ case Js::OpCode::CmEq_A:
+ case Js::OpCode::CmSrEq_A:
+ case Js::OpCode::CmEq_I4:
+ opcode = Js::OpCode::BEQ;
+ break;
+
+ case Js::OpCode::CmNeq_A:
+ case Js::OpCode::CmSrNeq_A:
+ case Js::OpCode::CmNeq_I4:
+ opcode = Js::OpCode::BNE;
+ addNaNCheck = isFloatSrc;
+ break;
+
+ case Js::OpCode::CmGt_A:
+ case Js::OpCode::CmGt_I4:
+ opcode = Js::OpCode::BGT;
+ break;
+
+ case Js::OpCode::CmGe_A:
+ case Js::OpCode::CmGe_I4:
+ opcode = Js::OpCode::BGE;
+ break;
+
+ case Js::OpCode::CmLt_A:
+ case Js::OpCode::CmLt_I4:
+        //Can't use BLT as it is set when the operands are unordered (NaN).
+ opcode = isFloatSrc ? Js::OpCode::BCC : Js::OpCode::BLT;
+ break;
+
+ case Js::OpCode::CmLe_A:
+ case Js::OpCode::CmLe_I4:
+ //Can't use BLE as it is set when the operands are unordered (NaN).
+ opcode = isFloatSrc ? Js::OpCode::BLS : Js::OpCode::BLE;
+ break;
+
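+    // The unsigned comparisons map to the carry-flag conditions:
+    // BHI (C && !Z), BCS (C), BCC (!C), BLS (!C || Z).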
+ case Js::OpCode::CmUnGt_A:
+ case Js::OpCode::CmUnGt_I4:
+ opcode = Js::OpCode::BHI;
+ break;
+
+ case Js::OpCode::CmUnGe_A:
+ case Js::OpCode::CmUnGe_I4:
+ opcode = Js::OpCode::BCS;
+ break;
+
+ case Js::OpCode::CmUnLt_A:
+ case Js::OpCode::CmUnLt_I4:
+ opcode = Js::OpCode::BCC;
+ break;
+
+ case Js::OpCode::CmUnLe_A:
+ case Js::OpCode::CmUnLe_I4:
+ opcode = Js::OpCode::BLS;
+ break;
+
+ default: Assert(false);
+ }
+
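+    // FCMP leaves the V flag set for unordered (NaN) operands, so for BNE a preceding BVS
+    // routes NaN compares straight to $done with the "true" (not-equal) result.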
+ if (addNaNCheck)
+ {
+ newInstr = IR::BranchInstr::New(Js::OpCode::BVS, done, m_func);
+ instr->InsertBefore(newInstr);
+ }
+
+ newInstr = IR::BranchInstr::New(opcode, done, m_func);
+ instr->InsertBefore(newInstr);
+
+ Lowerer::InsertMove(dst, opndFalse, instr);
+ instr->InsertBefore(done);
+ instr->Remove();
}
///----------------------------------------------------------------------------
@@ -2800,6 +2951,7 @@ bool LowererMD::GenerateFastCmXxTaggedInt(IR::Instr *instr, bool isInHelper /*
Lowerer::InsertMove(newSrc1, src1, instr);
src1 = newSrc1;
}
+
if (dst->IsEqual(src2))
{
IR::RegOpnd *newSrc2 = IR::RegOpnd::New(TyMachReg, m_func);
diff --git a/deps/chakrashim/core/lib/Backend/arm64/LowerMD.h b/deps/chakrashim/core/lib/Backend/arm64/LowerMD.h
index 0830c916226..95d7fd2a903 100644
--- a/deps/chakrashim/core/lib/Backend/arm64/LowerMD.h
+++ b/deps/chakrashim/core/lib/Backend/arm64/LowerMD.h
@@ -94,8 +94,9 @@ class LowererMD
void GenerateTaggedZeroTest( IR::Opnd * opndSrc, IR::Instr * instrInsert, IR::LabelInstr * labelHelper = nullptr);
bool GenerateObjectTest(IR::Opnd * opndSrc, IR::Instr * insertInstr, IR::LabelInstr * labelTarget, bool fContinueLabel = false);
bool GenerateFastCmSrEqConst(IR::Instr *instr);
- bool GenerateFastCmXxI4(IR::Instr *instr);
- bool GenerateFastCmXxR8(IR::Instr *instr) { Assert(UNREACHED); return nullptr; }
+ void GenerateFastCmXxI4(IR::Instr *instr);
+ void GenerateFastCmXxR8(IR::Instr *instr);
+ void GenerateFastCmXx(IR::Instr *instr);
bool GenerateFastCmXxTaggedInt(IR::Instr *instr, bool isInHelper = false);
IR::Instr * GenerateConvBool(IR::Instr *instr);
void GenerateClz(IR::Instr * instr);
diff --git a/deps/chakrashim/core/lib/Backend/arm64/machvalues.h b/deps/chakrashim/core/lib/Backend/arm64/machvalues.h
index 9e508568046..c2449c39d5a 100644
--- a/deps/chakrashim/core/lib/Backend/arm64/machvalues.h
+++ b/deps/chakrashim/core/lib/Backend/arm64/machvalues.h
@@ -18,4 +18,4 @@ static const int MachDouble = 8;
static const int MachRegDouble = 8;
static const int MachArgsSlotOffset = MachPtr;
static const int MachStackAlignment = 16; // On ARM64 SP needs to be 16 byte aligned for load/store
-static const unsigned __int64 MachSignBit = 0x8000000000000000;
\ No newline at end of file
+static const unsigned __int64 MachSignBit = 0x8000000000000000;
diff --git a/deps/chakrashim/core/lib/Backend/i386/EncoderMD.cpp b/deps/chakrashim/core/lib/Backend/i386/EncoderMD.cpp
index ff346b071f1..deab2b68337 100644
--- a/deps/chakrashim/core/lib/Backend/i386/EncoderMD.cpp
+++ b/deps/chakrashim/core/lib/Backend/i386/EncoderMD.cpp
@@ -105,7 +105,7 @@ EncoderMD::Init(Encoder *encoder)
///
///----------------------------------------------------------------------------
-const BYTE
+BYTE
EncoderMD::GetOpcodeByte2(IR::Instr *instr)
{
return OpcodeByte2[instr->m_opcode - (Js::OpCode::MDStart+1)];
@@ -154,7 +154,7 @@ EncoderMD::GetOpbyte(IR::Instr *instr)
///
///----------------------------------------------------------------------------
-const BYTE
+BYTE
EncoderMD::GetRegEncode(IR::RegOpnd *regOpnd)
{
AssertMsg(regOpnd->GetReg() != RegNOREG, "RegOpnd should have valid reg in encoder");
@@ -171,7 +171,7 @@ EncoderMD::GetRegEncode(IR::RegOpnd *regOpnd)
///
///----------------------------------------------------------------------------
-const uint32
+uint32
EncoderMD::GetOpdope(IR::Instr *instr)
{
return Opdope[instr->m_opcode - (Js::OpCode::MDStart+1)];
@@ -185,7 +185,7 @@ EncoderMD::GetOpdope(IR::Instr *instr)
///
///----------------------------------------------------------------------------
-const uint32
+uint32
EncoderMD::GetLeadIn(IR::Instr * instr)
{
return OpcodeLeadIn[instr->m_opcode - (Js::OpCode::MDStart+1)];
diff --git a/deps/chakrashim/core/lib/Backend/i386/EncoderMD.h b/deps/chakrashim/core/lib/Backend/i386/EncoderMD.h
index 1c3aaa996b0..5ec3dd02006 100644
--- a/deps/chakrashim/core/lib/Backend/i386/EncoderMD.h
+++ b/deps/chakrashim/core/lib/Backend/i386/EncoderMD.h
@@ -229,13 +229,13 @@ class EncoderMD
BYTE * GetRelocBufferAddress(EncodeRelocAndLabels * reloc);
private:
- const BYTE GetOpcodeByte2(IR::Instr *instr);
+ BYTE GetOpcodeByte2(IR::Instr *instr);
const BYTE * GetFormTemplate(IR::Instr *instr);
static Forms GetInstrForm(IR::Instr *instr);
const BYTE * GetOpbyte(IR::Instr *instr);
- const BYTE GetRegEncode(IR::RegOpnd *regOpnd);
- const uint32 GetLeadIn(IR::Instr * instr);
- static const uint32 GetOpdope(IR::Instr *instr);
+ BYTE GetRegEncode(IR::RegOpnd *regOpnd);
+ uint32 GetLeadIn(IR::Instr * instr);
+ static uint32 GetOpdope(IR::Instr *instr);
void EmitModRM(IR::Instr * instr, IR::Opnd *opnd, BYTE reg1);
void EmitConst(size_t val, int size);
int EmitImmed(IR::Opnd * opnd, int opSize, int sbit);
diff --git a/deps/chakrashim/core/lib/Backend/i386/machvalues.h b/deps/chakrashim/core/lib/Backend/i386/machvalues.h
index cd9c099b6bf..bc47181f8d3 100644
--- a/deps/chakrashim/core/lib/Backend/i386/machvalues.h
+++ b/deps/chakrashim/core/lib/Backend/i386/machvalues.h
@@ -18,4 +18,4 @@ static const int MachMaxInstrSize = 11;
static const int MachArgsSlotOffset = MachPtr;
static const int MachStackAlignment = MachDouble;
static const unsigned int MachSignBit = 0x80000000;
-static const int MachSimd128 = 16;
\ No newline at end of file
+static const int MachSimd128 = 16;
diff --git a/deps/chakrashim/core/lib/Common/ChakraCoreVersion.h b/deps/chakrashim/core/lib/Common/ChakraCoreVersion.h
index 3cb11ff51ad..ca2857cfcc3 100644
--- a/deps/chakrashim/core/lib/Common/ChakraCoreVersion.h
+++ b/deps/chakrashim/core/lib/Common/ChakraCoreVersion.h
@@ -16,8 +16,8 @@
// ChakraCore version number definitions (used in ChakraCore binary metadata)
#define CHAKRA_CORE_MAJOR_VERSION 1
-#define CHAKRA_CORE_MINOR_VERSION 11
-#define CHAKRA_CORE_PATCH_VERSION 15
+#define CHAKRA_CORE_MINOR_VERSION 12
+#define CHAKRA_CORE_PATCH_VERSION 0
#define CHAKRA_CORE_VERSION_RELEASE_QFE 0 // Redundant with PATCH_VERSION. Keep this value set to 0.
// -------------
@@ -54,7 +54,7 @@
// * Does not add anything to the file description
// ChakraCore RELEASE and PRERELEASE flags
-#define CHAKRA_CORE_VERSION_RELEASE 1
+#define CHAKRA_CORE_VERSION_RELEASE 0
#define CHAKRA_CORE_VERSION_PRERELEASE 0
// Chakra RELEASE flag
diff --git a/deps/chakrashim/core/lib/Common/Common/Chakra.Common.Common.vcxproj b/deps/chakrashim/core/lib/Common/Common/Chakra.Common.Common.vcxproj
index 3682f1bde5e..4e65257f912 100644
--- a/deps/chakrashim/core/lib/Common/Common/Chakra.Common.Common.vcxproj
+++ b/deps/chakrashim/core/lib/Common/Common/Chakra.Common.Common.vcxproj
@@ -38,6 +38,7 @@
+    <ClCompile Include="CompressionUtilities.cpp" />
@@ -59,6 +60,7 @@
+    <ClInclude Include="CompressionUtilities.h" />
diff --git a/deps/chakrashim/core/lib/Common/Common/CompressionUtilities.cpp b/deps/chakrashim/core/lib/Common/Common/CompressionUtilities.cpp
new file mode 100644
index 00000000000..6cf35e49e92
--- /dev/null
+++ b/deps/chakrashim/core/lib/Common/Common/CompressionUtilities.cpp
@@ -0,0 +1,137 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+
+#include "CommonCommonPch.h"
+#include "CompressionUtilities.h"
+
+#ifdef ENABLE_COMPRESSION_UTILITIES
+#include <compressapi.h>
+#endif
+
+#define IFFALSEGO(expr,label) do { if(!(expr)) { goto label; } } while(0);
+#define IFFALSEGOANDGETLASTERROR(expr,label) do { if(!(expr)) { hr = HRESULT_FROM_WIN32(GetLastError()); goto label; } } while(0);
+
+using namespace Js;
+
+DWORD ConvertCompressionAlgorithm(CompressionUtilities::CompressionAlgorithm algorithm)
+{
+ // Note: The algorithms listed in CompressionUtilities.h should be kept in-sync with those
+ // defined in compressapi.h or else we will need to do more than a simple cast here.
+ return static_cast<DWORD>(algorithm);
+}
+
+HRESULT CompressionUtilities::CompressBuffer(
+ _In_ ArenaAllocator* alloc,
+ _In_ const byte* inputBuffer,
+ _In_ size_t inputBufferByteCount,
+ _Out_ byte** compressedBuffer,
+ _Out_ size_t* compressedBufferByteCount,
+ _In_opt_ CompressionAlgorithm algorithm)
+{
+ Assert(compressedBuffer != nullptr);
+ Assert(compressedBufferByteCount != nullptr);
+
+ *compressedBuffer = nullptr;
+ *compressedBufferByteCount = 0;
+
+ HRESULT hr = E_FAIL;
+
+#ifdef ENABLE_COMPRESSION_UTILITIES
+ COMPRESSOR_HANDLE compressor = nullptr;
+ IFFALSEGOANDGETLASTERROR(CreateCompressor(ConvertCompressionAlgorithm(algorithm), nullptr, &compressor), Error);
+
+ if (algorithm == CompressionAlgorithm_Xpress || algorithm == CompressionAlgorithm_Xpress_Huff)
+ {
+ DWORD level = 0;
+ IFFALSEGOANDGETLASTERROR(SetCompressorInformation(compressor, COMPRESS_INFORMATION_CLASS_LEVEL, &level, sizeof(DWORD)), Error);
+ }
+
+ SIZE_T compressedByteCount = 0;
+ bool result = Compress(compressor, inputBuffer, inputBufferByteCount, nullptr, 0, &compressedByteCount);
+
+ if (!result)
+ {
+ DWORD errorCode = GetLastError();
+ if (errorCode != ERROR_INSUFFICIENT_BUFFER)
+ {
+ hr = HRESULT_FROM_WIN32(errorCode);
+ goto Error;
+ }
+ }
+
+ *compressedBuffer = AnewNoThrowArray(alloc, byte, compressedByteCount);
+ IFFALSEGO(*compressedBuffer != nullptr, Error);
+
+ SIZE_T compressedDataSize;
+ IFFALSEGOANDGETLASTERROR(Compress(compressor, inputBuffer, inputBufferByteCount, *compressedBuffer, compressedByteCount, &compressedDataSize), Error);
+ *compressedBufferByteCount = compressedDataSize;
+
+ hr = S_OK;
+
+Error:
+ if (compressor != nullptr)
+ {
+ CloseCompressor(compressor);
+ }
+#else
+ hr = E_NOTIMPL;
+#endif
+
+ return hr;
+}
+
+HRESULT CompressionUtilities::DecompressBuffer(
+ _In_ ArenaAllocator* alloc,
+ _In_ const byte* compressedBuffer,
+ _In_ size_t compressedBufferByteCount,
+ _Out_ byte** decompressedBuffer,
+ _Out_ size_t* decompressedBufferByteCount,
+ _In_opt_ CompressionAlgorithm algorithm)
+{
+ Assert(decompressedBuffer != nullptr);
+ Assert(decompressedBufferByteCount != nullptr);
+
+ *decompressedBuffer = nullptr;
+ *decompressedBufferByteCount = 0;
+
+ HRESULT hr = E_FAIL;
+
+#ifdef ENABLE_COMPRESSION_UTILITIES
+ DECOMPRESSOR_HANDLE decompressor = nullptr;
+ IFFALSEGOANDGETLASTERROR(CreateDecompressor(ConvertCompressionAlgorithm(algorithm), nullptr, &decompressor), Error);
+
+ SIZE_T decompressedByteCount = 0;
+ bool result = Decompress(decompressor, compressedBuffer, compressedBufferByteCount, nullptr, 0, &decompressedByteCount);
+
+ if (!result)
+ {
+ DWORD errorCode = GetLastError();
+ if (errorCode != ERROR_INSUFFICIENT_BUFFER)
+ {
+ hr = HRESULT_FROM_WIN32(errorCode);
+ goto Error;
+ }
+ }
+
+ *decompressedBuffer = AnewNoThrowArray(alloc, byte, decompressedByteCount);
+ IFFALSEGO(*decompressedBuffer != nullptr, Error);
+
+ SIZE_T uncompressedDataSize = 0;
+ IFFALSEGOANDGETLASTERROR(Decompress(decompressor, compressedBuffer, compressedBufferByteCount, *decompressedBuffer, decompressedByteCount, &uncompressedDataSize), Error);
+ *decompressedBufferByteCount = uncompressedDataSize;
+
+ hr = S_OK;
+
+Error:
+ if (decompressor != nullptr)
+ {
+ CloseDecompressor(decompressor);
+ }
+#else
+ hr = E_NOTIMPL;
+#endif
+
+ return hr;
+}
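The two-call shape above (probe with a null output buffer, then allocate and compress for real) is the standard pattern for the Windows Compression API. A minimal standalone sketch of that pattern, assuming only compressapi.h and Cabinet.lib; the helper name and malloc-based buffer handling are illustrative, not part of this change:

```cpp
#include <windows.h>
#include <compressapi.h>   // link with Cabinet.lib
#include <cstdlib>

// Hypothetical helper showing the size-query-then-compress pattern.
bool CompressWithSizeQuery(const BYTE* input, SIZE_T inputSize,
                           BYTE** output, SIZE_T* outputSize)
{
    COMPRESSOR_HANDLE compressor = nullptr;
    if (!CreateCompressor(COMPRESS_ALGORITHM_XPRESS, nullptr, &compressor))
    {
        return false;
    }

    // First call: a null output buffer fails with ERROR_INSUFFICIENT_BUFFER
    // and reports the required output size in 'needed'.
    SIZE_T needed = 0;
    if (!Compress(compressor, input, inputSize, nullptr, 0, &needed) &&
        GetLastError() != ERROR_INSUFFICIENT_BUFFER)
    {
        CloseCompressor(compressor);
        return false;   // any other error is a real failure
    }

    // Second call: compress into a buffer of exactly the reported size.
    *output = static_cast<BYTE*>(malloc(needed));
    bool ok = (*output != nullptr) &&
              Compress(compressor, input, inputSize, *output, needed, outputSize);
    CloseCompressor(compressor);
    return ok;
}
```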
diff --git a/deps/chakrashim/core/lib/Common/Common/CompressionUtilities.h b/deps/chakrashim/core/lib/Common/Common/CompressionUtilities.h
new file mode 100644
index 00000000000..db712566ecb
--- /dev/null
+++ b/deps/chakrashim/core/lib/Common/Common/CompressionUtilities.h
@@ -0,0 +1,37 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+#pragma once
+
+namespace Js
+{
+ class CompressionUtilities
+ {
+ public:
+ enum CompressionAlgorithm : byte
+ {
+ CompressionAlgorithm_MSZip = 0x2,
+ CompressionAlgorithm_Xpress = 0x3,
+ CompressionAlgorithm_Xpress_Huff = 0x4,
+ CompressionAlgorithm_LZMS = 0x5,
+ CompressionAlgorithm_Invalid = 0xf
+ };
+
+ static HRESULT CompressBuffer(
+ _In_ ArenaAllocator* alloc,
+ _In_ const byte* inputBuffer,
+ _In_ size_t inputBufferByteCount,
+ _Out_ byte** compressedBuffer,
+ _Out_ size_t* compressedBufferByteCount,
+ _In_opt_ CompressionAlgorithm algorithm = CompressionAlgorithm_Xpress);
+
+ static HRESULT DecompressBuffer(
+ _In_ ArenaAllocator* alloc,
+ _In_ const byte* compressedBuffer,
+ _In_ size_t compressedBufferByteCount,
+ _Out_ byte** decompressedBuffer,
+ _Out_ size_t* decompressedBufferByteCount,
+ _In_opt_ CompressionAlgorithm algorithm = CompressionAlgorithm_Xpress);
+ };
+}
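Taken together, the pair declared above gives a round-trippable API. A hedged usage sketch based only on these signatures ('alloc', 'inputBytes', and 'inputByteCount' are assumed to exist; on builds without ENABLE_COMPRESSION_UTILITIES both calls simply return E_NOTIMPL):

```cpp
// Sketch only: allocator and input setup elided.
byte* compressed = nullptr;
size_t compressedSize = 0;
HRESULT hr = Js::CompressionUtilities::CompressBuffer(
    alloc, inputBytes, inputByteCount, &compressed, &compressedSize);
if (SUCCEEDED(hr))
{
    byte* decompressed = nullptr;
    size_t decompressedSize = 0;
    hr = Js::CompressionUtilities::DecompressBuffer(
        alloc, compressed, compressedSize, &decompressed, &decompressedSize);
    // On success, decompressedSize should equal inputByteCount.
}
```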
diff --git a/deps/chakrashim/core/lib/Common/Common/Jobs.cpp b/deps/chakrashim/core/lib/Common/Common/Jobs.cpp
index 7158acd39c8..c427092f42a 100644
--- a/deps/chakrashim/core/lib/Common/Common/Jobs.cpp
+++ b/deps/chakrashim/core/lib/Common/Common/Jobs.cpp
@@ -1299,7 +1299,7 @@ namespace JsUtil
#if !defined(_UCRT)
HMODULE dllHandle = NULL;
- if (!GetModuleHandleEx(0, AutoSystemInfo::GetJscriptDllFileName(), &dllHandle))
+ if (!GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, (LPCTSTR)&BackgroundJobProcessor::StaticThreadProc, &dllHandle))
{
dllHandle = NULL;
}
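Resolving the module by an address inside it, rather than by DLL file name, works no matter what the host binary is called. A small sketch of the flag's semantics (the function name is made up; the cast mirrors the call in the diff above):

```cpp
#include <windows.h>

static DWORD WINAPI SomeThreadProc(LPVOID) { return 0; }

HMODULE GetOwningModule()
{
    // GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS resolves the module containing
    // the given address; without ..._UNCHANGED_REFCOUNT it also bumps the
    // module's reference count, keeping it loaded -- the intent in Jobs.cpp.
    HMODULE hmod = NULL;
    if (!GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS,
                           (LPCTSTR)&SomeThreadProc, &hmod))
    {
        hmod = NULL; // lookup failed; caller falls back, as the diff does
    }
    return hmod;
}
```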
diff --git a/deps/chakrashim/core/lib/Common/Common/Jobs.h b/deps/chakrashim/core/lib/Common/Common/Jobs.h
index 50b91817ddf..44ca29040e8 100644
--- a/deps/chakrashim/core/lib/Common/Common/Jobs.h
+++ b/deps/chakrashim/core/lib/Common/Common/Jobs.h
@@ -442,7 +442,7 @@ namespace JsUtil
ParallelThreadData(AllocationPolicyManager* policyManager);
- PageAllocator* const GetPageAllocator() { return &backgroundPageAllocator; }
+ PageAllocator* GetPageAllocator() { return &backgroundPageAllocator; }
bool CanDecommit() const { return canDecommit; }
};
diff --git a/deps/chakrashim/core/lib/Common/Common/NumberUtilities.cpp b/deps/chakrashim/core/lib/Common/Common/NumberUtilities.cpp
index 2b0937f4a9c..a57125c0475 100644
--- a/deps/chakrashim/core/lib/Common/Common/NumberUtilities.cpp
+++ b/deps/chakrashim/core/lib/Common/Common/NumberUtilities.cpp
@@ -684,8 +684,8 @@ using namespace Js;
double NumberUtilities::StrToDbl(const EncodedChar * psz, const EncodedChar **ppchLim, Js::ScriptContext *const scriptContext)
{
Assert(scriptContext);
- bool likelyInt = true;
- return Js::NumberUtilities::StrToDbl(psz, ppchLim, likelyInt);
+ LikelyNumberType likelyType = LikelyNumberType::Int;
+ return Js::NumberUtilities::StrToDbl(psz, ppchLim, likelyType);
}
template double NumberUtilities::StrToDbl(const char16 * psz, const char16 **ppchLim, Js::ScriptContext *const scriptContext);
diff --git a/deps/chakrashim/core/lib/Common/Common/NumberUtilities.h b/deps/chakrashim/core/lib/Common/Common/NumberUtilities.h
index 2ba8cf48851..737b56c8f4b 100644
--- a/deps/chakrashim/core/lib/Common/Common/NumberUtilities.h
+++ b/deps/chakrashim/core/lib/Common/Common/NumberUtilities.h
@@ -4,6 +4,14 @@
//-------------------------------------------------------------------------------------------------------
#pragma once
+enum class LikelyNumberType
+{
+ Double,
+ Int,
+ BigInt,
+};
+
+
namespace Js
{
class NumberConstants : public NumberConstantsBase
@@ -217,7 +225,7 @@ namespace Js
// Implemented in lib\parser\common. Should move to lib\common
template <typename EncodedChar>
- static double StrToDbl(const EncodedChar *psz, const EncodedChar **ppchLim, bool& likelyInt);
+ static double StrToDbl(const EncodedChar *psz, const EncodedChar **ppchLim, LikelyNumberType& likelyType, bool isESBigIntEnabled = false);
static BOOL FDblToStr(double dbl, __out_ecount(nDstBufSize) char16 *psz, int nDstBufSize);
static int FDblToStr(double dbl, NumberUtilities::FormatType ft, int nDigits, __out_ecount(cchDst) char16 *pchDst, int cchDst);
diff --git a/deps/chakrashim/core/lib/Common/Common/NumberUtilities_strtod.cpp b/deps/chakrashim/core/lib/Common/Common/NumberUtilities_strtod.cpp
index 6cb28f41f1c..8ed5a50a2e3 100644
--- a/deps/chakrashim/core/lib/Common/Common/NumberUtilities_strtod.cpp
+++ b/deps/chakrashim/core/lib/Common/Common/NumberUtilities_strtod.cpp
@@ -3,7 +3,7 @@
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------
#include "CommonCommonPch.h"
-#include "DataStructures/BigInt.h"
+#include "DataStructures/BigUInt.h"
namespace Js
{
@@ -719,7 +719,7 @@ and re-compare.
template <typename EncodedChar>
static double AdjustDbl(double dbl, const EncodedChar *prgch, int32 cch, int32 lwExp)
{
- Js::BigInt biDec, biDbl;
+ Js::BigUInt biDec, biDbl;
int32 c2Dec, c2Dbl;
int32 c5Dec, c5Dbl;
int wAddHi, wT;
@@ -893,13 +893,13 @@ static double AdjustDbl(double dbl, const EncodedChar *prgch, int32 cch, int32 l
String to Double.
***************************************************************************/
template <typename EncodedChar>
-double Js::NumberUtilities::StrToDbl( const EncodedChar *psz, const EncodedChar **ppchLim, bool& likelyInt )
+double Js::NumberUtilities::StrToDbl( const EncodedChar *psz, const EncodedChar **ppchLim, LikelyNumberType& likelyNumberType, bool isBigIntEnabled)
{
uint32 lu;
BIGNUM num;
BIGNUM numHi;
BIGNUM numLo;
- double dbl;
+ double dbl = 0;
double dblLo;
#if DBG
bool canUseLowPrec = false;
@@ -990,12 +990,19 @@ double Js::NumberUtilities::StrToDbl( const EncodedChar *psz, const EncodedChar
case 'E':
case 'e':
goto LGetExp;
+ case 'n':
+ if (isBigIntEnabled)
+ {
+ goto LBigInt;
+ }
+ default:
+ likelyNumberType = LikelyNumberType::Int;
}
goto LEnd;
LGetRight:
Assert(*pch == '.');
- likelyInt = false;
+ likelyNumberType = LikelyNumberType::Double;
pch++;
if (NULL == pchMinDig)
{
@@ -1043,6 +1050,13 @@ double Js::NumberUtilities::StrToDbl( const EncodedChar *psz, const EncodedChar
if (lwExp > 100000000)
lwExp = 100000000;
}
+ goto LEnd;
+
+LBigInt:
+ pch++;
+ likelyNumberType = LikelyNumberType::BigInt;
+ *ppchLim = pch;
+ goto LDone;
LEnd:
*ppchLim = pch;
@@ -1256,8 +1270,8 @@ double Js::NumberUtilities::StrToDbl( const EncodedChar *psz, const EncodedChar
return dbl;
}
-template double Js::NumberUtilities::StrToDbl( const char16 * psz, const char16 **ppchLim, bool& likelyInt );
-template double Js::NumberUtilities::StrToDbl(const utf8char_t * psz, const utf8char_t **ppchLim, bool& likelyInt);
+template double Js::NumberUtilities::StrToDbl( const char16 * psz, const char16 **ppchLim, LikelyNumberType& likelyInt, bool isBigIntEnabled );
+template double Js::NumberUtilities::StrToDbl(const utf8char_t * psz, const utf8char_t **ppchLim, LikelyNumberType& likelyInt, bool isBigIntEnabled );
/***************************************************************************
Uses big integer arithmetic to get the sequence of digits.
@@ -1272,9 +1286,9 @@ static BOOL FDblToRgbPrecise(double dbl, __out_ecount(kcbMaxRgb) byte *prgb, int
int wExp10, wExp2, w1, w2;
int c2Num, c2Den, c5Num, c5Den;
double dblT;
- Js::BigInt biNum, biDen, biHi, biLo;
- Js::BigInt *pbiLo;
- Js::BigInt biT;
+ Js::BigUInt biNum, biDen, biHi, biLo;
+ Js::BigUInt *pbiLo;
+ Js::BigUInt biT;
uint32 rglu[2];
// Caller should take care of 0, negative and non-finite values.
@@ -2439,8 +2453,8 @@ BOOL Js::NumberUtilities::FNonZeroFiniteDblToStr(double dbl, __out_ecount(cchDst
{
if (FormatDigits(rgb, pbLim, wExp10, pchDst, cchDst))
{
- bool likelyInt = true;
- dblT = StrToDbl(pchDst, &pch,likelyInt);
+ LikelyNumberType likelyInt = LikelyNumberType::Int;
+ dblT = StrToDbl(pchDst, &pch, likelyInt);
Assert(0 == *pch);
Assert(dblT == dbl);
}
@@ -2465,7 +2479,7 @@ BOOL Js::NumberUtilities::FNonZeroFiniteDblToStr(double dbl, __out_ecount(cchDst
}
#if DBG
- bool likelyInt = true;
+ LikelyNumberType likelyInt = LikelyNumberType::Int;
dblT = StrToDbl(pchDst, &pch, likelyInt);
Assert(0 == *pch);
Assert(dblT == dbl);
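The net effect of the new out-parameter: callers learn whether a literal looked like an int, a double, or (when BigInt parsing is enabled) a BigInt terminated by 'n'. A hedged sketch of the calling convention implied by the signatures above (`_u()` and `char16` come from the surrounding codebase):

```cpp
const char16* lim = nullptr;
LikelyNumberType kind = LikelyNumberType::Int;

double d = Js::NumberUtilities::StrToDbl(_u("1.5"), &lim, kind);
// kind == LikelyNumberType::Double, d == 1.5

d = Js::NumberUtilities::StrToDbl(_u("123n"), &lim, kind, /*isBigIntEnabled*/ true);
// kind == LikelyNumberType::BigInt; per the LBigInt label above, parsing
// stops just past the trailing 'n' and the returned double should not be used.
```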
diff --git a/deps/chakrashim/core/lib/Common/Common/RejitReasons.h b/deps/chakrashim/core/lib/Common/Common/RejitReasons.h
index 0c35143eeb7..285c0c5221d 100644
--- a/deps/chakrashim/core/lib/Common/Common/RejitReasons.h
+++ b/deps/chakrashim/core/lib/Common/Common/RejitReasons.h
@@ -49,4 +49,5 @@ REJIT_REASON(ModByPowerOf2)
REJIT_REASON(NoProfile)
REJIT_REASON(PowIntIntTypeSpecDisabled)
REJIT_REASON(DisableStackArgOpt)
+REJIT_REASON(DisableStackArgLenAndConstOpt)
REJIT_REASON(OptimizeTryFinallyDisabled)
diff --git a/deps/chakrashim/core/lib/Common/CommonDefines.h b/deps/chakrashim/core/lib/Common/CommonDefines.h
index 4a662f49b58..c330e1efebc 100644
--- a/deps/chakrashim/core/lib/Common/CommonDefines.h
+++ b/deps/chakrashim/core/lib/Common/CommonDefines.h
@@ -12,10 +12,6 @@
#include "Warnings.h"
#include "ChakraCoreVersion.h"
-// CFG was never enabled for ARM32 and requires WIN10 SDK
-#if !defined(_M_ARM) && defined(_WIN32) && defined(NTDDI_WIN10)
-#define _CONTROL_FLOW_GUARD 1
-#endif
//----------------------------------------------------------------------------------------------------
// Default debug/fretest/release flags values
@@ -136,9 +132,10 @@
// Language features
#if !defined(CHAKRACORE_LITE) && (defined(_WIN32) || defined(INTL_ICU))
#define ENABLE_INTL_OBJECT // Intl support
-#define ENABLE_JS_BUILTINS // Built In functions support
#endif
+#define ENABLE_JS_BUILTINS // Built In functions support
+
#if defined(_WIN32) && !defined(HAS_ICU)
#define INTL_WINGLOB 1
#endif
@@ -320,9 +317,7 @@
#endif
// Other features
-#if defined(_CHAKRACOREBUILD)
-# define CHAKRA_CORE_DOWN_COMPAT 1
-#endif
+// #define CHAKRA_CORE_DOWN_COMPAT 1
// todo:: Enable vectorcall on NTBUILD. OS#13609380
#if defined(_WIN32) && !defined(NTBUILD) && defined(_M_IX86)
@@ -349,6 +344,7 @@
#define ENABLE_FOUNDATION_OBJECT
#define ENABLE_EXPERIMENTAL_FLAGS
#define ENABLE_WININET_PROFILE_DATA_CACHE
+#define ENABLE_COMPRESSION_UTILITIES
#define ENABLE_BASIC_TELEMETRY
#define ENABLE_DOM_FAST_PATH
#define EDIT_AND_CONTINUE
diff --git a/deps/chakrashim/core/lib/Common/CommonPal.h b/deps/chakrashim/core/lib/Common/CommonPal.h
index dd63828ae77..35b54842f05 100644
--- a/deps/chakrashim/core/lib/Common/CommonPal.h
+++ b/deps/chakrashim/core/lib/Common/CommonPal.h
@@ -103,13 +103,6 @@
#define get_cpuid __cpuid
-#if defined(__clang__)
-__forceinline void __int2c()
-{
- __asm int 0x2c
-}
-#endif
-
#else // !_WIN32
#define USING_PAL_STDLIB 1
@@ -499,7 +492,7 @@ DWORD __cdecl CharUpperBuffW(const char16* lpsz, DWORD cchLength);
#endif
// `typename QualifiedName` declarations outside of template code not supported before MSVC 2015 update 1
-#if defined(_MSC_VER) && _MSC_VER < 1910
+#if defined(_MSC_VER) && _MSC_VER < 1910 && !defined(__clang__)
#define _TYPENAME
#else
#define _TYPENAME typename
diff --git a/deps/chakrashim/core/lib/Common/ConfigFlagsList.h b/deps/chakrashim/core/lib/Common/ConfigFlagsList.h
index c40ef309df8..b74342f0ec2 100644
--- a/deps/chakrashim/core/lib/Common/ConfigFlagsList.h
+++ b/deps/chakrashim/core/lib/Common/ConfigFlagsList.h
@@ -159,12 +159,14 @@ PHASE(All)
PHASE(DepolymorphizeInlinees)
PHASE(ReuseAuxSlotPtr)
PHASE(PolyEquivTypeGuard)
+ PHASE(DeadStoreTypeChecksOnStores)
#if DBG
PHASE(SimulatePolyCacheWithOneTypeForFunction)
#endif
PHASE(CheckThis)
PHASE(StackArgOpt)
PHASE(StackArgFormalsOpt)
+ PHASE(StackArgLenConstOpt)
PHASE(IndirCopyProp)
PHASE(ArrayCheckHoist)
PHASE(ArrayMissingValueCheckHoist)
@@ -258,6 +260,7 @@ PHASE(All)
PHASE(PrologEpilog)
PHASE(InsertNOPs)
PHASE(Encoder)
+ PHASE(Assembly)
PHASE(Emitter)
PHASE(DebugBreak)
#if defined(_M_IX86) || defined(_M_X64)
@@ -446,6 +449,7 @@ PHASE(All)
#define DEFAULT_CONFIG_HybridFgJitBgQueueLengthThreshold (32)
#define DEFAULT_CONFIG_Prejit (false)
#define DEFAULT_CONFIG_ParserStateCache (false)
+#define DEFAULT_CONFIG_CompressParserStateCache (false)
#define DEFAULT_CONFIG_DeferTopLevelTillFirstCall (true)
#define DEFAULT_CONFIG_DirectCallTelemetryStats (false)
#define DEFAULT_CONFIG_errorStackTrace (true)
@@ -559,6 +563,8 @@ PHASE(All)
#define DEFAULT_CONFIG_RegexTracing (false)
#define DEFAULT_CONFIG_RegexProfile (false)
#define DEFAULT_CONFIG_RegexDebug (false)
+#define DEFAULT_CONFIG_RegexDebugAST (true)
+#define DEFAULT_CONFIG_RegexDebugAnnotatedAST (true)
#define DEFAULT_CONFIG_RegexBytecodeDebug (false)
#define DEFAULT_CONFIG_RegexOptimize (true)
#define DEFAULT_CONFIG_DynamicRegexMruListSize (16)
@@ -586,6 +592,7 @@ PHASE(All)
#endif
#define DEFAULT_CONFIG_JitRepro (false)
#define DEFAULT_CONFIG_LdChakraLib (false)
+#define DEFAULT_CONFIG_TestChakraLib (false)
#define DEFAULT_CONFIG_EntryPointInfoRpcData (false)
// ES6 DEFAULT BEHAVIOR
@@ -627,12 +634,17 @@ PHASE(All)
#define DEFAULT_CONFIG_ES6Spread (true)
#define DEFAULT_CONFIG_ES6String (true)
#define DEFAULT_CONFIG_ES6StringPrototypeFixes (true)
+#define DEFAULT_CONFIG_ES2018ObjectRestSpread (false)
+
+#ifndef DEFAULT_CONFIG_ES6PrototypeChain
#ifdef COMPILE_DISABLE_ES6PrototypeChain
// If ES6PrototypeChain needs to be disabled by compile flag, DEFAULT_CONFIG_ES6PrototypeChain should be false
#define DEFAULT_CONFIG_ES6PrototypeChain (false)
#else
- #define DEFAULT_CONFIG_ES6PrototypeChain (false)
+ #define DEFAULT_CONFIG_ES6PrototypeChain (true)
#endif
+#endif
+
#define DEFAULT_CONFIG_ES6ToPrimitive (true)
#define DEFAULT_CONFIG_ES6ToLength (true)
#define DEFAULT_CONFIG_ES6ToStringTag (true)
@@ -640,6 +652,8 @@ PHASE(All)
#define DEFAULT_CONFIG_ES6UnicodeVerbose (true)
#define DEFAULT_CONFIG_ES6Unscopables (true)
#define DEFAULT_CONFIG_ES6RegExSticky (true)
+#define DEFAULT_CONFIG_ES2018RegExDotAll (true)
+#define DEFAULT_CONFIG_ESBigInt (false)
#ifdef COMPILE_DISABLE_ES6RegExPrototypeProperties
// If ES6RegExPrototypeProperties needs to be disabled by compile flag, DEFAULT_CONFIG_ES6RegExPrototypeProperties should be false
#define DEFAULT_CONFIG_ES6RegExPrototypeProperties (false)
@@ -659,6 +673,7 @@ PHASE(All)
#define DEFAULT_CONFIG_ES7ValuesEntries (true)
#define DEFAULT_CONFIG_ESObjectGetOwnPropertyDescriptors (true)
#define DEFAULT_CONFIG_ESDynamicImport (false)
+#define DEFAULT_CONFIG_ESExportNsAs (true)
#define DEFAULT_CONFIG_ESSharedArrayBuffer (false)
@@ -763,7 +778,7 @@ PHASE(All)
#define DEFAULT_CONFIG_LibraryStackFrameDebugger (false)
#define DEFAULT_CONFIG_FuncObjectInlineCacheThreshold (2) // Maximum number of inline caches a function body may have to allow for inline caches to be allocated on the function object.
-#define DEFAULT_CONFIG_ShareInlineCaches (true)
+#define DEFAULT_CONFIG_ShareInlineCaches (false)
#define DEFAULT_CONFIG_InlineCacheInvalidationListCompactionThreshold (4)
#define DEFAULT_CONFIG_ConstructorCacheInvalidationThreshold (500)
@@ -1017,6 +1032,7 @@ FLAGNR(Phases, DebugBreakOnPhaseBegin, "Break into debugger at the beginning of
FLAGNR(Boolean, DebugWindow , "Send console output to debugger window", false)
FLAGNR(Boolean, ParserStateCache , "Enable creation of parser state cache", DEFAULT_CONFIG_ParserStateCache)
+FLAGNR(Boolean, CompressParserStateCache, "Enable compression of the parser state cache", DEFAULT_CONFIG_CompressParserStateCache)
FLAGNR(Boolean, DeferTopLevelTillFirstCall , "Enable tracking of deferred top level functions in a script file, until the first function of the script context is parsed.", DEFAULT_CONFIG_DeferTopLevelTillFirstCall)
FLAGNR(Number, DeferParse , "Minimum size of defer-parsed script (non-zero only: use /nodeferparse do disable", 0)
FLAGNR(Boolean, DirectCallTelemetryStats, "Enables logging stats for direct call telemetry", DEFAULT_CONFIG_DirectCallTelemetryStats)
@@ -1071,6 +1087,8 @@ FLAGNR(Boolean, JitRepro , "Add Function.invokeJit to execute codeg
FLAGNR(Boolean, EntryPointInfoRpcData , "Keep encoded rpc buffer for jitted function on EntryPointInfo until cleanup", DEFAULT_CONFIG_EntryPointInfoRpcData)
FLAGNR(Boolean, LdChakraLib , "Access to the Chakra internal library with the __chakraLibrary keyword", DEFAULT_CONFIG_LdChakraLib)
+FLAGNR(Boolean, TestChakraLib , "Access to the Chakra internal library with the __chakraLibrary keyword without global access restriction", DEFAULT_CONFIG_TestChakraLib)
+
// ES6 (BLUE+1) features/flags
// Master ES6 flag to enable STABLE ES6 features/flags
@@ -1121,11 +1139,9 @@ FLAGPR (Boolean, ES6, ES6Rest , "Enable ES6 Rest parame
FLAGPR (Boolean, ES6, ES6Spread , "Enable ES6 Spread support" , DEFAULT_CONFIG_ES6Spread)
FLAGPR (Boolean, ES6, ES6String , "Enable ES6 String extensions" , DEFAULT_CONFIG_ES6String)
FLAGPR (Boolean, ES6, ES6StringPrototypeFixes, "Enable ES6 String.prototype fixes" , DEFAULT_CONFIG_ES6StringPrototypeFixes)
+FLAGPR (Boolean, ES6, ES2018ObjectRestSpread , "Enable ES2018 Object Rest/Spread" , DEFAULT_CONFIG_ES2018ObjectRestSpread)
-#ifndef COMPILE_DISABLE_ES6PrototypeChain
- #define COMPILE_DISABLE_ES6PrototypeChain 0
-#endif
-FLAGPR_REGOVR_EXP(Boolean, ES6, ES6PrototypeChain , "Enable ES6 prototypes (Example: Date prototype is object)", DEFAULT_CONFIG_ES6PrototypeChain)
+FLAGPR (Boolean, ES6, ES6PrototypeChain , "Enable ES6 prototypes (Example: Date prototype is object)", DEFAULT_CONFIG_ES6PrototypeChain)
FLAGPR (Boolean, ES6, ES6ToPrimitive , "Enable ES6 ToPrimitive symbol" , DEFAULT_CONFIG_ES6ToPrimitive)
FLAGPR (Boolean, ES6, ES6ToLength , "Enable ES6 ToLength fixes" , DEFAULT_CONFIG_ES6ToLength)
FLAGPR (Boolean, ES6, ES6ToStringTag , "Enable ES6 ToStringTag symbol" , DEFAULT_CONFIG_ES6ToStringTag)
@@ -1133,6 +1149,8 @@ FLAGPR (Boolean, ES6, ES6Unicode , "Enable ES6 Unicode 6.0
FLAGPR (Boolean, ES6, ES6UnicodeVerbose , "Enable ES6 Unicode 6.0 verbose failure output" , DEFAULT_CONFIG_ES6UnicodeVerbose)
FLAGPR (Boolean, ES6, ES6Unscopables , "Enable ES6 With Statement Unscopables" , DEFAULT_CONFIG_ES6Unscopables)
FLAGPR (Boolean, ES6, ES6RegExSticky , "Enable ES6 RegEx sticky flag" , DEFAULT_CONFIG_ES6RegExSticky)
+FLAGPR (Boolean, ES6, ES2018RegExDotAll , "Enable ES2018 RegEx dotAll flag" , DEFAULT_CONFIG_ES2018RegExDotAll)
+FLAGPR (Boolean, ES6, ESExportNsAs , "Enable ES experimental export * as name" , DEFAULT_CONFIG_ESExportNsAs)
#ifndef COMPILE_DISABLE_ES6RegExPrototypeProperties
#define COMPILE_DISABLE_ES6RegExPrototypeProperties 0
@@ -1167,6 +1185,9 @@ FLAGNR(Boolean, WinRTDelegateInterfaces , "Treat WinRT Delegates as Interfaces w
FLAGR(Boolean, WinRTAdaptiveApps , "Enable the adaptive apps feature, allowing for variable projection." , DEFAULT_CONFIG_WinRTAdaptiveApps)
#endif
+// ES BigInt flag
+FLAGR(Boolean, ESBigInt, "Enable ESBigInt flag", DEFAULT_CONFIG_ESBigInt)
+
// This flag to be removed once JITing generator functions is stable
FLAGNR(Boolean, JitES6Generators , "Enable JITing of ES6 generators", false)
@@ -1540,6 +1561,8 @@ FLAGNR(Boolean, ValidateHeapEnum , "Validate that heap enumeration is repor
FLAGR (Boolean, RegexTracing , "Trace all Regex invocations to the output.", DEFAULT_CONFIG_RegexTracing)
FLAGR (Boolean, RegexProfile , "Collect usage statistics on all Regex invocations.", DEFAULT_CONFIG_RegexProfile)
FLAGR (Boolean, RegexDebug , "Trace compilation of UnifiedRegex expressions.", DEFAULT_CONFIG_RegexDebug)
+FLAGR (Boolean, RegexDebugAST , "Display Regex AST (requires -RegexDebug to view). [default on]", DEFAULT_CONFIG_RegexDebugAST)
+FLAGR (Boolean, RegexDebugAnnotatedAST, "Display Regex Annotated AST (requires -RegexDebug and -RegexDebugAST to view). [default on]", DEFAULT_CONFIG_RegexDebugAnnotatedAST)
FLAGR (Boolean, RegexBytecodeDebug , "Display layout of UnifiedRegex bytecode (requires -RegexDebug to view).", DEFAULT_CONFIG_RegexBytecodeDebug)
FLAGR (Boolean, RegexOptimize , "Optimize regular expressions in the unified Regex system (default: true)", DEFAULT_CONFIG_RegexOptimize)
FLAGR (Number, DynamicRegexMruListSize, "Size of the MRU list for dynamic regexes", DEFAULT_CONFIG_DynamicRegexMruListSize)
diff --git a/deps/chakrashim/core/lib/Common/Core/Assertions.h b/deps/chakrashim/core/lib/Common/Core/Assertions.h
index 65779e41f56..34aa46fc477 100644
--- a/deps/chakrashim/core/lib/Common/Core/Assertions.h
+++ b/deps/chakrashim/core/lib/Common/Core/Assertions.h
@@ -140,4 +140,4 @@ struct IsSame
{
IsTrue = true
};
-};
\ No newline at end of file
+};
diff --git a/deps/chakrashim/core/lib/Common/Core/CMakeLists.txt b/deps/chakrashim/core/lib/Common/Core/CMakeLists.txt
index 626df8974f2..2c3fb3915fa 100644
--- a/deps/chakrashim/core/lib/Common/Core/CMakeLists.txt
+++ b/deps/chakrashim/core/lib/Common/Core/CMakeLists.txt
@@ -11,7 +11,6 @@ add_library (Chakra.Common.Core OBJECT
DelayLoadLibrary.cpp
EtwTraceCore.cpp
FaultInjection.cpp
- GlobalSecurityPolicy.cpp
Output.cpp
PerfCounter.cpp
PerfCounterImpl.cpp
diff --git a/deps/chakrashim/core/lib/Common/Core/CommonTypedefs.h b/deps/chakrashim/core/lib/Common/Core/CommonTypedefs.h
index df1f797a73f..0a0870e09a1 100644
--- a/deps/chakrashim/core/lib/Common/Core/CommonTypedefs.h
+++ b/deps/chakrashim/core/lib/Common/Core/CommonTypedefs.h
@@ -61,3 +61,6 @@ namespace Js
{
typedef uint32 LocalFunctionId;
};
+
+// digit_t represents a single digit in the underlying bigint representation
+typedef uintptr_t digit_t;
diff --git a/deps/chakrashim/core/lib/Common/Core/ConfigParser.h b/deps/chakrashim/core/lib/Common/Core/ConfigParser.h
index abbafa9601a..136c0c42792 100644
--- a/deps/chakrashim/core/lib/Common/Core/ConfigParser.h
+++ b/deps/chakrashim/core/lib/Common/Core/ConfigParser.h
@@ -40,8 +40,8 @@ class ConfigParser
void ParseRegistryKey(HKEY hk, CmdLineArgsParser &parser);
#ifdef _WIN32
- static void ConfigParser::SetConfigStringFromRegistry(_In_ HKEY hk, _In_z_ const char16* subKeyName, _In_z_ const char16* valName, _Inout_ Js::String& str);
- static void ConfigParser::ReadRegistryString(_In_ HKEY hk, _In_z_ const char16* subKeyName, _In_z_ const char16* valName, _Outptr_result_maybenull_z_ const char16** sz, _Out_ DWORD* length);
+ static void SetConfigStringFromRegistry(_In_ HKEY hk, _In_z_ const char16* subKeyName, _In_z_ const char16* valName, _Inout_ Js::String& str);
+ static void ReadRegistryString(_In_ HKEY hk, _In_z_ const char16* subKeyName, _In_z_ const char16* valName, _Outptr_result_maybenull_z_ const char16** sz, _Out_ DWORD* length);
#endif
public:
diff --git a/deps/chakrashim/core/lib/Common/Core/EtwTraceCore.h b/deps/chakrashim/core/lib/Common/Core/EtwTraceCore.h
index f50f84cd985..eed362b83ba 100644
--- a/deps/chakrashim/core/lib/Common/Core/EtwTraceCore.h
+++ b/deps/chakrashim/core/lib/Common/Core/EtwTraceCore.h
@@ -78,7 +78,7 @@ CompileAssert(false)
#pragma prefast(push)
#pragma prefast(disable:__WARNING_USING_UNINIT_VAR, "The ETW data generated from the manifest includes a default null function which uses unintialized memory.")
-#include
+#include
#ifdef NTBUILD
#include
#include
diff --git a/deps/chakrashim/core/lib/Common/Core/FaultInjection.cpp b/deps/chakrashim/core/lib/Common/Core/FaultInjection.cpp
index 216e89974cd..201a67c3c4e 100644
--- a/deps/chakrashim/core/lib/Common/Core/FaultInjection.cpp
+++ b/deps/chakrashim/core/lib/Common/Core/FaultInjection.cpp
@@ -150,7 +150,7 @@ namespace Js
else
{
RtlVirtualUnwind(UNW_FLAG_NHANDLER, ImageBase, Context.Rip, RuntimeFunction,
- &Context, &HandlerData, &EstablisherFrame, &NvContext);
+ &Context, &HandlerData, &EstablisherFrame, NULL);
}
if (!Context.Rip)
diff --git a/deps/chakrashim/core/lib/Common/Core/GlobalSecurityPolicy.cpp b/deps/chakrashim/core/lib/Common/Core/GlobalSecurityPolicy.cpp
index f946bd0462d..ec9355211f2 100644
--- a/deps/chakrashim/core/lib/Common/Core/GlobalSecurityPolicy.cpp
+++ b/deps/chakrashim/core/lib/Common/Core/GlobalSecurityPolicy.cpp
@@ -5,126 +5,32 @@
#include "CommonCorePch.h"
-#ifdef _WIN32
-
-#include
-
-
-CriticalSection GlobalSecurityPolicy::s_policyCS;
-GlobalSecurityPolicy GlobalSecurityObject;
-
#pragma section(".mrdata", read)
-// Note: 'volatile' is necessary here otherwise the compiler assumes these are constants initialized to '0' and will constant propagate them...
-__declspec(allocate(".mrdata")) volatile GlobalSecurityPolicy::ReadOnlyData GlobalSecurityPolicy::readOnlyData =
- {
-#if defined(_CONTROL_FLOW_GUARD)
- nullptr,
- nullptr,
-#endif
- false,
- false,
- false
- };
-
-bool
-GlobalSecurityPolicy::IsCFGEnabled()
-{
- return readOnlyData.isCFGEnabled && !PHASE_OFF1(Js::CFGPhase);
-}
-
-bool
-GlobalSecurityPolicy::InitIsCFGEnabled()
-{
-#if defined(_CONTROL_FLOW_GUARD)
- PROCESS_MITIGATION_CONTROL_FLOW_GUARD_POLICY CfgPolicy;
- BOOL isGetMitigationPolicySucceeded = GlobalSecurityPolicy::GetMitigationPolicyForProcess(
- GetCurrentProcess(),
- ProcessControlFlowGuardPolicy,
- &CfgPolicy,
- sizeof(CfgPolicy));
- AssertOrFailFast(isGetMitigationPolicySucceeded);
- return CfgPolicy.EnableControlFlowGuard;
-
-#else
- return false;
-#endif // _CONTROL_FLOW_GUARD
-}
-
-GlobalSecurityPolicy::GlobalSecurityPolicy()
-{
-#if defined(_CONTROL_FLOW_GUARD)
- AutoCriticalSection autocs(&s_policyCS);
- DWORD oldProtect;
-
- // Make sure this is called only once
- AssertOrFailFast(!readOnlyData.isInitialized);
-
-#if defined(CHAKRA_CORE_DOWN_COMPAT)
- if (AutoSystemInfo::Data.IsWinThresholdOrLater())
-#endif
- {
- // Make readOnlyData read-write
- BOOL res = VirtualProtect((LPVOID)&readOnlyData, sizeof(ReadOnlyData), PAGE_READWRITE, &oldProtect);
- if ((res == FALSE) || (oldProtect != PAGE_READONLY))
- {
- RaiseFailFastException(nullptr, nullptr, FAIL_FAST_GENERATE_EXCEPTION_ADDRESS);
- }
-
- readOnlyData.isInitialized = true;
-
- EnsureFromSystemDirOnly();
-
- if (m_hModule)
- {
- readOnlyData.pfnGetProcessMitigationPolicy = (PFNCGetMitigationPolicyForProcess)GetFunction("GetProcessMitigationPolicy");
- if (readOnlyData.pfnGetProcessMitigationPolicy == nullptr)
- {
- RaiseFailFastException(nullptr, nullptr, FAIL_FAST_GENERATE_EXCEPTION_ADDRESS);
- }
-
- readOnlyData.isCFGEnabled = InitIsCFGEnabled();
-
- if (readOnlyData.isCFGEnabled)
- {
- readOnlyData.pfnSetProcessValidCallTargets = (PFNCSetProcessValidCallTargets)GetFunction("SetProcessValidCallTargets");
- if (readOnlyData.pfnSetProcessValidCallTargets == nullptr)
- {
- RaiseFailFastException(nullptr, nullptr, FAIL_FAST_GENERATE_EXCEPTION_ADDRESS);
- }
- }
- }
-
- // Make readOnlyData read-only again.
- res = VirtualProtect((LPVOID)&readOnlyData, sizeof(ReadOnlyData), PAGE_READONLY, &oldProtect);
- if ((res == FALSE) || (oldProtect != PAGE_READWRITE))
- {
- RaiseFailFastException(nullptr, nullptr, FAIL_FAST_GENERATE_EXCEPTION_ADDRESS);
- }
- }
+CriticalSection GlobalSecurityPolicy::s_policyCS;
-#endif //_CONTROL_FLOW_GUARD
- }
+__declspec(allocate(".mrdata"))
+volatile bool GlobalSecurityPolicy::s_ro_disableSetProcessValidCallTargets = false;
void
GlobalSecurityPolicy::DisableSetProcessValidCallTargets()
{
// One-way transition from allowing SetProcessValidCallTargets to disabling
// the API.
- if (!readOnlyData.disableSetProcessValidCallTargets)
+ if (!s_ro_disableSetProcessValidCallTargets)
{
AutoCriticalSection autocs(&s_policyCS);
DWORD oldProtect;
- BOOL res = VirtualProtect((LPVOID)&readOnlyData, sizeof(ReadOnlyData), PAGE_READWRITE, &oldProtect);
+ BOOL res = VirtualProtect((LPVOID)&s_ro_disableSetProcessValidCallTargets, sizeof(s_ro_disableSetProcessValidCallTargets), PAGE_READWRITE, &oldProtect);
if ((res == FALSE) || (oldProtect != PAGE_READONLY))
{
RaiseFailFastException(nullptr, nullptr, FAIL_FAST_GENERATE_EXCEPTION_ADDRESS);
}
- readOnlyData.disableSetProcessValidCallTargets = true;
+ s_ro_disableSetProcessValidCallTargets = true;
- res = VirtualProtect((LPVOID)&readOnlyData, sizeof(ReadOnlyData), PAGE_READONLY, &oldProtect);
+ res = VirtualProtect((LPVOID)&s_ro_disableSetProcessValidCallTargets, sizeof(s_ro_disableSetProcessValidCallTargets), PAGE_READONLY, &oldProtect);
if ((res == FALSE) || (oldProtect != PAGE_READWRITE))
{
RaiseFailFastException(nullptr, nullptr, FAIL_FAST_GENERATE_EXCEPTION_ADDRESS);
@@ -135,20 +41,5 @@ GlobalSecurityPolicy::DisableSetProcessValidCallTargets()
bool
GlobalSecurityPolicy::IsSetProcessValidCallTargetsAllowed()
{
- return !readOnlyData.disableSetProcessValidCallTargets;
-}
-
-#if defined(_CONTROL_FLOW_GUARD)
-BOOL
-DECLSPEC_GUARDNOCF GlobalSecurityPolicy::GetMitigationPolicyForProcess(HANDLE hProcess, PROCESS_MITIGATION_POLICY mitigationPolicy, PVOID lpBuffer, SIZE_T dwLength)
-{
- return GlobalSecurityPolicy::readOnlyData.pfnGetProcessMitigationPolicy(hProcess, mitigationPolicy, lpBuffer, dwLength);
-}
-
-BOOL
-DECLSPEC_GUARDNOCF GlobalSecurityPolicy::SetProcessValidCallTargets(HANDLE hProcess, PVOID virtualAddress, SIZE_T regionSize, ULONG numberOfOffsets, PCFG_CALL_TARGET_INFO offsetInformation)
-{
- return GlobalSecurityPolicy::readOnlyData.pfnSetProcessValidCallTargets(hProcess, virtualAddress, regionSize, numberOfOffsets, offsetInformation);
+ return !s_ro_disableSetProcessValidCallTargets;
}
-#endif //_CONTROL_FLOW_GUARD
-#endif // _WIN32
diff --git a/deps/chakrashim/core/lib/Common/Core/GlobalSecurityPolicy.h b/deps/chakrashim/core/lib/Common/Core/GlobalSecurityPolicy.h
index b2df38c8a9b..5ba99d814b3 100644
--- a/deps/chakrashim/core/lib/Common/Core/GlobalSecurityPolicy.h
+++ b/deps/chakrashim/core/lib/Common/Core/GlobalSecurityPolicy.h
@@ -4,46 +4,14 @@
//-------------------------------------------------------------------------------------------------------
#pragma once
-#include "DelayLoadLibrary.h"
-
-class GlobalSecurityPolicy : private DelayLoadLibrary
+class GlobalSecurityPolicy
{
public:
-#ifdef _WIN32
-#if defined(_CONTROL_FLOW_GUARD)
- typedef BOOL FNCGetMitigationPolicyForProcess(HANDLE, PROCESS_MITIGATION_POLICY, PVOID, SIZE_T);
- typedef FNCGetMitigationPolicyForProcess* PFNCGetMitigationPolicyForProcess;
-
- typedef BOOL FNCSetProcessValidCallTargets(HANDLE, PVOID, SIZE_T, ULONG, PCFG_CALL_TARGET_INFO);
- typedef FNCSetProcessValidCallTargets* PFNCSetProcessValidCallTargets;
-#endif
- GlobalSecurityPolicy();
-
static void DisableSetProcessValidCallTargets();
static bool IsSetProcessValidCallTargetsAllowed();
- static bool IsCFGEnabled();
-
-#if defined(_CONTROL_FLOW_GUARD)
- static FNCGetMitigationPolicyForProcess GetMitigationPolicyForProcess;
- static FNCSetProcessValidCallTargets SetProcessValidCallTargets;
-#endif
- LPCTSTR GetLibraryName() const { return _u("api-ms-win-core-memory-l1-1-3.dll"); }
private:
static CriticalSection s_policyCS;
- volatile static struct ReadOnlyData {
-#if defined(_CONTROL_FLOW_GUARD)
- PFNCGetMitigationPolicyForProcess pfnGetProcessMitigationPolicy;
- PFNCSetProcessValidCallTargets pfnSetProcessValidCallTargets;
-#endif
- bool disableSetProcessValidCallTargets;
- bool isCFGEnabled;
- bool isInitialized;
- } readOnlyData;
-
- static bool InitIsCFGEnabled();
-#else
- static bool IsCFGEnabled() { return false; }
-#endif
+ static volatile bool s_ro_disableSetProcessValidCallTargets;
};
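The simplified policy object keeps the same hardening idea as before: the flag lives in the read-only .mrdata section and is made writable only for the instant it takes to latch it. A standalone sketch of that pattern, assuming MSVC on Windows (names are illustrative; the real code above additionally fail-fasts when the old protection is not what it expects):

```cpp
#include <windows.h>

#pragma section(".mrdata", read)
__declspec(allocate(".mrdata")) volatile bool s_latched = false;

void LatchOnce()
{
    if (!s_latched)
    {
        DWORD oldProtect;
        // Temporarily make the page containing the flag writable...
        if (VirtualProtect((LPVOID)&s_latched, sizeof(s_latched),
                           PAGE_READWRITE, &oldProtect))
        {
            s_latched = true;  // ...perform the one-way transition...
            // ...and restore read-only protection immediately.
            VirtualProtect((LPVOID)&s_latched, sizeof(s_latched),
                           PAGE_READONLY, &oldProtect);
        }
    }
}
```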
diff --git a/deps/chakrashim/core/lib/Common/Core/Output.cpp b/deps/chakrashim/core/lib/Common/Core/Output.cpp
index c6766fd8922..d3ce70255ff 100644
--- a/deps/chakrashim/core/lib/Common/Core/Output.cpp
+++ b/deps/chakrashim/core/lib/Common/Core/Output.cpp
@@ -46,6 +46,11 @@ THREAD_ST WORD Output::s_color = 0;
THREAD_ST bool Output::s_hasColor = false;
THREAD_ST bool Output::s_capture = false;
+THREAD_ST bool Output::hasDoneAlignPrefixForThisLine = false;
+THREAD_ST bool Output::usingCustomAlignAndPrefix = false;
+THREAD_ST size_t Output::align = 0;
+THREAD_ST const char16* Output::prefix = nullptr;
+
#define MAX_OUTPUT_BUFFER_SIZE 10 * 1024 * 1024 // 10 MB maximum before we force a flush
size_t __cdecl
@@ -282,6 +287,86 @@ Output::VPrint(const char16 *form, va_list argptr)
size_t __cdecl
Output::PrintBuffer(const char16 * buf, size_t size)
{
+ // Handle custom line prefixing
+ bool internallyAllocatedBuffer = false;
+ if (usingCustomAlignAndPrefix)
+ {
+ if (hasDoneAlignPrefixForThisLine && wcschr(buf, '\n') == nullptr)
+ {
+ // no newlines, and we've already prefixed this line, so nothing to do
+ }
+ else
+ {
+ size_t newbufsize = size + align;
+ char16* newbuf = (char16*)calloc(newbufsize, sizeof(char16));
+ AssertOrFailFastMsg(newbuf != nullptr, "Ran out of memory while printing output");
+ internallyAllocatedBuffer = true;
+ const char16* currentReadIndex = buf;
+ char16* currentWriteIndex = newbuf;
+ auto ensureSpace = [&currentWriteIndex, &newbuf, &newbufsize](size_t numCharsWantToWrite)
+ {
+ size_t charsWritten = (currentWriteIndex - newbuf); // pointer subtraction is number of elements of pointed type between pointers
+ size_t remaining = newbufsize - charsWritten;
+ if (numCharsWantToWrite + 1 > remaining)
+ {
+ char16* tempbuf = (char16*)realloc(newbuf, newbufsize * sizeof(char16) * 2);
+ AssertOrFailFastMsg(tempbuf != nullptr, "Ran out of memory while printing output");
+ newbuf = tempbuf;
+ newbufsize = newbufsize * 2;
+ currentWriteIndex = newbuf + charsWritten;
+ }
+ };
+ const size_t prefixlength = wcslen(prefix);
+ size_t oldS_Column = Output::s_Column;
+ while (currentReadIndex < buf + size)
+ {
+ if (!hasDoneAlignPrefixForThisLine)
+ {
+ // attempt to write the alignment
+ {
+ unsigned int alignspacesneeded = 1; // always put at least one space
+ if (oldS_Column < align)
+ {
+ alignspacesneeded = (unsigned int)(align - oldS_Column);
+ }
+ ensureSpace(alignspacesneeded);
+ for (unsigned int i = 0; i < alignspacesneeded; i++)
+ {
+ *(currentWriteIndex++) = ' ';
+ }
+ }
+ // attempt to write the prefix
+ ensureSpace(prefixlength);
+ js_wmemcpy_s(currentWriteIndex, (newbuf + newbufsize) - currentWriteIndex, Output::prefix, prefixlength);
+ currentWriteIndex += prefixlength;
+ oldS_Column = align + prefixlength;
+ hasDoneAlignPrefixForThisLine = true;
+ }
+ const char16* endOfLine = wcschr(currentReadIndex, '\n');
+ size_t charsToCopy = 0;
+ if (endOfLine != nullptr)
+ {
+ charsToCopy = (endOfLine - currentReadIndex) + 1; // We want to grab the newline character as part of this line
+ oldS_Column = 0; // We're ending this line, and want the next to be calculated properly
+ hasDoneAlignPrefixForThisLine = false; // The next line will need this
+ }
+ else
+ {
+ charsToCopy = (buf + size) - currentReadIndex; // the rest of the input buffer
+ oldS_Column += charsToCopy; // Will be reset anyway later on
+ }
+ ensureSpace(charsToCopy); // reserve exactly what will be copied (endOfLine may be null here)
+ js_wmemcpy_s(currentWriteIndex, (newbuf + newbufsize) - currentWriteIndex, currentReadIndex, charsToCopy);
+ currentReadIndex += charsToCopy;
+ currentWriteIndex += charsToCopy;
+ }
+ // null terminate because there's no real reason not to
+ ensureSpace(1);
+ *(currentWriteIndex++) = '\0';
+ buf = newbuf;
+ size = (currentWriteIndex - newbuf) - 1; // not counting the terminator here though, to align with vsnwprintf_s's behavior
+ }
+ }
Output::s_Column += size;
const char16 * endbuf = wcschr(buf, '\n');
while (endbuf != nullptr)
@@ -443,7 +528,19 @@ void Output::DirectPrint(char16 const * string)
void
Output::SkipToColumn(size_t column)
{
- if (column <= Output::s_Column)
+ size_t columnbias = 0;
+ // If we're using a custom alignment and prefix, we want to do this relative to that
+ if (usingCustomAlignAndPrefix)
+ {
+ // If we've already added the alignment and prefix, we need to add the alignment to our column number here
+ columnbias = align + wcslen(prefix);
+ }
+ size_t reference = 0;
+ if (Output::s_Column > columnbias)
+ {
+ reference = Output::s_Column - columnbias;
+ }
+ if (column <= reference)
{
Output::Print(_u(" "));
return;
@@ -451,7 +548,7 @@ Output::SkipToColumn(size_t column)
// compute distance to our destination
- size_t dist = column - Output::s_Column;
+ size_t dist = column - reference;
// Print at least one space
while (dist > 0)
@@ -564,3 +661,20 @@ Output::CaptureEnd()
return returnBuffer;
}
+
+void
+Output::SetAlignAndPrefix(unsigned int align, const char16 *prefix)
+{
+ Output::hasDoneAlignPrefixForThisLine = false;
+ Output::usingCustomAlignAndPrefix = true;
+ Output::prefix = prefix;
+ Output::align = align;
+}
+void
+Output::ResetAlignAndPrefix()
+{
+ Output::hasDoneAlignPrefixForThisLine = false;
+ Output::usingCustomAlignAndPrefix = false;
+ Output::prefix = nullptr;
+ Output::align = 0;
+}
diff --git a/deps/chakrashim/core/lib/Common/Core/Output.h b/deps/chakrashim/core/lib/Common/Core/Output.h
index 1af3fbeed4d..3ee4f0f491c 100644
--- a/deps/chakrashim/core/lib/Common/Core/Output.h
+++ b/deps/chakrashim/core/lib/Common/Core/Output.h
@@ -114,7 +114,10 @@ class Output
static WORD SetConsoleForeground(WORD color);
static void CaptureStart();
- static char16* CaptureEnd();
+ static char16* CaptureEnd();
+
+ static void SetAlignAndPrefix(unsigned int align, const char16 *prefix);
+ static void ResetAlignAndPrefix();
private:
static void DirectPrint(const char16 * string);
@@ -134,6 +137,11 @@ class Output
#define THREAD_ST THREAD_LOCAL
+ THREAD_ST static bool hasDoneAlignPrefixForThisLine;
+ THREAD_ST static bool usingCustomAlignAndPrefix;
+ THREAD_ST static const char16* prefix;
+ THREAD_ST static size_t align;
+
THREAD_ST static bool s_capture;
THREAD_ST static FILE * s_file;
#ifdef _WIN32
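A hedged usage sketch of the new pair (the column width, tag, and variables are made up): every line printed between the two calls is padded to the alignment column and stamped with the prefix, and the thread-local hasDoneAlignPrefixForThisLine state means a line built from several Print calls is only prefixed once:

```cpp
const char16* functionName = _u("foo");
unsigned int elapsedMs = 42;

Output::SetAlignAndPrefix(12, _u("[BGJIT] "));
Output::Print(_u("compiling %s\n"), functionName);  // aligned + prefixed
Output::Print(_u("done"));                          // starts a new prefixed line
Output::Print(_u(" (%u ms)\n"), elapsedMs);         // same line: no second prefix
Output::ResetAlignAndPrefix();
```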
diff --git a/deps/chakrashim/core/lib/Common/Core/SysInfo.cpp b/deps/chakrashim/core/lib/Common/Core/SysInfo.cpp
index 920002fe75f..188be21c99b 100644
--- a/deps/chakrashim/core/lib/Common/Core/SysInfo.cpp
+++ b/deps/chakrashim/core/lib/Common/Core/SysInfo.cpp
@@ -360,6 +360,20 @@ AutoSystemInfo::CheckForAtom() const
}
#endif
+bool
+AutoSystemInfo::IsCFGEnabled()
+{
+#if defined(_CONTROL_FLOW_GUARD)
+ return true
+#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
+ && IsWinThresholdOrLater() && !PHASE_OFF1(Js::CFGPhase)
+#endif //ENABLE_DEBUG_CONFIG_OPTIONS
+ ;
+#else
+ return false;
+#endif //_CONTROL_FLOW_GUARD
+}
+
bool
AutoSystemInfo::IsWin8OrLater()
{
diff --git a/deps/chakrashim/core/lib/Common/Core/SysInfo.h b/deps/chakrashim/core/lib/Common/Core/SysInfo.h
index 5f20519088c..f709d44360b 100644
--- a/deps/chakrashim/core/lib/Common/Core/SysInfo.h
+++ b/deps/chakrashim/core/lib/Common/Core/SysInfo.h
@@ -15,6 +15,7 @@ class AutoSystemInfo : public SYSTEM_INFO
uint GetAllocationGranularityPageSize() const;
bool DisableDebugScopeCapture() const { return this->disableDebugScopeCapture; }
+ bool IsCFGEnabled();
bool IsWin8OrLater();
#if defined(_CONTROL_FLOW_GUARD)
bool IsWinThresholdOrLater();
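The body of IsCFGEnabled above reads oddly at first: the #ifdef splices extra conjuncts into the middle of a single return expression, so the debug-only checks disappear entirely from release-style builds. A generic sketch of the same splice pattern with hypothetical names:

```cpp
bool RuntimeCheckA();
bool RuntimeCheckB();

bool IsFeatureEnabled()
{
    return true              // unconditionally part of the expression
#ifdef EXTRA_RUNTIME_CHECKS
        && RuntimeCheckA()   // spliced in only on debug-config builds
        && RuntimeCheckB()
#endif
        ;                    // the lone semicolon closes the return statement
}
```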
diff --git a/deps/chakrashim/core/lib/Common/DataStructures/BaseDictionary.h b/deps/chakrashim/core/lib/Common/DataStructures/BaseDictionary.h
index 5381bbacf60..484cdaaf6d5 100644
--- a/deps/chakrashim/core/lib/Common/DataStructures/BaseDictionary.h
+++ b/deps/chakrashim/core/lib/Common/DataStructures/BaseDictionary.h
@@ -238,7 +238,7 @@ namespace JsUtil
return entries[i].Value();
}
- const TValue Item(const TKey& key) const
+ TValue Item(const TKey& key) const
{
int i = FindEntry(key);
Assert(i >= 0);
@@ -250,6 +250,7 @@ namespace JsUtil
return Insert(key, value);
}
+ // Returns -1 if the key is already in the dictionary
int AddNew(const TKey& key, const TValue& value)
{
return Insert(key, value);
diff --git a/deps/chakrashim/core/lib/Common/DataStructures/BigInt.cpp b/deps/chakrashim/core/lib/Common/DataStructures/BigUInt.cpp
similarity index 92%
rename from deps/chakrashim/core/lib/Common/DataStructures/BigInt.cpp
rename to deps/chakrashim/core/lib/Common/DataStructures/BigUInt.cpp
index 9908eaab594..33100257d45 100644
--- a/deps/chakrashim/core/lib/Common/DataStructures/BigInt.cpp
+++ b/deps/chakrashim/core/lib/Common/DataStructures/BigUInt.cpp
@@ -3,20 +3,20 @@
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------
#include "CommonDataStructuresPch.h"
-#include "DataStructures/BigInt.h"
+#include "DataStructures/BigUInt.h"
#include "Common/NumberUtilitiesBase.h"
#include "Common/NumberUtilities.h"
namespace Js
{
- BigInt & BigInt::operator= (BigInt &bi)
+ BigUInt & BigUInt::operator= (BigUInt &bi)
{
AssertMsg(false, "can't assign BigInts");
return *this;
}
#if DBG
- void BigInt::AssertValid(bool fCheckVal)
+ void BigUInt::AssertValid(bool fCheckVal)
{
Assert(m_cluMax >= kcluMaxInit);
Assert(m_prglu != 0);
@@ -26,7 +26,7 @@ namespace Js
}
#endif
- BigInt::BigInt(void)
+ BigUInt::BigUInt(void)
{
m_cluMax = kcluMaxInit;
m_clu = 0;
@@ -34,25 +34,25 @@ namespace Js
AssertBi(this);
}
- BigInt::~BigInt(void)
+ BigUInt::~BigUInt(void)
{
if (m_prglu != m_rgluInit)
free(m_prglu);
}
- int32 BigInt::Clu(void)
+ int32 BigUInt::Clu(void)
{
return m_clu;
}
- uint32 BigInt::Lu(int32 ilu)
+ uint32 BigUInt::Lu(int32 ilu)
{
AssertBi(this);
Assert(ilu < m_clu);
return m_prglu[ilu];
}
- bool BigInt::FResize(int32 clu)
+ bool BigUInt::FResize(int32 clu)
{
AssertBiNoVal(this);
@@ -79,7 +79,7 @@ namespace Js
return true;
}
- bool BigInt::FInitFromRglu(uint32 *prglu, int32 clu)
+ bool BigUInt::FInitFromRglu(uint32 *prglu, int32 clu)
{
AssertBi(this);
Assert(clu >= 0);
@@ -95,7 +95,7 @@ namespace Js
return true;
}
- bool BigInt::FInitFromBigint(BigInt *pbiSrc)
+ bool BigUInt::FInitFromBigint(BigUInt *pbiSrc)
{
AssertBi(this);
AssertBi(pbiSrc);
@@ -105,7 +105,7 @@ namespace Js
}
template <typename EncodedChar>
- bool BigInt::FInitFromDigits(const EncodedChar *prgch, int32 cch, int32 *pcchDig)
+ bool BigUInt::FInitFromDigits(const EncodedChar *prgch, int32 cch, int32 *pcchDig)
{
AssertBi(this);
Assert(cch >= 0);
@@ -147,7 +147,7 @@ namespace Js
return true;
}
- bool BigInt::FMulAdd(uint32 luMul, uint32 luAdd)
+ bool BigUInt::FMulAdd(uint32 luMul, uint32 luAdd)
{
AssertBi(this);
Assert(luMul != 0);
@@ -174,7 +174,7 @@ namespace Js
return true;
}
- bool BigInt::FMulPow5(int32 c5)
+ bool BigUInt::FMulPow5(int32 c5)
{
AssertBi(this);
Assert(c5 >= 0);
@@ -203,7 +203,7 @@ namespace Js
return true;
}
- bool BigInt::FShiftLeft(int32 cbit)
+ bool BigUInt::FShiftLeft(int32 cbit)
{
AssertBi(this);
Assert(cbit >= 0);
@@ -258,7 +258,7 @@ namespace Js
return true;
}
- void BigInt::ShiftLusRight(int32 clu)
+ void BigUInt::ShiftLusRight(int32 clu)
{
AssertBi(this);
Assert(clu >= 0);
@@ -278,7 +278,7 @@ namespace Js
AssertBi(this);
}
- void BigInt::ShiftRight(int32 cbit)
+ void BigUInt::ShiftRight(int32 cbit)
{
AssertBi(this);
Assert(cbit >= 0);
@@ -312,7 +312,7 @@ namespace Js
AssertBi(this);
}
- int BigInt::Compare(BigInt *pbi)
+ int BigUInt::Compare(BigUInt *pbi)
{
AssertBi(this);
AssertBi(pbi);
@@ -338,7 +338,7 @@ namespace Js
return (m_prglu[ilu] > pbi->m_prglu[ilu]) ? 1 : -1;
}
- bool BigInt::FAdd(BigInt *pbi)
+ bool BigUInt::FAdd(BigUInt *pbi)
{
AssertBi(this);
AssertBi(pbi);
@@ -391,7 +391,7 @@ namespace Js
return true;
}
- void BigInt::Subtract(BigInt *pbi)
+ void BigUInt::Subtract(BigUInt *pbi)
{
AssertBi(this);
AssertBi(pbi);
@@ -443,7 +443,7 @@ namespace Js
AssertBi(this);
}
- int BigInt::DivRem(BigInt *pbi)
+ int BigUInt::DivRem(BigUInt *pbi)
{
AssertBi(this);
AssertBi(pbi);
@@ -510,7 +510,7 @@ namespace Js
return wQuo;
}
- double BigInt::GetDbl(void)
+ double BigUInt::GetDbl(void)
{
double dbl;
uint32 luHi, luLo;
@@ -591,6 +591,6 @@ namespace Js
return dbl;
}
- template bool BigInt::FInitFromDigits(const char16 *prgch, int32 cch, int32 *pcchDig);
- template bool BigInt::FInitFromDigits(const utf8char_t *prgch, int32 cch, int32 *pcchDig);
+ template bool BigUInt::FInitFromDigits(const char16 *prgch, int32 cch, int32 *pcchDig);
+ template bool BigUInt::FInitFromDigits(const utf8char_t *prgch, int32 cch, int32 *pcchDig);
}
diff --git a/deps/chakrashim/core/lib/Common/DataStructures/BigInt.h b/deps/chakrashim/core/lib/Common/DataStructures/BigUInt.h
similarity index 55%
rename from deps/chakrashim/core/lib/Common/DataStructures/BigInt.h
rename to deps/chakrashim/core/lib/Common/DataStructures/BigUInt.h
index c2d3f1cc649..a3d5c13baee 100644
--- a/deps/chakrashim/core/lib/Common/DataStructures/BigInt.h
+++ b/deps/chakrashim/core/lib/Common/DataStructures/BigUInt.h
@@ -9,19 +9,20 @@ namespace Js
/***************************************************************************
Big non-negative integer class.
***************************************************************************/
- class BigInt
+ class BigUInt
{
+ // A non-negative big integer is stored as an array of 'digits', where each digit is a uint32
private:
// Make this big enough that we rarely have to call malloc.
- enum { kcluMaxInit = 30 };
+ enum { kcluMaxInit = 30 }; // initial capacity of 30 digits
- int32 m_cluMax;
- int32 m_clu;
- uint32 *m_prglu;
- uint32 m_rgluInit[kcluMaxInit];
+ int32 m_cluMax; // current maximum length (number of digits) it can contain
+ int32 m_clu; // current length (number of digits)
+ uint32 *m_prglu; // pointer to the array of digits
+ uint32 m_rgluInit[kcluMaxInit]; // preallocated space backing the array
- inline BigInt & operator= (BigInt &bi);
- bool FResize(int32 clu);
+ inline BigUInt & operator= (BigUInt &bi);
+ bool FResize(int32 clu); // allocate more space when the length exceeds the current maximum
#if DBG
#define AssertBi(pbi) Assert(pbi); (pbi)->AssertValid(true);
@@ -33,25 +34,25 @@ namespace Js
#endif //!DBG
public:
- BigInt(void);
- ~BigInt(void);
+ BigUInt(void);
+ ~BigUInt(void);
- bool FInitFromRglu(uint32 *prglu, int32 clu);
- bool FInitFromBigint(BigInt *pbiSrc);
+ bool FInitFromRglu(uint32 *prglu, int32 clu); // init from array and length
+ bool FInitFromBigint(BigUInt *pbiSrc);
template <typename EncodedChar>
- bool FInitFromDigits(const EncodedChar *prgch, int32 cch, int32 *pcchDec);
+ bool FInitFromDigits(const EncodedChar *prgch, int32 cch, int32 *pcchDec); // init from a string of decimal digit characters
bool FMulAdd(uint32 luMul, uint32 luAdd);
bool FMulPow5(int32 c5);
bool FShiftLeft(int32 cbit);
void ShiftLusRight(int32 clu);
void ShiftRight(int32 cbit);
- int Compare(BigInt *pbi);
- bool FAdd(BigInt *pbi);
- void Subtract(BigInt *pbi);
- int DivRem(BigInt *pbi);
+ int Compare(BigUInt *pbi);
+ bool FAdd(BigUInt *pbi);
+ void Subtract(BigUInt *pbi);
+ int DivRem(BigUInt *pbi);
- int32 Clu(void);
- uint32 Lu(int32 ilu);
+ int32 Clu(void); // return current length
+ uint32 Lu(int32 ilu); // return the digit at position ilu (starting from 0)
double GetDbl(void);
};
}
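A hedged sketch of how the renamed class is used, based only on the signatures above (the digit string is arbitrary, and the exact pcchDec semantics are as implied by the header comment):

```cpp
// Sketch only; _u() and char16 come from the surrounding codebase.
Js::BigUInt bi;
const char16 digits[] = _u("123456789123456789123456789");
int32 cchDig = 0;

if (bi.FInitFromDigits(digits, /*cch*/ 27, &cchDig))
{
    int32 lengthInDigits = bi.Clu();  // number of uint32 digits in use
    double approx = bi.GetDbl();      // nearest double to the big value
    bi.FMulAdd(10, 7);                // bi = bi * 10 + 7, growing if needed
}
```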
diff --git a/deps/chakrashim/core/lib/Common/DataStructures/CMakeLists.txt b/deps/chakrashim/core/lib/Common/DataStructures/CMakeLists.txt
index 0e524aadc29..ac2854f2c3c 100644
--- a/deps/chakrashim/core/lib/Common/DataStructures/CMakeLists.txt
+++ b/deps/chakrashim/core/lib/Common/DataStructures/CMakeLists.txt
@@ -1,5 +1,5 @@
add_library (Chakra.Common.DataStructures OBJECT
- BigInt.cpp
+ BigUInt.cpp
BufferBuilder.cpp
CommonDataStructuresPch.cpp
DictionaryStats.cpp
diff --git a/deps/chakrashim/core/lib/Common/DataStructures/Chakra.Common.DataStructures.vcxproj b/deps/chakrashim/core/lib/Common/DataStructures/Chakra.Common.DataStructures.vcxproj
index fb447b00ec7..f7889680c58 100644
--- a/deps/chakrashim/core/lib/Common/DataStructures/Chakra.Common.DataStructures.vcxproj
+++ b/deps/chakrashim/core/lib/Common/DataStructures/Chakra.Common.DataStructures.vcxproj
@@ -29,7 +29,7 @@
-    <ClCompile Include="BigInt.cpp" />
+    <ClCompile Include="BigUInt.cpp" />
@@ -45,7 +45,7 @@
-    <ClInclude Include="BigInt.h" />
+    <ClInclude Include="BigUInt.h" />
diff --git a/deps/chakrashim/core/lib/Common/DataStructures/ClusterList.h b/deps/chakrashim/core/lib/Common/DataStructures/ClusterList.h
index b2c94a27643..9100c49151d 100644
--- a/deps/chakrashim/core/lib/Common/DataStructures/ClusterList.h
+++ b/deps/chakrashim/core/lib/Common/DataStructures/ClusterList.h
@@ -417,4 +417,4 @@ class SegmentClusterList
Output::Print(_u("]\n"));
}
#endif
-};
\ No newline at end of file
+};
diff --git a/deps/chakrashim/core/lib/Common/DataStructures/DictionaryEntry.h b/deps/chakrashim/core/lib/Common/DataStructures/DictionaryEntry.h
index 9941ef0414f..c93977c109a 100644
--- a/deps/chakrashim/core/lib/Common/DataStructures/DictionaryEntry.h
+++ b/deps/chakrashim/core/lib/Common/DataStructures/DictionaryEntry.h
@@ -6,83 +6,123 @@
namespace JsUtil
{
- template <class TValue>
- class BaseValueEntry
+ namespace
{
- protected:
- TValue value; // data of entry
- void Set(TValue const& value)
+ template <class T1, class T2, bool useT1>
+ struct ChooseSmallerHelper
{
- this->value = value;
- }
-
- public:
- int next; // Index of next entry, -1 if last
+ typedef T2 type;
+ };
- static bool SupportsCleanup()
+ template <class T1, class T2>
+ struct ChooseSmallerHelper<T1, T2, true>
{
- return false;
- }
+ typedef T1 type;
+ };
- static bool NeedsCleanup(BaseValueEntry&)
- {
- return false;
- }
+ template <class T1, class T2>
+ using ChooseSmaller = typename ChooseSmallerHelper<T1, T2, (sizeof(T1) < sizeof(T2))>::type;
- TValue const& Value() const { return value; }
- TValue& Value() { return value; }
- void SetValue(TValue const& value) { this->value = value; }
- };
-
- template <class TValue>
- class ValueEntry: public BaseValueEntry<TValue>
- {
- public:
- void Clear()
+ template <class TValue>
+ class ValueEntryData
{
- }
- };
-
- // Class specialization for pointer values to support clearing
- template <class TValue>
- class ValueEntry<TValue *>: public BaseValueEntry<TValue *>
- {
- public:
- void Clear()
+ protected:
+ TValue value; // data of entry
+ public:
+ int next; // Index of next entry, -1 if last
+ };
+
+ template <class TKey, class TValue>
+ class KeyValueEntryDataLayout1
{
- this->value = nullptr;
- }
- };
-
- template <>
- class ValueEntry<bool>: public BaseValueEntry<bool>
- {
- public:
- void Clear()
+ protected:
+ TValue value; // data of entry
+ TKey key; // key of entry
+ public:
+ int next; // Index of next entry, -1 if last
+ };
+
+ template <class TKey, class TValue>
+ class KeyValueEntryDataLayout2
{
- this->value = false;
- }
- };
-
- template <>
- class ValueEntry<int>: public BaseValueEntry<int>
- {
- public:
- void Clear()
+ protected:
+ TValue value; // data of entry
+ public:
+ int next; // Index of next entry, -1 if last
+ protected:
+ TKey key; // key of entry
+ };
+
+ // Packing matters because we make so many dictionary entries.
+ // The int pointing to the next item in the list may be included
+ // either after the value or after the key, depending on which
+ // packs better.
+ template <class TKey, class TValue>
+ using KeyValueEntryData = ChooseSmaller<KeyValueEntryDataLayout1<TKey, TValue>, KeyValueEntryDataLayout2<TKey, TValue>>;
+
+ template <class TValue, class TData = ValueEntryData<TValue>>
+ class ValueEntry : public TData
{
- this->value = 0;
- }
- };
+ protected:
+ void Set(TValue const& value)
+ {
+ this->value = value;
+ }
+
+ public:
+ static bool SupportsCleanup()
+ {
+ return false;
+ }
+
+ static bool NeedsCleanup(ValueEntry&)
+ {
+ return false;
+ }
+
+ void Clear()
+ {
+ ClearValue<TValue>::Clear(&this->value);
+ }
+
+ TValue const& Value() const { return this->value; }
+ TValue& Value() { return this->value; }
+ void SetValue(TValue const& value) { this->value = value; }
+ };
+
+ // Used by BaseHashSet, the default is that the key is the same as the value
+ template <class TKey, class TValue>
+ class ImplicitKeyValueEntry : public ValueEntry<TValue>
+ {
+ public:
+ TKey Key() const { return ValueToKey<TKey, TValue>::ToKey(this->value); }
- template <>
- class ValueEntry: public BaseValueEntry