diff --git a/.gitignore b/.gitignore
index 4f8e77a3e..749832847 100644
--- a/.gitignore
+++ b/.gitignore
@@ -273,3 +273,7 @@ packages/
/.idea
/test/TorchSharpTest/exportsd.py
.vscode/settings.json
+/TestClear
+TestClear/
+/nuget.config
+/src/Native/LibTorchSharp/third_party
diff --git a/Directory.Build.props b/Directory.Build.props
index e8e44ee50..a54b11a75 100644
--- a/Directory.Build.props
+++ b/Directory.Build.props
@@ -1,10 +1,12 @@
-
+
+ K:\FrameworksForC\LibTorch\libtorch-win-shared-with-deps-debug-2.11.0+cu130\libtorch\share\cmake\Torch
+
Debug
Debug;Release
<_DefaultArchitecture>$([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture.ToString().ToLower())
@@ -20,7 +22,7 @@
$(RepoRoot)src/
$(RepoRoot)pkg/
- 2.10.0.0
+ 2.11.0.0
2.2.2.0
@@ -86,13 +88,12 @@
- 2.10.0.0
+ 2.11.0.0
2.2.2.0
false
$(LibTorchPackageVersion)
-
true
@@ -167,8 +168,11 @@
$(DefineContants);DEBUG
false
+
+ $(DefineContants);CUDA_TOOLKIT_FOUND
+
true
-
+
\ No newline at end of file
diff --git a/Directory.Build.targets b/Directory.Build.targets
index 4ab3c814c..7f4e8d27c 100644
--- a/Directory.Build.targets
+++ b/Directory.Build.targets
@@ -84,7 +84,7 @@
-
@@ -101,7 +101,7 @@
-
-
+
-
+ -->
\ No newline at end of file
diff --git a/MyCustomCMD.txt b/MyCustomCMD.txt
new file mode 100644
index 000000000..6a438cd66
--- /dev/null
+++ b/MyCustomCMD.txt
@@ -0,0 +1,12 @@
+dotnet build TorchSharpFilter.slnf /p:CustomLibTorchPath="K:\FrameworksForC\LibTorch\libtorch-win-shared-with-deps-debug-2.6.0+cu126\libtorch" -f netstandard2.0
+build.cmd Release x64 --libtorchpath "K:\FrameworksForC\LibTorch\libtorch-win-shared-with-deps-2.8.0+cu128\libtorch\share\cmake\Torch"
+
+dotnet build /p:CustomLibTorchFullPath="K:\FrameworksForC\LibTorch\libtorch-win-shared-with-deps-2.8.0+cu128\libtorch\share\cmake\Torch" -c Release
+
+dotnet build TorchSharpFilter.slnf /p:CustomLibTorchFullPath="K:\FrameworksForC\LibTorch\libtorch-win-shared-with-deps-debug-2.6.0+cu126\libtorch\share\cmake\Torch" -f netstandard2.0
+
+
+dotnet build /p:CustomLibTorchFullPath="K:\FrameworksForC\LibTorch\libtorch-win-shared-with-deps-2.11.0+cpu\libtorch\share\cmake\Torch"
+dotnet test /p:CustomLibTorchFullPath="K:\FrameworksForC\LibTorch\libtorch-win-shared-with-deps-2.11.0+cpu\libtorch\share\cmake\Torch"
+
+dotnet build /p:CustomLibTorchFullPath="K:\FrameworksForC\LibTorch\libtorch-win-shared-with-deps-debug-2.11.0+cu130\libtorch\share\cmake\Torch" -f netstandard2.0 -c Debug
\ No newline at end of file
diff --git a/TorchSharp.sln b/TorchSharp.sln
index b27ac7e8a..9e2c41299 100644
--- a/TorchSharp.sln
+++ b/TorchSharp.sln
@@ -1,3 +1,4 @@
+
Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 17
VisualStudioVersion = 17.0.31903.59
@@ -34,7 +35,7 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "TorchSharp", "TorchSharp",
pkg\TorchSharp\TorchSharp.symbols.nupkgproj = pkg\TorchSharp\TorchSharp.symbols.nupkgproj
EndProjectSection
EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "LibTorchSharp", "bin\obj\x64.Debug\Native\LibTorchSharp\LibTorchSharp.vcxproj", "{E7467DDF-893C-38A8-8E19-6B4E3FB10F55}"
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "LibTorchSharp", "bin\obj\x64.Debug\Native\LibTorchSharp\LibTorchSharp.vcxproj", "{265C2E6F-04E6-37A8-B504-E3DD4A3FEE06}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "LibTorchSharp", "bin\obj\x64.Release\Native\LibTorchSharp\LibTorchSharp.vcxproj", "{BB811429-0DF1-3D22-B664-09C2F5A9E0AB}"
EndProject
@@ -107,10 +108,10 @@ Global
{42B45168-476D-4BFA-87B8-81A34E6295CD}.Release|Any CPU.Build.0 = Release|Any CPU
{42B45168-476D-4BFA-87B8-81A34E6295CD}.Release|x64.ActiveCfg = Release|Any CPU
{42B45168-476D-4BFA-87B8-81A34E6295CD}.Release|x64.Build.0 = Release|Any CPU
- {E7467DDF-893C-38A8-8E19-6B4E3FB10F55}.Debug|Any CPU.ActiveCfg = Debug|x64
- {E7467DDF-893C-38A8-8E19-6B4E3FB10F55}.Debug|x64.ActiveCfg = Debug|x64
- {E7467DDF-893C-38A8-8E19-6B4E3FB10F55}.Release|Any CPU.ActiveCfg = Release|x64
- {E7467DDF-893C-38A8-8E19-6B4E3FB10F55}.Release|x64.ActiveCfg = Release|x64
+ {265C2E6F-04E6-37A8-B504-E3DD4A3FEE06}.Debug|Any CPU.ActiveCfg = Debug|x64
+ {265C2E6F-04E6-37A8-B504-E3DD4A3FEE06}.Debug|x64.ActiveCfg = Debug|x64
+ {265C2E6F-04E6-37A8-B504-E3DD4A3FEE06}.Release|Any CPU.ActiveCfg = Release|x64
+ {265C2E6F-04E6-37A8-B504-E3DD4A3FEE06}.Release|x64.ActiveCfg = Release|x64
{DD652544-711E-4029-83FF-DA4A9600E6E7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{DD652544-711E-4029-83FF-DA4A9600E6E7}.Debug|Any CPU.Build.0 = Debug|Any CPU
{DD652544-711E-4029-83FF-DA4A9600E6E7}.Debug|x64.ActiveCfg = Debug|Any CPU
@@ -176,7 +177,7 @@ Global
{6C323B05-9028-4B09-911C-3C03AE058BEE} = {AED9C836-31E3-4F3F-8ABC-929555D3F3C4}
{42B45168-476D-4BFA-87B8-81A34E6295CD} = {09EADF06-BE25-4228-AB53-95AE3E15B530}
{567456AD-B026-4CB6-B98D-4FC930C90223} = {D3D38B03-B557-484D-8348-8BADEE4DF592}
- {E7467DDF-893C-38A8-8E19-6B4E3FB10F55} = {CF2C1A9E-3A8A-4329-8A6E-7880C15AAC3D}
+ {265C2E6F-04E6-37A8-B504-E3DD4A3FEE06} = {CF2C1A9E-3A8A-4329-8A6E-7880C15AAC3D}
{BB811429-0DF1-3D22-B664-09C2F5A9E0AB} = {4DB9E84D-324C-408F-87A6-246E86205540}
{CF2C1A9E-3A8A-4329-8A6E-7880C15AAC3D} = {09EADF06-BE25-4228-AB53-95AE3E15B530}
{D8C60CD8-8429-45F2-A755-47B6CD10FDF8} = {09EADF06-BE25-4228-AB53-95AE3E15B530}
diff --git a/TorchSharpFilter.slnf b/TorchSharpFilter.slnf
new file mode 100644
index 000000000..4f6a8bbe3
--- /dev/null
+++ b/TorchSharpFilter.slnf
@@ -0,0 +1,13 @@
+{
+ "solution": {
+ "path": "TorchSharp.sln",
+ "projects": [
+ "bin\\obj\\x64.Debug\\Native\\LibTorchSharp\\LibTorchSharp.vcxproj",
+ "pkg\\FileRestitcher\\FileRestitcher.Tests\\FileRestitcher.Tests.csproj",
+ "pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj",
+ "src\\TorchAudio\\TorchAudio.csproj",
+ "src\\TorchSharp\\TorchSharp.csproj",
+ "src\\TorchVision\\TorchVision.csproj"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/build/Dependencies.props b/build/Dependencies.props
index 74ef9e6ec..d7882820d 100644
--- a/build/Dependencies.props
+++ b/build/Dependencies.props
@@ -9,7 +9,7 @@
2.10.0
2.2.2
- 12.8
+ 13.0
128
2019.0.5.20190502
diff --git a/nuget.config b/nuget.config
new file mode 100644
index 000000000..eb0286a2c
--- /dev/null
+++ b/nuget.config
@@ -0,0 +1,4 @@
+
+
+ D:\NugetPackages
+
\ No newline at end of file
diff --git a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/FileRestitcher.Tests.csproj.nuget.dgspec.json b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/FileRestitcher.Tests.csproj.nuget.dgspec.json
new file mode 100644
index 000000000..e80c4a72b
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/FileRestitcher.Tests.csproj.nuget.dgspec.json
@@ -0,0 +1,224 @@
+{
+ "format": 1,
+ "restore": {
+ "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher.Tests\\FileRestitcher.Tests.csproj": {}
+ },
+ "projects": {
+ "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher.Tests\\FileRestitcher.Tests.csproj": {
+ "version": "1.0.0",
+ "restore": {
+ "projectUniqueName": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher.Tests\\FileRestitcher.Tests.csproj",
+ "projectName": "FileRestitcher.Tests",
+ "projectPath": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher.Tests\\FileRestitcher.Tests.csproj",
+ "packagesPath": "C:\\Users\\Dimitri\\.nuget\\packages\\",
+ "outputPath": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher.Tests\\FileRestitcher.Tests.NupkgProj\\",
+ "projectStyle": "PackageReference",
+ "crossTargeting": true,
+ "fallbackFolders": [
+ "C:\\Program Files (x86)\\Microsoft Visual Studio\\Shared\\NuGetPackages"
+ ],
+ "configFilePaths": [
+ "K:\\Proyects_Repos\\TorchSharp\\NuGet.Config",
+ "C:\\Users\\Dimitri\\AppData\\Roaming\\NuGet\\NuGet.Config",
+ "C:\\Program Files (x86)\\NuGet\\Config\\Microsoft.VisualStudio.FallbackLocation.config",
+ "C:\\Program Files (x86)\\NuGet\\Config\\Microsoft.VisualStudio.Offline.config"
+ ],
+ "originalTargetFrameworks": [
+ "net472",
+ "netstandard2.0"
+ ],
+ "sources": {
+ "C:\\Program Files (x86)\\Microsoft SDKs\\NuGetPackages\\": {},
+ "https://api.nuget.org/v3/index.json": {}
+ },
+ "frameworks": {
+ "net472": {
+ "targetAlias": "net472",
+ "projectReferences": {
+ "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj": {
+ "projectPath": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj"
+ }
+ }
+ },
+ "netstandard2.0": {
+ "targetAlias": "netstandard2.0",
+ "projectReferences": {
+ "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj": {
+ "projectPath": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj"
+ }
+ }
+ }
+ },
+ "warningProperties": {
+ "warnAsError": [
+ "NU1605"
+ ]
+ },
+ "restoreAuditProperties": {
+ "enableAudit": "true",
+ "auditLevel": "low",
+ "auditMode": "all"
+ },
+ "SdkAnalysisLevel": "9.0.100"
+ },
+ "frameworks": {
+ "net472": {
+ "targetAlias": "net472",
+ "dependencies": {
+ "Microsoft.NET.Test.Sdk": {
+ "suppressParent": "None",
+ "target": "Package",
+ "version": "[16.9.4, )"
+ },
+ "coverlet.collector": {
+ "include": "Runtime, Build, Native, ContentFiles, Analyzers, BuildTransitive",
+ "suppressParent": "All",
+ "target": "Package",
+ "version": "[3.0.2, )"
+ },
+ "xunit": {
+ "suppressParent": "None",
+ "target": "Package",
+ "version": "[2.4.2, )"
+ }
+ },
+ "runtimeIdentifierGraphPath": "C:\\Program Files\\dotnet\\sdk\\9.0.100\\RuntimeIdentifierGraph.json"
+ },
+ "netstandard2.0": {
+ "targetAlias": "netstandard2.0",
+ "dependencies": {
+ "Microsoft.NET.Test.Sdk": {
+ "suppressParent": "None",
+ "target": "Package",
+ "version": "[16.9.4, )"
+ },
+ "NETStandard.Library": {
+ "suppressParent": "All",
+ "target": "Package",
+ "version": "[2.0.3, )",
+ "autoReferenced": true
+ },
+ "coverlet.collector": {
+ "include": "Runtime, Build, Native, ContentFiles, Analyzers, BuildTransitive",
+ "suppressParent": "All",
+ "target": "Package",
+ "version": "[3.0.2, )"
+ },
+ "xunit": {
+ "suppressParent": "None",
+ "target": "Package",
+ "version": "[2.4.2, )"
+ }
+ },
+ "imports": [
+ "net461",
+ "net462",
+ "net47",
+ "net471",
+ "net472",
+ "net48",
+ "net481"
+ ],
+ "assetTargetFallback": true,
+ "warn": true,
+ "runtimeIdentifierGraphPath": "C:\\Program Files\\dotnet\\sdk\\9.0.100\\RuntimeIdentifierGraph.json"
+ }
+ }
+ },
+ "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj": {
+ "version": "1.0.0",
+ "restore": {
+ "projectUniqueName": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj",
+ "projectName": "FileRestitcher",
+ "projectPath": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj",
+ "packagesPath": "C:\\Users\\Dimitri\\.nuget\\packages\\",
+ "outputPath": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.NupkgProj\\",
+ "projectStyle": "PackageReference",
+ "crossTargeting": true,
+ "fallbackFolders": [
+ "C:\\Program Files (x86)\\Microsoft Visual Studio\\Shared\\NuGetPackages"
+ ],
+ "configFilePaths": [
+ "K:\\Proyects_Repos\\TorchSharp\\NuGet.Config",
+ "C:\\Users\\Dimitri\\AppData\\Roaming\\NuGet\\NuGet.Config",
+ "C:\\Program Files (x86)\\NuGet\\Config\\Microsoft.VisualStudio.FallbackLocation.config",
+ "C:\\Program Files (x86)\\NuGet\\Config\\Microsoft.VisualStudio.Offline.config"
+ ],
+ "originalTargetFrameworks": [
+ "net8.0",
+ "netstandard2.0"
+ ],
+ "sources": {
+ "C:\\Program Files (x86)\\Microsoft SDKs\\NuGetPackages\\": {},
+ "https://api.nuget.org/v3/index.json": {}
+ },
+ "frameworks": {
+ "net8.0": {
+ "targetAlias": "net8.0",
+ "projectReferences": {}
+ },
+ "netstandard2.0": {
+ "targetAlias": "netstandard2.0",
+ "projectReferences": {}
+ }
+ },
+ "warningProperties": {
+ "warnAsError": [
+ "NU1605"
+ ]
+ },
+ "restoreAuditProperties": {
+ "enableAudit": "true",
+ "auditLevel": "low",
+ "auditMode": "all"
+ },
+ "SdkAnalysisLevel": "9.0.100"
+ },
+ "frameworks": {
+ "net8.0": {
+ "targetAlias": "net8.0",
+ "imports": [
+ "net461",
+ "net462",
+ "net47",
+ "net471",
+ "net472",
+ "net48",
+ "net481"
+ ],
+ "assetTargetFallback": true,
+ "warn": true,
+ "frameworkReferences": {
+ "Microsoft.NETCore.App": {
+ "privateAssets": "all"
+ }
+ },
+ "runtimeIdentifierGraphPath": "C:\\Program Files\\dotnet\\sdk\\9.0.100\\RuntimeIdentifierGraph.json"
+ },
+ "netstandard2.0": {
+ "targetAlias": "netstandard2.0",
+ "dependencies": {
+ "NETStandard.Library": {
+ "suppressParent": "All",
+ "target": "Package",
+ "version": "[2.0.3, )",
+ "autoReferenced": true
+ }
+ },
+ "imports": [
+ "net461",
+ "net462",
+ "net47",
+ "net471",
+ "net472",
+ "net48",
+ "net481"
+ ],
+ "assetTargetFallback": true,
+ "warn": true,
+ "runtimeIdentifierGraphPath": "C:\\Program Files\\dotnet\\sdk\\9.0.100\\RuntimeIdentifierGraph.json"
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/FileRestitcher.Tests.csproj.nuget.g.props b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/FileRestitcher.Tests.csproj.nuget.g.props
new file mode 100644
index 000000000..7adfe6ee9
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/FileRestitcher.Tests.csproj.nuget.g.props
@@ -0,0 +1,35 @@
+
+
+
+ True
+ NuGet
+ $(MSBuildThisFileDirectory)project.assets.json
+ $(UserProfile)\.nuget\packages\
+ C:\Users\Dimitri\.nuget\packages\;C:\Program Files (x86)\Microsoft Visual Studio\Shared\NuGetPackages
+ PackageReference
+ 6.12.0
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ C:\Users\Dimitri\.nuget\packages\xunit.analyzers\1.0.0
+
+
+ C:\Users\Dimitri\.nuget\packages\xunit.analyzers\1.0.0
+
+
\ No newline at end of file
diff --git a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/FileRestitcher.Tests.csproj.nuget.g.targets b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/FileRestitcher.Tests.csproj.nuget.g.targets
new file mode 100644
index 000000000..89347f8d0
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/FileRestitcher.Tests.csproj.nuget.g.targets
@@ -0,0 +1,18 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/.NETFramework,Version=v4.7.2.AssemblyAttributes.cs b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/.NETFramework,Version=v4.7.2.AssemblyAttributes.cs
new file mode 100644
index 000000000..3871b184d
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/.NETFramework,Version=v4.7.2.AssemblyAttributes.cs
@@ -0,0 +1,4 @@
+//
+using System;
+using System.Reflection;
+[assembly: global::System.Runtime.Versioning.TargetFrameworkAttribute(".NETFramework,Version=v4.7.2", FrameworkDisplayName = ".NET Framework 4.7.2")]
diff --git a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/FileRestitcher.Tests.AssemblyInfo.cs b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/FileRestitcher.Tests.AssemblyInfo.cs
new file mode 100644
index 000000000..13943a5c5
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/FileRestitcher.Tests.AssemblyInfo.cs
@@ -0,0 +1,24 @@
+//------------------------------------------------------------------------------
+//
+// Este código fue generado por una herramienta.
+// Versión de runtime:4.0.30319.42000
+//
+// Los cambios en este archivo podrían causar un comportamiento incorrecto y se perderán si
+// se vuelve a generar el código.
+//
+//------------------------------------------------------------------------------
+
+using System;
+using System.Reflection;
+
+[assembly: System.Reflection.AssemblyCompanyAttribute("TorchSharp contributors")]
+[assembly: System.Reflection.AssemblyConfigurationAttribute("Debug")]
+[assembly: System.Reflection.AssemblyCopyrightAttribute("Copyright .NET Foundation and Contributors")]
+[assembly: System.Reflection.AssemblyFileVersionAttribute("1.0.0.0")]
+[assembly: System.Reflection.AssemblyInformationalVersionAttribute("1.0.0+4436c93f069a66702e1d89cb9325f40b734bbaa5")]
+[assembly: System.Reflection.AssemblyProductAttribute("FileRestitcher.Tests")]
+[assembly: System.Reflection.AssemblyTitleAttribute("FileRestitcher.Tests")]
+[assembly: System.Reflection.AssemblyVersionAttribute("1.0.0.0")]
+
+// Generado por la clase WriteCodeFragment de MSBuild.
+
diff --git a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/FileRestitcher.Tests.AssemblyInfoInputs.cache b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/FileRestitcher.Tests.AssemblyInfoInputs.cache
new file mode 100644
index 000000000..afd8ba288
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/FileRestitcher.Tests.AssemblyInfoInputs.cache
@@ -0,0 +1 @@
+8466daae7b02d90eea4b8dd285e7b97a791318ca4c0dc896730fa1366db17dd6
diff --git a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/FileRestitcher.Tests.GeneratedMSBuildEditorConfig.editorconfig b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/FileRestitcher.Tests.GeneratedMSBuildEditorConfig.editorconfig
new file mode 100644
index 000000000..573a47838
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/FileRestitcher.Tests.GeneratedMSBuildEditorConfig.editorconfig
@@ -0,0 +1,8 @@
+is_global = true
+build_property.RootNamespace = FileRestitcher.Tests
+build_property.ProjectDir = K:\Proyects_Repos\TorchSharp\pkg\FileRestitcher\FileRestitcher.Tests\
+build_property.EnableComHosting =
+build_property.EnableGeneratedComInterfaceComImportInterop =
+build_property.CsWinRTUseWindowsUIXamlProjections = false
+build_property.EffectiveAnalysisLevelStyle =
+build_property.EnableCodeStyleSeverity =
diff --git a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/FileRestitcher.Tests.assets.cache b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/FileRestitcher.Tests.assets.cache
new file mode 100644
index 000000000..bc3774fa6
Binary files /dev/null and b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/FileRestitcher.Tests.assets.cache differ
diff --git a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/FileRestitcher.Tests.csproj.AssemblyReference.cache b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/FileRestitcher.Tests.csproj.AssemblyReference.cache
new file mode 100644
index 000000000..dbb4be1c9
Binary files /dev/null and b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net472/FileRestitcher.Tests.csproj.AssemblyReference.cache differ
diff --git a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net8.0/.NETCoreApp,Version=v8.0.AssemblyAttributes.cs b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net8.0/.NETCoreApp,Version=v8.0.AssemblyAttributes.cs
new file mode 100644
index 000000000..2217181c8
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net8.0/.NETCoreApp,Version=v8.0.AssemblyAttributes.cs
@@ -0,0 +1,4 @@
+//
+using System;
+using System.Reflection;
+[assembly: global::System.Runtime.Versioning.TargetFrameworkAttribute(".NETCoreApp,Version=v8.0", FrameworkDisplayName = ".NET 8.0")]
diff --git a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net8.0/FileRestitcher.Tests.AssemblyInfo.cs b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net8.0/FileRestitcher.Tests.AssemblyInfo.cs
new file mode 100644
index 000000000..13943a5c5
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net8.0/FileRestitcher.Tests.AssemblyInfo.cs
@@ -0,0 +1,24 @@
+//------------------------------------------------------------------------------
+//
+// Este código fue generado por una herramienta.
+// Versión de runtime:4.0.30319.42000
+//
+// Los cambios en este archivo podrían causar un comportamiento incorrecto y se perderán si
+// se vuelve a generar el código.
+//
+//------------------------------------------------------------------------------
+
+using System;
+using System.Reflection;
+
+[assembly: System.Reflection.AssemblyCompanyAttribute("TorchSharp contributors")]
+[assembly: System.Reflection.AssemblyConfigurationAttribute("Debug")]
+[assembly: System.Reflection.AssemblyCopyrightAttribute("Copyright .NET Foundation and Contributors")]
+[assembly: System.Reflection.AssemblyFileVersionAttribute("1.0.0.0")]
+[assembly: System.Reflection.AssemblyInformationalVersionAttribute("1.0.0+4436c93f069a66702e1d89cb9325f40b734bbaa5")]
+[assembly: System.Reflection.AssemblyProductAttribute("FileRestitcher.Tests")]
+[assembly: System.Reflection.AssemblyTitleAttribute("FileRestitcher.Tests")]
+[assembly: System.Reflection.AssemblyVersionAttribute("1.0.0.0")]
+
+// Generado por la clase WriteCodeFragment de MSBuild.
+
diff --git a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net8.0/FileRestitcher.Tests.AssemblyInfoInputs.cache b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net8.0/FileRestitcher.Tests.AssemblyInfoInputs.cache
new file mode 100644
index 000000000..afd8ba288
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net8.0/FileRestitcher.Tests.AssemblyInfoInputs.cache
@@ -0,0 +1 @@
+8466daae7b02d90eea4b8dd285e7b97a791318ca4c0dc896730fa1366db17dd6
diff --git a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net8.0/FileRestitcher.Tests.GeneratedMSBuildEditorConfig.editorconfig b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net8.0/FileRestitcher.Tests.GeneratedMSBuildEditorConfig.editorconfig
new file mode 100644
index 000000000..7957ddc75
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/net8.0/FileRestitcher.Tests.GeneratedMSBuildEditorConfig.editorconfig
@@ -0,0 +1,15 @@
+is_global = true
+build_property.TargetFramework = net8.0
+build_property.TargetPlatformMinVersion =
+build_property.UsingMicrosoftNETSdkWeb =
+build_property.ProjectTypeGuids =
+build_property.InvariantGlobalization =
+build_property.PlatformNeutralAssembly =
+build_property.EnforceExtendedAnalyzerRules =
+build_property._SupportedPlatformList = Linux,macOS,Windows
+build_property.RootNamespace = FileRestitcher.Tests
+build_property.ProjectDir = K:\Proyects_Repos\TorchSharp\pkg\FileRestitcher\FileRestitcher.Tests\
+build_property.EnableComHosting =
+build_property.EnableGeneratedComInterfaceComImportInterop =
+build_property.EffectiveAnalysisLevelStyle = 8.0
+build_property.EnableCodeStyleSeverity =
diff --git a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/project.assets.json b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/project.assets.json
new file mode 100644
index 000000000..ac4726f8d
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/project.assets.json
@@ -0,0 +1,841 @@
+{
+ "version": 3,
+ "targets": {
+ ".NETFramework,Version=v4.7.2": {
+ "coverlet.collector/3.0.2": {
+ "type": "package",
+ "build": {
+ "build/netstandard1.0/coverlet.collector.targets": {}
+ }
+ },
+ "Microsoft.CodeCoverage/16.9.4": {
+ "type": "package",
+ "compile": {
+ "lib/net45/Microsoft.VisualStudio.CodeCoverage.Shim.dll": {}
+ },
+ "runtime": {
+ "lib/net45/Microsoft.VisualStudio.CodeCoverage.Shim.dll": {}
+ },
+ "build": {
+ "build/netstandard1.0/Microsoft.CodeCoverage.props": {},
+ "build/netstandard1.0/Microsoft.CodeCoverage.targets": {}
+ }
+ },
+ "Microsoft.NET.Test.Sdk/16.9.4": {
+ "type": "package",
+ "dependencies": {
+ "Microsoft.CodeCoverage": "16.9.4"
+ },
+ "compile": {
+ "lib/net45/_._": {}
+ },
+ "runtime": {
+ "lib/net45/_._": {}
+ },
+ "build": {
+ "build/net45/Microsoft.NET.Test.Sdk.props": {},
+ "build/net45/Microsoft.NET.Test.Sdk.targets": {}
+ },
+ "buildMultiTargeting": {
+ "buildMultiTargeting/Microsoft.NET.Test.Sdk.props": {}
+ }
+ },
+ "xunit/2.4.2": {
+ "type": "package",
+ "dependencies": {
+ "xunit.analyzers": "1.0.0",
+ "xunit.assert": "2.4.2",
+ "xunit.core": "[2.4.2]"
+ }
+ },
+ "xunit.abstractions/2.0.3": {
+ "type": "package",
+ "compile": {
+ "lib/net35/xunit.abstractions.dll": {
+ "related": ".xml"
+ }
+ },
+ "runtime": {
+ "lib/net35/xunit.abstractions.dll": {
+ "related": ".xml"
+ }
+ }
+ },
+ "xunit.analyzers/1.0.0": {
+ "type": "package"
+ },
+ "xunit.assert/2.4.2": {
+ "type": "package",
+ "compile": {
+ "lib/netstandard1.1/xunit.assert.dll": {
+ "related": ".xml"
+ }
+ },
+ "runtime": {
+ "lib/netstandard1.1/xunit.assert.dll": {
+ "related": ".xml"
+ }
+ }
+ },
+ "xunit.core/2.4.2": {
+ "type": "package",
+ "dependencies": {
+ "xunit.extensibility.core": "[2.4.2]",
+ "xunit.extensibility.execution": "[2.4.2]"
+ },
+ "build": {
+ "build/xunit.core.props": {},
+ "build/xunit.core.targets": {}
+ },
+ "buildMultiTargeting": {
+ "buildMultiTargeting/xunit.core.props": {},
+ "buildMultiTargeting/xunit.core.targets": {}
+ }
+ },
+ "xunit.extensibility.core/2.4.2": {
+ "type": "package",
+ "dependencies": {
+ "xunit.abstractions": "2.0.3"
+ },
+ "compile": {
+ "lib/net452/xunit.core.dll": {
+ "related": ".dll.tdnet;.xml"
+ }
+ },
+ "runtime": {
+ "lib/net452/xunit.core.dll": {
+ "related": ".dll.tdnet;.xml"
+ }
+ }
+ },
+ "xunit.extensibility.execution/2.4.2": {
+ "type": "package",
+ "dependencies": {
+ "xunit.extensibility.core": "[2.4.2]"
+ },
+ "compile": {
+ "lib/net452/xunit.execution.desktop.dll": {
+ "related": ".xml"
+ }
+ },
+ "runtime": {
+ "lib/net452/xunit.execution.desktop.dll": {
+ "related": ".xml"
+ }
+ }
+ },
+ "FileRestitcher/1.0.0": {
+ "type": "project",
+ "framework": ".NETStandard,Version=v2.0",
+ "compile": {
+ "bin/placeholder/FileRestitcher.dll": {}
+ },
+ "runtime": {
+ "bin/placeholder/FileRestitcher.dll": {}
+ }
+ }
+ },
+ ".NETStandard,Version=v2.0": {
+ "coverlet.collector/3.0.2": {
+ "type": "package",
+ "build": {
+ "build/netstandard1.0/coverlet.collector.targets": {}
+ }
+ },
+ "Microsoft.CodeCoverage/16.9.4": {
+ "type": "package",
+ "build": {
+ "build/netstandard1.0/Microsoft.CodeCoverage.props": {},
+ "build/netstandard1.0/Microsoft.CodeCoverage.targets": {}
+ }
+ },
+ "Microsoft.NET.Test.Sdk/16.9.4": {
+ "type": "package",
+ "dependencies": {
+ "Microsoft.CodeCoverage": "16.9.4"
+ },
+ "buildMultiTargeting": {
+ "buildMultiTargeting/Microsoft.NET.Test.Sdk.props": {}
+ }
+ },
+ "Microsoft.NETCore.Platforms/1.1.0": {
+ "type": "package",
+ "compile": {
+ "lib/netstandard1.0/_._": {}
+ },
+ "runtime": {
+ "lib/netstandard1.0/_._": {}
+ }
+ },
+ "NETStandard.Library/2.0.3": {
+ "type": "package",
+ "dependencies": {
+ "Microsoft.NETCore.Platforms": "1.1.0"
+ },
+ "compile": {
+ "lib/netstandard1.0/_._": {}
+ },
+ "runtime": {
+ "lib/netstandard1.0/_._": {}
+ },
+ "build": {
+ "build/netstandard2.0/NETStandard.Library.targets": {}
+ }
+ },
+ "xunit/2.4.2": {
+ "type": "package",
+ "dependencies": {
+ "xunit.analyzers": "1.0.0",
+ "xunit.assert": "2.4.2",
+ "xunit.core": "[2.4.2]"
+ }
+ },
+ "xunit.abstractions/2.0.3": {
+ "type": "package",
+ "compile": {
+ "lib/netstandard2.0/xunit.abstractions.dll": {
+ "related": ".xml"
+ }
+ },
+ "runtime": {
+ "lib/netstandard2.0/xunit.abstractions.dll": {
+ "related": ".xml"
+ }
+ }
+ },
+ "xunit.analyzers/1.0.0": {
+ "type": "package"
+ },
+ "xunit.assert/2.4.2": {
+ "type": "package",
+ "dependencies": {
+ "NETStandard.Library": "1.6.1"
+ },
+ "compile": {
+ "lib/netstandard1.1/xunit.assert.dll": {
+ "related": ".xml"
+ }
+ },
+ "runtime": {
+ "lib/netstandard1.1/xunit.assert.dll": {
+ "related": ".xml"
+ }
+ }
+ },
+ "xunit.core/2.4.2": {
+ "type": "package",
+ "dependencies": {
+ "xunit.extensibility.core": "[2.4.2]",
+ "xunit.extensibility.execution": "[2.4.2]"
+ },
+ "build": {
+ "build/xunit.core.props": {},
+ "build/xunit.core.targets": {}
+ },
+ "buildMultiTargeting": {
+ "buildMultiTargeting/xunit.core.props": {},
+ "buildMultiTargeting/xunit.core.targets": {}
+ }
+ },
+ "xunit.extensibility.core/2.4.2": {
+ "type": "package",
+ "dependencies": {
+ "NETStandard.Library": "1.6.1",
+ "xunit.abstractions": "2.0.3"
+ },
+ "compile": {
+ "lib/netstandard1.1/xunit.core.dll": {
+ "related": ".xml"
+ }
+ },
+ "runtime": {
+ "lib/netstandard1.1/xunit.core.dll": {
+ "related": ".xml"
+ }
+ }
+ },
+ "xunit.extensibility.execution/2.4.2": {
+ "type": "package",
+ "dependencies": {
+ "NETStandard.Library": "1.6.1",
+ "xunit.extensibility.core": "[2.4.2]"
+ },
+ "compile": {
+ "lib/netstandard1.1/xunit.execution.dotnet.dll": {
+ "related": ".xml"
+ }
+ },
+ "runtime": {
+ "lib/netstandard1.1/xunit.execution.dotnet.dll": {
+ "related": ".xml"
+ }
+ }
+ },
+ "FileRestitcher/1.0.0": {
+ "type": "project",
+ "framework": ".NETStandard,Version=v2.0",
+ "compile": {
+ "bin/placeholder/FileRestitcher.dll": {}
+ },
+ "runtime": {
+ "bin/placeholder/FileRestitcher.dll": {}
+ }
+ }
+ }
+ },
+ "libraries": {
+ "coverlet.collector/3.0.2": {
+ "sha512": "iBvPAIDaI7j/iMx/DzCGCJ3rdiOmel9VINEfaTiBv/NKIGHOP4X3hqc6Q1wgMtArEshlhXexQknP17SK4vXb1w==",
+ "type": "package",
+ "path": "coverlet.collector/3.0.2",
+ "files": [
+ ".nupkg.metadata",
+ ".signature.p7s",
+ "build/netstandard1.0/Microsoft.CSharp.dll",
+ "build/netstandard1.0/Microsoft.DotNet.PlatformAbstractions.dll",
+ "build/netstandard1.0/Microsoft.Extensions.DependencyInjection.Abstractions.dll",
+ "build/netstandard1.0/Microsoft.Extensions.DependencyInjection.dll",
+ "build/netstandard1.0/Microsoft.Extensions.DependencyModel.dll",
+ "build/netstandard1.0/Microsoft.Extensions.FileSystemGlobbing.dll",
+ "build/netstandard1.0/Microsoft.TestPlatform.CoreUtilities.dll",
+ "build/netstandard1.0/Microsoft.TestPlatform.PlatformAbstractions.dll",
+ "build/netstandard1.0/Microsoft.VisualStudio.TestPlatform.ObjectModel.dll",
+ "build/netstandard1.0/Mono.Cecil.Mdb.dll",
+ "build/netstandard1.0/Mono.Cecil.Pdb.dll",
+ "build/netstandard1.0/Mono.Cecil.Rocks.dll",
+ "build/netstandard1.0/Mono.Cecil.dll",
+ "build/netstandard1.0/Newtonsoft.Json.dll",
+ "build/netstandard1.0/NuGet.Frameworks.dll",
+ "build/netstandard1.0/System.AppContext.dll",
+ "build/netstandard1.0/System.Collections.Immutable.dll",
+ "build/netstandard1.0/System.Dynamic.Runtime.dll",
+ "build/netstandard1.0/System.IO.FileSystem.Primitives.dll",
+ "build/netstandard1.0/System.Linq.Expressions.dll",
+ "build/netstandard1.0/System.Linq.dll",
+ "build/netstandard1.0/System.ObjectModel.dll",
+ "build/netstandard1.0/System.Reflection.Emit.ILGeneration.dll",
+ "build/netstandard1.0/System.Reflection.Emit.Lightweight.dll",
+ "build/netstandard1.0/System.Reflection.Emit.dll",
+ "build/netstandard1.0/System.Reflection.Metadata.dll",
+ "build/netstandard1.0/System.Reflection.TypeExtensions.dll",
+ "build/netstandard1.0/System.Runtime.Serialization.Primitives.dll",
+ "build/netstandard1.0/System.Text.RegularExpressions.dll",
+ "build/netstandard1.0/System.Threading.Tasks.Extensions.dll",
+ "build/netstandard1.0/System.Threading.dll",
+ "build/netstandard1.0/System.Xml.ReaderWriter.dll",
+ "build/netstandard1.0/System.Xml.XDocument.dll",
+ "build/netstandard1.0/coverlet.collector.deps.json",
+ "build/netstandard1.0/coverlet.collector.dll",
+ "build/netstandard1.0/coverlet.collector.pdb",
+ "build/netstandard1.0/coverlet.collector.targets",
+ "build/netstandard1.0/coverlet.core.dll",
+ "build/netstandard1.0/coverlet.core.pdb",
+ "coverlet-icon.png",
+ "coverlet.collector.3.0.2.nupkg.sha512",
+ "coverlet.collector.nuspec"
+ ]
+ },
+ "Microsoft.CodeCoverage/16.9.4": {
+ "sha512": "N/RYB07gJkPZ1nJiq0QGxFIL+X5vVl4GI99PiTYXpbfI30NTZMRJgZ+4jYLFYLDQqj9o1Juhv+3iiymd7lozrA==",
+ "type": "package",
+ "path": "microsoft.codecoverage/16.9.4",
+ "files": [
+ ".nupkg.metadata",
+ ".signature.p7s",
+ "Icon.png",
+ "LICENSE_NET.txt",
+ "build/netstandard1.0/CodeCoverage/CodeCoverage.config",
+ "build/netstandard1.0/CodeCoverage/CodeCoverage.exe",
+ "build/netstandard1.0/CodeCoverage/VanguardInstrumentationProfiler_x86.config",
+ "build/netstandard1.0/CodeCoverage/amd64/CodeCoverage.exe",
+ "build/netstandard1.0/CodeCoverage/amd64/VanguardInstrumentationProfiler_x64.config",
+ "build/netstandard1.0/CodeCoverage/amd64/covrun64.dll",
+ "build/netstandard1.0/CodeCoverage/amd64/msdia140.dll",
+ "build/netstandard1.0/CodeCoverage/amd64/msvcdis140.dll",
+ "build/netstandard1.0/CodeCoverage/amd64/msvcp140.dll",
+ "build/netstandard1.0/CodeCoverage/amd64/msvcp140_atomic_wait.dll",
+ "build/netstandard1.0/CodeCoverage/amd64/vcruntime140.dll",
+ "build/netstandard1.0/CodeCoverage/amd64/vcruntime140_1.dll",
+ "build/netstandard1.0/CodeCoverage/codecoveragemessages.dll",
+ "build/netstandard1.0/CodeCoverage/coreclr/Microsoft.VisualStudio.CodeCoverage.Shim.dll",
+ "build/netstandard1.0/CodeCoverage/covrun32.dll",
+ "build/netstandard1.0/CodeCoverage/msdia140.dll",
+ "build/netstandard1.0/CodeCoverage/msvcdis140.dll",
+ "build/netstandard1.0/CodeCoverage/msvcp140.dll",
+ "build/netstandard1.0/CodeCoverage/msvcp140_atomic_wait.dll",
+ "build/netstandard1.0/CodeCoverage/vcruntime140.dll",
+ "build/netstandard1.0/InstrumentationEngine/x64/MicrosoftInstrumentationEngine_x64.dll",
+ "build/netstandard1.0/InstrumentationEngine/x86/MicrosoftInstrumentationEngine_x86.dll",
+ "build/netstandard1.0/Microsoft.CodeCoverage.props",
+ "build/netstandard1.0/Microsoft.CodeCoverage.targets",
+ "build/netstandard1.0/Microsoft.VisualStudio.Coverage.CoreLib.Net.dll",
+ "build/netstandard1.0/Microsoft.VisualStudio.Coverage.Interprocess.dll",
+ "build/netstandard1.0/Microsoft.VisualStudio.TraceDataCollector.dll",
+ "build/netstandard1.0/cs/Microsoft.VisualStudio.Coverage.CoreLib.Net.resources.dll",
+ "build/netstandard1.0/cs/Microsoft.VisualStudio.TraceDataCollector.resources.dll",
+ "build/netstandard1.0/de/Microsoft.VisualStudio.Coverage.CoreLib.Net.resources.dll",
+ "build/netstandard1.0/de/Microsoft.VisualStudio.TraceDataCollector.resources.dll",
+ "build/netstandard1.0/es/Microsoft.VisualStudio.Coverage.CoreLib.Net.resources.dll",
+ "build/netstandard1.0/es/Microsoft.VisualStudio.TraceDataCollector.resources.dll",
+ "build/netstandard1.0/fr/Microsoft.VisualStudio.Coverage.CoreLib.Net.resources.dll",
+ "build/netstandard1.0/fr/Microsoft.VisualStudio.TraceDataCollector.resources.dll",
+ "build/netstandard1.0/it/Microsoft.VisualStudio.Coverage.CoreLib.Net.resources.dll",
+ "build/netstandard1.0/it/Microsoft.VisualStudio.TraceDataCollector.resources.dll",
+ "build/netstandard1.0/ja/Microsoft.VisualStudio.Coverage.CoreLib.Net.resources.dll",
+ "build/netstandard1.0/ja/Microsoft.VisualStudio.TraceDataCollector.resources.dll",
+ "build/netstandard1.0/ko/Microsoft.VisualStudio.Coverage.CoreLib.Net.resources.dll",
+ "build/netstandard1.0/ko/Microsoft.VisualStudio.TraceDataCollector.resources.dll",
+ "build/netstandard1.0/pl/Microsoft.VisualStudio.Coverage.CoreLib.Net.resources.dll",
+ "build/netstandard1.0/pl/Microsoft.VisualStudio.TraceDataCollector.resources.dll",
+ "build/netstandard1.0/pt-BR/Microsoft.VisualStudio.Coverage.CoreLib.Net.resources.dll",
+ "build/netstandard1.0/pt-BR/Microsoft.VisualStudio.TraceDataCollector.resources.dll",
+ "build/netstandard1.0/ru/Microsoft.VisualStudio.Coverage.CoreLib.Net.resources.dll",
+ "build/netstandard1.0/ru/Microsoft.VisualStudio.TraceDataCollector.resources.dll",
+ "build/netstandard1.0/tr/Microsoft.VisualStudio.Coverage.CoreLib.Net.resources.dll",
+ "build/netstandard1.0/tr/Microsoft.VisualStudio.TraceDataCollector.resources.dll",
+ "build/netstandard1.0/zh-Hans/Microsoft.VisualStudio.Coverage.CoreLib.Net.resources.dll",
+ "build/netstandard1.0/zh-Hans/Microsoft.VisualStudio.TraceDataCollector.resources.dll",
+ "build/netstandard1.0/zh-Hant/Microsoft.VisualStudio.Coverage.CoreLib.Net.resources.dll",
+ "build/netstandard1.0/zh-Hant/Microsoft.VisualStudio.TraceDataCollector.resources.dll",
+ "lib/net45/Microsoft.VisualStudio.CodeCoverage.Shim.dll",
+ "lib/netcoreapp1.0/Microsoft.VisualStudio.CodeCoverage.Shim.dll",
+ "microsoft.codecoverage.16.9.4.nupkg.sha512",
+ "microsoft.codecoverage.nuspec"
+ ]
+ },
+ "Microsoft.NET.Test.Sdk/16.9.4": {
+ "sha512": "M/k16vmS7Hz/+Kuy3p6XE743XPjYYMzfN5ZvpSLY44Ngh5IBMk0Je5Qed8oq6/kvzJA2DTrXa7YrfceHhbQKeQ==",
+ "type": "package",
+ "path": "microsoft.net.test.sdk/16.9.4",
+ "files": [
+ ".nupkg.metadata",
+ ".signature.p7s",
+ "Icon.png",
+ "LICENSE_NET.txt",
+ "build/net40/Microsoft.NET.Test.Sdk.props",
+ "build/net40/Microsoft.NET.Test.Sdk.targets",
+ "build/net45/Microsoft.NET.Test.Sdk.props",
+ "build/net45/Microsoft.NET.Test.Sdk.targets",
+ "build/netcoreapp1.0/Microsoft.NET.Test.Sdk.Program.cs",
+ "build/netcoreapp1.0/Microsoft.NET.Test.Sdk.Program.fs",
+ "build/netcoreapp1.0/Microsoft.NET.Test.Sdk.Program.vb",
+ "build/netcoreapp1.0/Microsoft.NET.Test.Sdk.props",
+ "build/netcoreapp1.0/Microsoft.NET.Test.Sdk.targets",
+ "build/netcoreapp2.1/Microsoft.NET.Test.Sdk.Program.cs",
+ "build/netcoreapp2.1/Microsoft.NET.Test.Sdk.Program.fs",
+ "build/netcoreapp2.1/Microsoft.NET.Test.Sdk.Program.vb",
+ "build/netcoreapp2.1/Microsoft.NET.Test.Sdk.props",
+ "build/netcoreapp2.1/Microsoft.NET.Test.Sdk.targets",
+ "build/uap10.0/Microsoft.NET.Test.Sdk.props",
+ "buildMultiTargeting/Microsoft.NET.Test.Sdk.props",
+ "lib/net40/_._",
+ "lib/net45/_._",
+ "lib/netcoreapp1.0/_._",
+ "lib/netcoreapp2.1/_._",
+ "lib/uap10.0/_._",
+ "microsoft.net.test.sdk.16.9.4.nupkg.sha512",
+ "microsoft.net.test.sdk.nuspec"
+ ]
+ },
+ "Microsoft.NETCore.Platforms/1.1.0": {
+ "sha512": "kz0PEW2lhqygehI/d6XsPCQzD7ff7gUJaVGPVETX611eadGsA3A877GdSlU0LRVMCTH/+P3o2iDTak+S08V2+A==",
+ "type": "package",
+ "path": "microsoft.netcore.platforms/1.1.0",
+ "files": [
+ ".nupkg.metadata",
+ ".signature.p7s",
+ "ThirdPartyNotices.txt",
+ "dotnet_library_license.txt",
+ "lib/netstandard1.0/_._",
+ "microsoft.netcore.platforms.1.1.0.nupkg.sha512",
+ "microsoft.netcore.platforms.nuspec",
+ "runtime.json"
+ ]
+ },
+ "NETStandard.Library/2.0.3": {
+ "sha512": "st47PosZSHrjECdjeIzZQbzivYBJFv6P2nv4cj2ypdI204DO+vZ7l5raGMiX4eXMJ53RfOIg+/s4DHVZ54Nu2A==",
+ "type": "package",
+ "path": "netstandard.library/2.0.3",
+ "files": [
+ ".nupkg.metadata",
+ ".signature.p7s",
+ "LICENSE.TXT",
+ "THIRD-PARTY-NOTICES.TXT",
+ "build/netstandard2.0/NETStandard.Library.targets",
+ "build/netstandard2.0/ref/Microsoft.Win32.Primitives.dll",
+ "build/netstandard2.0/ref/System.AppContext.dll",
+ "build/netstandard2.0/ref/System.Collections.Concurrent.dll",
+ "build/netstandard2.0/ref/System.Collections.NonGeneric.dll",
+ "build/netstandard2.0/ref/System.Collections.Specialized.dll",
+ "build/netstandard2.0/ref/System.Collections.dll",
+ "build/netstandard2.0/ref/System.ComponentModel.Composition.dll",
+ "build/netstandard2.0/ref/System.ComponentModel.EventBasedAsync.dll",
+ "build/netstandard2.0/ref/System.ComponentModel.Primitives.dll",
+ "build/netstandard2.0/ref/System.ComponentModel.TypeConverter.dll",
+ "build/netstandard2.0/ref/System.ComponentModel.dll",
+ "build/netstandard2.0/ref/System.Console.dll",
+ "build/netstandard2.0/ref/System.Core.dll",
+ "build/netstandard2.0/ref/System.Data.Common.dll",
+ "build/netstandard2.0/ref/System.Data.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.Contracts.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.Debug.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.FileVersionInfo.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.Process.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.StackTrace.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.TextWriterTraceListener.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.Tools.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.TraceSource.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.Tracing.dll",
+ "build/netstandard2.0/ref/System.Drawing.Primitives.dll",
+ "build/netstandard2.0/ref/System.Drawing.dll",
+ "build/netstandard2.0/ref/System.Dynamic.Runtime.dll",
+ "build/netstandard2.0/ref/System.Globalization.Calendars.dll",
+ "build/netstandard2.0/ref/System.Globalization.Extensions.dll",
+ "build/netstandard2.0/ref/System.Globalization.dll",
+ "build/netstandard2.0/ref/System.IO.Compression.FileSystem.dll",
+ "build/netstandard2.0/ref/System.IO.Compression.ZipFile.dll",
+ "build/netstandard2.0/ref/System.IO.Compression.dll",
+ "build/netstandard2.0/ref/System.IO.FileSystem.DriveInfo.dll",
+ "build/netstandard2.0/ref/System.IO.FileSystem.Primitives.dll",
+ "build/netstandard2.0/ref/System.IO.FileSystem.Watcher.dll",
+ "build/netstandard2.0/ref/System.IO.FileSystem.dll",
+ "build/netstandard2.0/ref/System.IO.IsolatedStorage.dll",
+ "build/netstandard2.0/ref/System.IO.MemoryMappedFiles.dll",
+ "build/netstandard2.0/ref/System.IO.Pipes.dll",
+ "build/netstandard2.0/ref/System.IO.UnmanagedMemoryStream.dll",
+ "build/netstandard2.0/ref/System.IO.dll",
+ "build/netstandard2.0/ref/System.Linq.Expressions.dll",
+ "build/netstandard2.0/ref/System.Linq.Parallel.dll",
+ "build/netstandard2.0/ref/System.Linq.Queryable.dll",
+ "build/netstandard2.0/ref/System.Linq.dll",
+ "build/netstandard2.0/ref/System.Net.Http.dll",
+ "build/netstandard2.0/ref/System.Net.NameResolution.dll",
+ "build/netstandard2.0/ref/System.Net.NetworkInformation.dll",
+ "build/netstandard2.0/ref/System.Net.Ping.dll",
+ "build/netstandard2.0/ref/System.Net.Primitives.dll",
+ "build/netstandard2.0/ref/System.Net.Requests.dll",
+ "build/netstandard2.0/ref/System.Net.Security.dll",
+ "build/netstandard2.0/ref/System.Net.Sockets.dll",
+ "build/netstandard2.0/ref/System.Net.WebHeaderCollection.dll",
+ "build/netstandard2.0/ref/System.Net.WebSockets.Client.dll",
+ "build/netstandard2.0/ref/System.Net.WebSockets.dll",
+ "build/netstandard2.0/ref/System.Net.dll",
+ "build/netstandard2.0/ref/System.Numerics.dll",
+ "build/netstandard2.0/ref/System.ObjectModel.dll",
+ "build/netstandard2.0/ref/System.Reflection.Extensions.dll",
+ "build/netstandard2.0/ref/System.Reflection.Primitives.dll",
+ "build/netstandard2.0/ref/System.Reflection.dll",
+ "build/netstandard2.0/ref/System.Resources.Reader.dll",
+ "build/netstandard2.0/ref/System.Resources.ResourceManager.dll",
+ "build/netstandard2.0/ref/System.Resources.Writer.dll",
+ "build/netstandard2.0/ref/System.Runtime.CompilerServices.VisualC.dll",
+ "build/netstandard2.0/ref/System.Runtime.Extensions.dll",
+ "build/netstandard2.0/ref/System.Runtime.Handles.dll",
+ "build/netstandard2.0/ref/System.Runtime.InteropServices.RuntimeInformation.dll",
+ "build/netstandard2.0/ref/System.Runtime.InteropServices.dll",
+ "build/netstandard2.0/ref/System.Runtime.Numerics.dll",
+ "build/netstandard2.0/ref/System.Runtime.Serialization.Formatters.dll",
+ "build/netstandard2.0/ref/System.Runtime.Serialization.Json.dll",
+ "build/netstandard2.0/ref/System.Runtime.Serialization.Primitives.dll",
+ "build/netstandard2.0/ref/System.Runtime.Serialization.Xml.dll",
+ "build/netstandard2.0/ref/System.Runtime.Serialization.dll",
+ "build/netstandard2.0/ref/System.Runtime.dll",
+ "build/netstandard2.0/ref/System.Security.Claims.dll",
+ "build/netstandard2.0/ref/System.Security.Cryptography.Algorithms.dll",
+ "build/netstandard2.0/ref/System.Security.Cryptography.Csp.dll",
+ "build/netstandard2.0/ref/System.Security.Cryptography.Encoding.dll",
+ "build/netstandard2.0/ref/System.Security.Cryptography.Primitives.dll",
+ "build/netstandard2.0/ref/System.Security.Cryptography.X509Certificates.dll",
+ "build/netstandard2.0/ref/System.Security.Principal.dll",
+ "build/netstandard2.0/ref/System.Security.SecureString.dll",
+ "build/netstandard2.0/ref/System.ServiceModel.Web.dll",
+ "build/netstandard2.0/ref/System.Text.Encoding.Extensions.dll",
+ "build/netstandard2.0/ref/System.Text.Encoding.dll",
+ "build/netstandard2.0/ref/System.Text.RegularExpressions.dll",
+ "build/netstandard2.0/ref/System.Threading.Overlapped.dll",
+ "build/netstandard2.0/ref/System.Threading.Tasks.Parallel.dll",
+ "build/netstandard2.0/ref/System.Threading.Tasks.dll",
+ "build/netstandard2.0/ref/System.Threading.Thread.dll",
+ "build/netstandard2.0/ref/System.Threading.ThreadPool.dll",
+ "build/netstandard2.0/ref/System.Threading.Timer.dll",
+ "build/netstandard2.0/ref/System.Threading.dll",
+ "build/netstandard2.0/ref/System.Transactions.dll",
+ "build/netstandard2.0/ref/System.ValueTuple.dll",
+ "build/netstandard2.0/ref/System.Web.dll",
+ "build/netstandard2.0/ref/System.Windows.dll",
+ "build/netstandard2.0/ref/System.Xml.Linq.dll",
+ "build/netstandard2.0/ref/System.Xml.ReaderWriter.dll",
+ "build/netstandard2.0/ref/System.Xml.Serialization.dll",
+ "build/netstandard2.0/ref/System.Xml.XDocument.dll",
+ "build/netstandard2.0/ref/System.Xml.XPath.XDocument.dll",
+ "build/netstandard2.0/ref/System.Xml.XPath.dll",
+ "build/netstandard2.0/ref/System.Xml.XmlDocument.dll",
+ "build/netstandard2.0/ref/System.Xml.XmlSerializer.dll",
+ "build/netstandard2.0/ref/System.Xml.dll",
+ "build/netstandard2.0/ref/System.dll",
+ "build/netstandard2.0/ref/mscorlib.dll",
+ "build/netstandard2.0/ref/netstandard.dll",
+ "build/netstandard2.0/ref/netstandard.xml",
+ "lib/netstandard1.0/_._",
+ "netstandard.library.2.0.3.nupkg.sha512",
+ "netstandard.library.nuspec"
+ ]
+ },
+ "xunit/2.4.2": {
+ "sha512": "6Mj73Ont3zj2CJuoykVJfE0ZmRwn7C+pTuRP8c4bnaaTFjwNG6tGe0prJ1yIbMe9AHrpDys63ctWacSsFJWK/w==",
+ "type": "package",
+ "path": "xunit/2.4.2",
+ "files": [
+ ".nupkg.metadata",
+ ".signature.p7s",
+ "_content/logo-128-transparent.png",
+ "xunit.2.4.2.nupkg.sha512",
+ "xunit.nuspec"
+ ]
+ },
+ "xunit.abstractions/2.0.3": {
+ "sha512": "pot1I4YOxlWjIb5jmwvvQNbTrZ3lJQ+jUGkGjWE3hEFM0l5gOnBWS+H3qsex68s5cO52g+44vpGzhAt+42vwKg==",
+ "type": "package",
+ "path": "xunit.abstractions/2.0.3",
+ "files": [
+ ".nupkg.metadata",
+ ".signature.p7s",
+ "lib/net35/xunit.abstractions.dll",
+ "lib/net35/xunit.abstractions.xml",
+ "lib/netstandard1.0/xunit.abstractions.dll",
+ "lib/netstandard1.0/xunit.abstractions.xml",
+ "lib/netstandard2.0/xunit.abstractions.dll",
+ "lib/netstandard2.0/xunit.abstractions.xml",
+ "xunit.abstractions.2.0.3.nupkg.sha512",
+ "xunit.abstractions.nuspec"
+ ]
+ },
+ "xunit.analyzers/1.0.0": {
+ "sha512": "BeO8hEgs/c8Ls2647fPfieMngncvf0D0xYNDfIO59MolxtCtVjFRd6SRc+7tj8VMqkVOuJcnc9eh4ngI2cAmLQ==",
+ "type": "package",
+ "path": "xunit.analyzers/1.0.0",
+ "hasTools": true,
+ "files": [
+ ".nupkg.metadata",
+ ".signature.p7s",
+ "_content/logo-128-transparent.png",
+ "analyzers/dotnet/cs/xunit.analyzers.dll",
+ "analyzers/dotnet/cs/xunit.analyzers.fixes.dll",
+ "tools/install.ps1",
+ "tools/uninstall.ps1",
+ "xunit.analyzers.1.0.0.nupkg.sha512",
+ "xunit.analyzers.nuspec"
+ ]
+ },
+ "xunit.assert/2.4.2": {
+ "sha512": "pxJISOFjn2XTTi1mcDCkRZrTFb9OtRRCtx2kZFNF51GdReLr1ls2rnyxvAS4JO247K3aNtflvh5Q0346K5BROA==",
+ "type": "package",
+ "path": "xunit.assert/2.4.2",
+ "files": [
+ ".nupkg.metadata",
+ ".signature.p7s",
+ "_content/logo-128-transparent.png",
+ "lib/netstandard1.1/xunit.assert.dll",
+ "lib/netstandard1.1/xunit.assert.xml",
+ "xunit.assert.2.4.2.nupkg.sha512",
+ "xunit.assert.nuspec"
+ ]
+ },
+ "xunit.core/2.4.2": {
+ "sha512": "KB4yGCxNqIVyekhJLXtKSEq6BaXVp/JO3mbGVE1hxypZTLEe7h+sTbAhpA+yZW2dPtXTuiW+C1B2oxxHEkrmOw==",
+ "type": "package",
+ "path": "xunit.core/2.4.2",
+ "files": [
+ ".nupkg.metadata",
+ ".signature.p7s",
+ "_content/logo-128-transparent.png",
+ "build/xunit.core.props",
+ "build/xunit.core.targets",
+ "buildMultiTargeting/xunit.core.props",
+ "buildMultiTargeting/xunit.core.targets",
+ "xunit.core.2.4.2.nupkg.sha512",
+ "xunit.core.nuspec"
+ ]
+ },
+ "xunit.extensibility.core/2.4.2": {
+ "sha512": "W1BoXTIN1C6kpVSMw25huSet25ky6IAQUNovu3zGOGN/jWnbgSoTyCrlIhmXSg0tH5nEf8q7h3OjNHOjyu5PfA==",
+ "type": "package",
+ "path": "xunit.extensibility.core/2.4.2",
+ "files": [
+ ".nupkg.metadata",
+ ".signature.p7s",
+ "_content/logo-128-transparent.png",
+ "lib/net452/xunit.core.dll",
+ "lib/net452/xunit.core.dll.tdnet",
+ "lib/net452/xunit.core.xml",
+ "lib/net452/xunit.runner.tdnet.dll",
+ "lib/net452/xunit.runner.utility.net452.dll",
+ "lib/netstandard1.1/xunit.core.dll",
+ "lib/netstandard1.1/xunit.core.xml",
+ "xunit.extensibility.core.2.4.2.nupkg.sha512",
+ "xunit.extensibility.core.nuspec"
+ ]
+ },
+ "xunit.extensibility.execution/2.4.2": {
+ "sha512": "CZmgcKkwpyo8FlupZdWpJCryrAOWLh1FBPG6gmVZuPQkGQsim/oL4PcP4nfrC2hHgXUFtluvaJ0Sp9PQKUMNpg==",
+ "type": "package",
+ "path": "xunit.extensibility.execution/2.4.2",
+ "files": [
+ ".nupkg.metadata",
+ ".signature.p7s",
+ "_content/logo-128-transparent.png",
+ "lib/net452/xunit.execution.desktop.dll",
+ "lib/net452/xunit.execution.desktop.xml",
+ "lib/netstandard1.1/xunit.execution.dotnet.dll",
+ "lib/netstandard1.1/xunit.execution.dotnet.xml",
+ "xunit.extensibility.execution.2.4.2.nupkg.sha512",
+ "xunit.extensibility.execution.nuspec"
+ ]
+ },
+ "FileRestitcher/1.0.0": {
+ "type": "project",
+ "path": "../FileRestitcher/FileRestitcher.csproj",
+ "msbuildProject": "../FileRestitcher/FileRestitcher.csproj"
+ }
+ },
+ "projectFileDependencyGroups": {
+ ".NETFramework,Version=v4.7.2": [
+ "FileRestitcher >= 1.0.0",
+ "Microsoft.NET.Test.Sdk >= 16.9.4",
+ "coverlet.collector >= 3.0.2",
+ "xunit >= 2.4.2"
+ ],
+ ".NETStandard,Version=v2.0": [
+ "FileRestitcher >= 1.0.0",
+ "Microsoft.NET.Test.Sdk >= 16.9.4",
+ "NETStandard.Library >= 2.0.3",
+ "coverlet.collector >= 3.0.2",
+ "xunit >= 2.4.2"
+ ]
+ },
+ "packageFolders": {
+ "C:\\Users\\Dimitri\\.nuget\\packages\\": {},
+ "C:\\Program Files (x86)\\Microsoft Visual Studio\\Shared\\NuGetPackages": {}
+ },
+ "project": {
+ "version": "1.0.0",
+ "restore": {
+ "projectUniqueName": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher.Tests\\FileRestitcher.Tests.csproj",
+ "projectName": "FileRestitcher.Tests",
+ "projectPath": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher.Tests\\FileRestitcher.Tests.csproj",
+ "packagesPath": "C:\\Users\\Dimitri\\.nuget\\packages\\",
+ "outputPath": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher.Tests\\FileRestitcher.Tests.NupkgProj\\",
+ "projectStyle": "PackageReference",
+ "crossTargeting": true,
+ "fallbackFolders": [
+ "C:\\Program Files (x86)\\Microsoft Visual Studio\\Shared\\NuGetPackages"
+ ],
+ "configFilePaths": [
+ "K:\\Proyects_Repos\\TorchSharp\\NuGet.Config",
+ "C:\\Users\\Dimitri\\AppData\\Roaming\\NuGet\\NuGet.Config",
+ "C:\\Program Files (x86)\\NuGet\\Config\\Microsoft.VisualStudio.FallbackLocation.config",
+ "C:\\Program Files (x86)\\NuGet\\Config\\Microsoft.VisualStudio.Offline.config"
+ ],
+ "originalTargetFrameworks": [
+ "net472",
+ "netstandard2.0"
+ ],
+ "sources": {
+ "C:\\Program Files (x86)\\Microsoft SDKs\\NuGetPackages\\": {},
+ "https://api.nuget.org/v3/index.json": {}
+ },
+ "frameworks": {
+ "net472": {
+ "targetAlias": "net472",
+ "projectReferences": {
+ "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj": {
+ "projectPath": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj"
+ }
+ }
+ },
+ "netstandard2.0": {
+ "targetAlias": "netstandard2.0",
+ "projectReferences": {
+ "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj": {
+ "projectPath": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj"
+ }
+ }
+ }
+ },
+ "warningProperties": {
+ "warnAsError": [
+ "NU1605"
+ ]
+ },
+ "restoreAuditProperties": {
+ "enableAudit": "true",
+ "auditLevel": "low",
+ "auditMode": "all"
+ },
+ "SdkAnalysisLevel": "9.0.100"
+ },
+ "frameworks": {
+ "net472": {
+ "targetAlias": "net472",
+ "dependencies": {
+ "Microsoft.NET.Test.Sdk": {
+ "suppressParent": "None",
+ "target": "Package",
+ "version": "[16.9.4, )"
+ },
+ "coverlet.collector": {
+ "include": "Runtime, Build, Native, ContentFiles, Analyzers, BuildTransitive",
+ "suppressParent": "All",
+ "target": "Package",
+ "version": "[3.0.2, )"
+ },
+ "xunit": {
+ "suppressParent": "None",
+ "target": "Package",
+ "version": "[2.4.2, )"
+ }
+ },
+ "runtimeIdentifierGraphPath": "C:\\Program Files\\dotnet\\sdk\\9.0.100\\RuntimeIdentifierGraph.json"
+ },
+ "netstandard2.0": {
+ "targetAlias": "netstandard2.0",
+ "dependencies": {
+ "Microsoft.NET.Test.Sdk": {
+ "suppressParent": "None",
+ "target": "Package",
+ "version": "[16.9.4, )"
+ },
+ "NETStandard.Library": {
+ "suppressParent": "All",
+ "target": "Package",
+ "version": "[2.0.3, )",
+ "autoReferenced": true
+ },
+ "coverlet.collector": {
+ "include": "Runtime, Build, Native, ContentFiles, Analyzers, BuildTransitive",
+ "suppressParent": "All",
+ "target": "Package",
+ "version": "[3.0.2, )"
+ },
+ "xunit": {
+ "suppressParent": "None",
+ "target": "Package",
+ "version": "[2.4.2, )"
+ }
+ },
+ "imports": [
+ "net461",
+ "net462",
+ "net47",
+ "net471",
+ "net472",
+ "net48",
+ "net481"
+ ],
+ "assetTargetFallback": true,
+ "warn": true,
+ "runtimeIdentifierGraphPath": "C:\\Program Files\\dotnet\\sdk\\9.0.100\\RuntimeIdentifierGraph.json"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/project.nuget.cache b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/project.nuget.cache
new file mode 100644
index 000000000..fd9b0a74d
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.NupkgProj/project.nuget.cache
@@ -0,0 +1,21 @@
+{
+ "version": 2,
+ "dgSpecHash": "md8eUrGszbk=",
+ "success": true,
+ "projectFilePath": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher.Tests\\FileRestitcher.Tests.csproj",
+ "expectedPackageFiles": [
+ "C:\\Users\\Dimitri\\.nuget\\packages\\coverlet.collector\\3.0.2\\coverlet.collector.3.0.2.nupkg.sha512",
+ "C:\\Users\\Dimitri\\.nuget\\packages\\microsoft.codecoverage\\16.9.4\\microsoft.codecoverage.16.9.4.nupkg.sha512",
+ "C:\\Users\\Dimitri\\.nuget\\packages\\microsoft.net.test.sdk\\16.9.4\\microsoft.net.test.sdk.16.9.4.nupkg.sha512",
+ "C:\\Users\\Dimitri\\.nuget\\packages\\microsoft.netcore.platforms\\1.1.0\\microsoft.netcore.platforms.1.1.0.nupkg.sha512",
+ "C:\\Users\\Dimitri\\.nuget\\packages\\netstandard.library\\2.0.3\\netstandard.library.2.0.3.nupkg.sha512",
+ "C:\\Users\\Dimitri\\.nuget\\packages\\xunit\\2.4.2\\xunit.2.4.2.nupkg.sha512",
+ "C:\\Users\\Dimitri\\.nuget\\packages\\xunit.abstractions\\2.0.3\\xunit.abstractions.2.0.3.nupkg.sha512",
+ "C:\\Users\\Dimitri\\.nuget\\packages\\xunit.analyzers\\1.0.0\\xunit.analyzers.1.0.0.nupkg.sha512",
+ "C:\\Users\\Dimitri\\.nuget\\packages\\xunit.assert\\2.4.2\\xunit.assert.2.4.2.nupkg.sha512",
+ "C:\\Users\\Dimitri\\.nuget\\packages\\xunit.core\\2.4.2\\xunit.core.2.4.2.nupkg.sha512",
+ "C:\\Users\\Dimitri\\.nuget\\packages\\xunit.extensibility.core\\2.4.2\\xunit.extensibility.core.2.4.2.nupkg.sha512",
+ "C:\\Users\\Dimitri\\.nuget\\packages\\xunit.extensibility.execution\\2.4.2\\xunit.extensibility.execution.2.4.2.nupkg.sha512"
+ ],
+ "logs": []
+}
\ No newline at end of file
diff --git a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.csproj b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.csproj
index 7b19650d6..0a570605d 100644
--- a/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.csproj
+++ b/pkg/FileRestitcher/FileRestitcher.Tests/FileRestitcher.Tests.csproj
@@ -1,9 +1,9 @@
-
+
false
-
+ netstandard2.0;$(TargetFrameworks)
net8.0
net472;$(TargetFrameworks)
@@ -13,8 +13,15 @@
+
+
+
-
+
+ runtime; build; native; contentfiles; analyzers; buildtransitive
+ all
+
+
runtime; build; native; contentfiles; analyzers; buildtransitive
all
diff --git a/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/FileRestitcher.csproj.nuget.dgspec.json b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/FileRestitcher.csproj.nuget.dgspec.json
new file mode 100644
index 000000000..2e0230fcf
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/FileRestitcher.csproj.nuget.dgspec.json
@@ -0,0 +1,103 @@
+{
+ "format": 1,
+ "restore": {
+ "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj": {}
+ },
+ "projects": {
+ "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj": {
+ "version": "1.0.0",
+ "restore": {
+ "projectUniqueName": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj",
+ "projectName": "FileRestitcher",
+ "projectPath": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj",
+ "packagesPath": "C:\\Users\\Dimitri\\.nuget\\packages\\",
+ "outputPath": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.NupkgProj\\",
+ "projectStyle": "PackageReference",
+ "crossTargeting": true,
+ "fallbackFolders": [
+ "C:\\Program Files (x86)\\Microsoft Visual Studio\\Shared\\NuGetPackages"
+ ],
+ "configFilePaths": [
+ "K:\\Proyects_Repos\\TorchSharp\\NuGet.Config",
+ "C:\\Users\\Dimitri\\AppData\\Roaming\\NuGet\\NuGet.Config",
+ "C:\\Program Files (x86)\\NuGet\\Config\\Microsoft.VisualStudio.FallbackLocation.config",
+ "C:\\Program Files (x86)\\NuGet\\Config\\Microsoft.VisualStudio.Offline.config"
+ ],
+ "originalTargetFrameworks": [
+ "net8.0",
+ "netstandard2.0"
+ ],
+ "sources": {
+ "C:\\Program Files (x86)\\Microsoft SDKs\\NuGetPackages\\": {},
+ "https://api.nuget.org/v3/index.json": {}
+ },
+ "frameworks": {
+ "net8.0": {
+ "targetAlias": "net8.0",
+ "projectReferences": {}
+ },
+ "netstandard2.0": {
+ "targetAlias": "netstandard2.0",
+ "projectReferences": {}
+ }
+ },
+ "warningProperties": {
+ "warnAsError": [
+ "NU1605"
+ ]
+ },
+ "restoreAuditProperties": {
+ "enableAudit": "true",
+ "auditLevel": "low",
+ "auditMode": "all"
+ },
+ "SdkAnalysisLevel": "9.0.100"
+ },
+ "frameworks": {
+ "net8.0": {
+ "targetAlias": "net8.0",
+ "imports": [
+ "net461",
+ "net462",
+ "net47",
+ "net471",
+ "net472",
+ "net48",
+ "net481"
+ ],
+ "assetTargetFallback": true,
+ "warn": true,
+ "frameworkReferences": {
+ "Microsoft.NETCore.App": {
+ "privateAssets": "all"
+ }
+ },
+ "runtimeIdentifierGraphPath": "C:\\Program Files\\dotnet\\sdk\\9.0.100\\RuntimeIdentifierGraph.json"
+ },
+ "netstandard2.0": {
+ "targetAlias": "netstandard2.0",
+ "dependencies": {
+ "NETStandard.Library": {
+ "suppressParent": "All",
+ "target": "Package",
+ "version": "[2.0.3, )",
+ "autoReferenced": true
+ }
+ },
+ "imports": [
+ "net461",
+ "net462",
+ "net47",
+ "net471",
+ "net472",
+ "net48",
+ "net481"
+ ],
+ "assetTargetFallback": true,
+ "warn": true,
+ "runtimeIdentifierGraphPath": "C:\\Program Files\\dotnet\\sdk\\9.0.100\\RuntimeIdentifierGraph.json"
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/FileRestitcher.csproj.nuget.g.props b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/FileRestitcher.csproj.nuget.g.props
new file mode 100644
index 000000000..9c25bbe46
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/FileRestitcher.csproj.nuget.g.props
@@ -0,0 +1,16 @@
+
+
+
+ True
+ NuGet
+ $(MSBuildThisFileDirectory)project.assets.json
+ $(UserProfile)\.nuget\packages\
+ C:\Users\Dimitri\.nuget\packages\;C:\Program Files (x86)\Microsoft Visual Studio\Shared\NuGetPackages
+ PackageReference
+ 6.12.0
+
+
+
+
+
+
\ No newline at end of file
diff --git a/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/FileRestitcher.csproj.nuget.g.targets b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/FileRestitcher.csproj.nuget.g.targets
new file mode 100644
index 000000000..2192724bc
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/FileRestitcher.csproj.nuget.g.targets
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/.NETStandard,Version=v2.0.AssemblyAttributes.cs b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/.NETStandard,Version=v2.0.AssemblyAttributes.cs
new file mode 100644
index 000000000..45b1ca02d
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/.NETStandard,Version=v2.0.AssemblyAttributes.cs
@@ -0,0 +1,4 @@
+//
+using System;
+using System.Reflection;
+[assembly: global::System.Runtime.Versioning.TargetFrameworkAttribute(".NETStandard,Version=v2.0", FrameworkDisplayName = "")]
diff --git a/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/FileRestitcher.AssemblyInfo.cs b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/FileRestitcher.AssemblyInfo.cs
new file mode 100644
index 000000000..4e5534e0c
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/FileRestitcher.AssemblyInfo.cs
@@ -0,0 +1,24 @@
+//------------------------------------------------------------------------------
+//
+// Este código fue generado por una herramienta.
+// Versión de runtime:4.0.30319.42000
+//
+// Los cambios en este archivo podrían causar un comportamiento incorrecto y se perderán si
+// se vuelve a generar el código.
+//
+//------------------------------------------------------------------------------
+
+using System;
+using System.Reflection;
+
+[assembly: System.Reflection.AssemblyCompanyAttribute("TorchSharp contributors")]
+[assembly: System.Reflection.AssemblyConfigurationAttribute("Debug")]
+[assembly: System.Reflection.AssemblyCopyrightAttribute("Copyright .NET Foundation and Contributors")]
+[assembly: System.Reflection.AssemblyFileVersionAttribute("1.0.0.0")]
+[assembly: System.Reflection.AssemblyInformationalVersionAttribute("1.0.0+4436c93f069a66702e1d89cb9325f40b734bbaa5")]
+[assembly: System.Reflection.AssemblyProductAttribute("FileRestitcher")]
+[assembly: System.Reflection.AssemblyTitleAttribute("FileRestitcher")]
+[assembly: System.Reflection.AssemblyVersionAttribute("1.0.0.0")]
+
+// Generado por la clase WriteCodeFragment de MSBuild.
+
diff --git a/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/FileRestitcher.AssemblyInfoInputs.cache b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/FileRestitcher.AssemblyInfoInputs.cache
new file mode 100644
index 000000000..033a7b8cf
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/FileRestitcher.AssemblyInfoInputs.cache
@@ -0,0 +1 @@
+c5138ff11eebd7d3b469eae6088b319f69826365e9da38b98fa1a61dfe12e010
diff --git a/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/FileRestitcher.GeneratedMSBuildEditorConfig.editorconfig b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/FileRestitcher.GeneratedMSBuildEditorConfig.editorconfig
new file mode 100644
index 000000000..acc3874e1
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/FileRestitcher.GeneratedMSBuildEditorConfig.editorconfig
@@ -0,0 +1,8 @@
+is_global = true
+build_property.RootNamespace = FileRestitcher
+build_property.ProjectDir = K:\Proyects_Repos\TorchSharp\pkg\FileRestitcher\FileRestitcher\
+build_property.EnableComHosting =
+build_property.EnableGeneratedComInterfaceComImportInterop =
+build_property.CsWinRTUseWindowsUIXamlProjections = false
+build_property.EffectiveAnalysisLevelStyle =
+build_property.EnableCodeStyleSeverity =
diff --git a/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/FileRestitcher.assets.cache b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/FileRestitcher.assets.cache
new file mode 100644
index 000000000..bcfab3c00
Binary files /dev/null and b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/FileRestitcher.assets.cache differ
diff --git a/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/FileRestitcher.csproj.AssemblyReference.cache b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/FileRestitcher.csproj.AssemblyReference.cache
new file mode 100644
index 000000000..e722955cd
Binary files /dev/null and b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/netstandard2.0/FileRestitcher.csproj.AssemblyReference.cache differ
diff --git a/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/project.assets.json b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/project.assets.json
new file mode 100644
index 000000000..c5f885f89
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/project.assets.json
@@ -0,0 +1,283 @@
+{
+ "version": 3,
+ "targets": {
+ ".NETStandard,Version=v2.0": {
+ "Microsoft.NETCore.Platforms/1.1.0": {
+ "type": "package",
+ "compile": {
+ "lib/netstandard1.0/_._": {}
+ },
+ "runtime": {
+ "lib/netstandard1.0/_._": {}
+ }
+ },
+ "NETStandard.Library/2.0.3": {
+ "type": "package",
+ "dependencies": {
+ "Microsoft.NETCore.Platforms": "1.1.0"
+ },
+ "compile": {
+ "lib/netstandard1.0/_._": {}
+ },
+ "runtime": {
+ "lib/netstandard1.0/_._": {}
+ },
+ "build": {
+ "build/netstandard2.0/NETStandard.Library.targets": {}
+ }
+ }
+ },
+ "net8.0": {}
+ },
+ "libraries": {
+ "Microsoft.NETCore.Platforms/1.1.0": {
+ "sha512": "kz0PEW2lhqygehI/d6XsPCQzD7ff7gUJaVGPVETX611eadGsA3A877GdSlU0LRVMCTH/+P3o2iDTak+S08V2+A==",
+ "type": "package",
+ "path": "microsoft.netcore.platforms/1.1.0",
+ "files": [
+ ".nupkg.metadata",
+ ".signature.p7s",
+ "ThirdPartyNotices.txt",
+ "dotnet_library_license.txt",
+ "lib/netstandard1.0/_._",
+ "microsoft.netcore.platforms.1.1.0.nupkg.sha512",
+ "microsoft.netcore.platforms.nuspec",
+ "runtime.json"
+ ]
+ },
+ "NETStandard.Library/2.0.3": {
+ "sha512": "st47PosZSHrjECdjeIzZQbzivYBJFv6P2nv4cj2ypdI204DO+vZ7l5raGMiX4eXMJ53RfOIg+/s4DHVZ54Nu2A==",
+ "type": "package",
+ "path": "netstandard.library/2.0.3",
+ "files": [
+ ".nupkg.metadata",
+ ".signature.p7s",
+ "LICENSE.TXT",
+ "THIRD-PARTY-NOTICES.TXT",
+ "build/netstandard2.0/NETStandard.Library.targets",
+ "build/netstandard2.0/ref/Microsoft.Win32.Primitives.dll",
+ "build/netstandard2.0/ref/System.AppContext.dll",
+ "build/netstandard2.0/ref/System.Collections.Concurrent.dll",
+ "build/netstandard2.0/ref/System.Collections.NonGeneric.dll",
+ "build/netstandard2.0/ref/System.Collections.Specialized.dll",
+ "build/netstandard2.0/ref/System.Collections.dll",
+ "build/netstandard2.0/ref/System.ComponentModel.Composition.dll",
+ "build/netstandard2.0/ref/System.ComponentModel.EventBasedAsync.dll",
+ "build/netstandard2.0/ref/System.ComponentModel.Primitives.dll",
+ "build/netstandard2.0/ref/System.ComponentModel.TypeConverter.dll",
+ "build/netstandard2.0/ref/System.ComponentModel.dll",
+ "build/netstandard2.0/ref/System.Console.dll",
+ "build/netstandard2.0/ref/System.Core.dll",
+ "build/netstandard2.0/ref/System.Data.Common.dll",
+ "build/netstandard2.0/ref/System.Data.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.Contracts.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.Debug.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.FileVersionInfo.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.Process.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.StackTrace.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.TextWriterTraceListener.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.Tools.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.TraceSource.dll",
+ "build/netstandard2.0/ref/System.Diagnostics.Tracing.dll",
+ "build/netstandard2.0/ref/System.Drawing.Primitives.dll",
+ "build/netstandard2.0/ref/System.Drawing.dll",
+ "build/netstandard2.0/ref/System.Dynamic.Runtime.dll",
+ "build/netstandard2.0/ref/System.Globalization.Calendars.dll",
+ "build/netstandard2.0/ref/System.Globalization.Extensions.dll",
+ "build/netstandard2.0/ref/System.Globalization.dll",
+ "build/netstandard2.0/ref/System.IO.Compression.FileSystem.dll",
+ "build/netstandard2.0/ref/System.IO.Compression.ZipFile.dll",
+ "build/netstandard2.0/ref/System.IO.Compression.dll",
+ "build/netstandard2.0/ref/System.IO.FileSystem.DriveInfo.dll",
+ "build/netstandard2.0/ref/System.IO.FileSystem.Primitives.dll",
+ "build/netstandard2.0/ref/System.IO.FileSystem.Watcher.dll",
+ "build/netstandard2.0/ref/System.IO.FileSystem.dll",
+ "build/netstandard2.0/ref/System.IO.IsolatedStorage.dll",
+ "build/netstandard2.0/ref/System.IO.MemoryMappedFiles.dll",
+ "build/netstandard2.0/ref/System.IO.Pipes.dll",
+ "build/netstandard2.0/ref/System.IO.UnmanagedMemoryStream.dll",
+ "build/netstandard2.0/ref/System.IO.dll",
+ "build/netstandard2.0/ref/System.Linq.Expressions.dll",
+ "build/netstandard2.0/ref/System.Linq.Parallel.dll",
+ "build/netstandard2.0/ref/System.Linq.Queryable.dll",
+ "build/netstandard2.0/ref/System.Linq.dll",
+ "build/netstandard2.0/ref/System.Net.Http.dll",
+ "build/netstandard2.0/ref/System.Net.NameResolution.dll",
+ "build/netstandard2.0/ref/System.Net.NetworkInformation.dll",
+ "build/netstandard2.0/ref/System.Net.Ping.dll",
+ "build/netstandard2.0/ref/System.Net.Primitives.dll",
+ "build/netstandard2.0/ref/System.Net.Requests.dll",
+ "build/netstandard2.0/ref/System.Net.Security.dll",
+ "build/netstandard2.0/ref/System.Net.Sockets.dll",
+ "build/netstandard2.0/ref/System.Net.WebHeaderCollection.dll",
+ "build/netstandard2.0/ref/System.Net.WebSockets.Client.dll",
+ "build/netstandard2.0/ref/System.Net.WebSockets.dll",
+ "build/netstandard2.0/ref/System.Net.dll",
+ "build/netstandard2.0/ref/System.Numerics.dll",
+ "build/netstandard2.0/ref/System.ObjectModel.dll",
+ "build/netstandard2.0/ref/System.Reflection.Extensions.dll",
+ "build/netstandard2.0/ref/System.Reflection.Primitives.dll",
+ "build/netstandard2.0/ref/System.Reflection.dll",
+ "build/netstandard2.0/ref/System.Resources.Reader.dll",
+ "build/netstandard2.0/ref/System.Resources.ResourceManager.dll",
+ "build/netstandard2.0/ref/System.Resources.Writer.dll",
+ "build/netstandard2.0/ref/System.Runtime.CompilerServices.VisualC.dll",
+ "build/netstandard2.0/ref/System.Runtime.Extensions.dll",
+ "build/netstandard2.0/ref/System.Runtime.Handles.dll",
+ "build/netstandard2.0/ref/System.Runtime.InteropServices.RuntimeInformation.dll",
+ "build/netstandard2.0/ref/System.Runtime.InteropServices.dll",
+ "build/netstandard2.0/ref/System.Runtime.Numerics.dll",
+ "build/netstandard2.0/ref/System.Runtime.Serialization.Formatters.dll",
+ "build/netstandard2.0/ref/System.Runtime.Serialization.Json.dll",
+ "build/netstandard2.0/ref/System.Runtime.Serialization.Primitives.dll",
+ "build/netstandard2.0/ref/System.Runtime.Serialization.Xml.dll",
+ "build/netstandard2.0/ref/System.Runtime.Serialization.dll",
+ "build/netstandard2.0/ref/System.Runtime.dll",
+ "build/netstandard2.0/ref/System.Security.Claims.dll",
+ "build/netstandard2.0/ref/System.Security.Cryptography.Algorithms.dll",
+ "build/netstandard2.0/ref/System.Security.Cryptography.Csp.dll",
+ "build/netstandard2.0/ref/System.Security.Cryptography.Encoding.dll",
+ "build/netstandard2.0/ref/System.Security.Cryptography.Primitives.dll",
+ "build/netstandard2.0/ref/System.Security.Cryptography.X509Certificates.dll",
+ "build/netstandard2.0/ref/System.Security.Principal.dll",
+ "build/netstandard2.0/ref/System.Security.SecureString.dll",
+ "build/netstandard2.0/ref/System.ServiceModel.Web.dll",
+ "build/netstandard2.0/ref/System.Text.Encoding.Extensions.dll",
+ "build/netstandard2.0/ref/System.Text.Encoding.dll",
+ "build/netstandard2.0/ref/System.Text.RegularExpressions.dll",
+ "build/netstandard2.0/ref/System.Threading.Overlapped.dll",
+ "build/netstandard2.0/ref/System.Threading.Tasks.Parallel.dll",
+ "build/netstandard2.0/ref/System.Threading.Tasks.dll",
+ "build/netstandard2.0/ref/System.Threading.Thread.dll",
+ "build/netstandard2.0/ref/System.Threading.ThreadPool.dll",
+ "build/netstandard2.0/ref/System.Threading.Timer.dll",
+ "build/netstandard2.0/ref/System.Threading.dll",
+ "build/netstandard2.0/ref/System.Transactions.dll",
+ "build/netstandard2.0/ref/System.ValueTuple.dll",
+ "build/netstandard2.0/ref/System.Web.dll",
+ "build/netstandard2.0/ref/System.Windows.dll",
+ "build/netstandard2.0/ref/System.Xml.Linq.dll",
+ "build/netstandard2.0/ref/System.Xml.ReaderWriter.dll",
+ "build/netstandard2.0/ref/System.Xml.Serialization.dll",
+ "build/netstandard2.0/ref/System.Xml.XDocument.dll",
+ "build/netstandard2.0/ref/System.Xml.XPath.XDocument.dll",
+ "build/netstandard2.0/ref/System.Xml.XPath.dll",
+ "build/netstandard2.0/ref/System.Xml.XmlDocument.dll",
+ "build/netstandard2.0/ref/System.Xml.XmlSerializer.dll",
+ "build/netstandard2.0/ref/System.Xml.dll",
+ "build/netstandard2.0/ref/System.dll",
+ "build/netstandard2.0/ref/mscorlib.dll",
+ "build/netstandard2.0/ref/netstandard.dll",
+ "build/netstandard2.0/ref/netstandard.xml",
+ "lib/netstandard1.0/_._",
+ "netstandard.library.2.0.3.nupkg.sha512",
+ "netstandard.library.nuspec"
+ ]
+ }
+ },
+ "projectFileDependencyGroups": {
+ ".NETStandard,Version=v2.0": [
+ "NETStandard.Library >= 2.0.3"
+ ],
+ "net8.0": []
+ },
+ "packageFolders": {
+ "C:\\Users\\Dimitri\\.nuget\\packages\\": {},
+ "C:\\Program Files (x86)\\Microsoft Visual Studio\\Shared\\NuGetPackages": {}
+ },
+ "project": {
+ "version": "1.0.0",
+ "restore": {
+ "projectUniqueName": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj",
+ "projectName": "FileRestitcher",
+ "projectPath": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj",
+ "packagesPath": "C:\\Users\\Dimitri\\.nuget\\packages\\",
+ "outputPath": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.NupkgProj\\",
+ "projectStyle": "PackageReference",
+ "crossTargeting": true,
+ "fallbackFolders": [
+ "C:\\Program Files (x86)\\Microsoft Visual Studio\\Shared\\NuGetPackages"
+ ],
+ "configFilePaths": [
+ "K:\\Proyects_Repos\\TorchSharp\\NuGet.Config",
+ "C:\\Users\\Dimitri\\AppData\\Roaming\\NuGet\\NuGet.Config",
+ "C:\\Program Files (x86)\\NuGet\\Config\\Microsoft.VisualStudio.FallbackLocation.config",
+ "C:\\Program Files (x86)\\NuGet\\Config\\Microsoft.VisualStudio.Offline.config"
+ ],
+ "originalTargetFrameworks": [
+ "net8.0",
+ "netstandard2.0"
+ ],
+ "sources": {
+ "C:\\Program Files (x86)\\Microsoft SDKs\\NuGetPackages\\": {},
+ "https://api.nuget.org/v3/index.json": {}
+ },
+ "frameworks": {
+ "net8.0": {
+ "targetAlias": "net8.0",
+ "projectReferences": {}
+ },
+ "netstandard2.0": {
+ "targetAlias": "netstandard2.0",
+ "projectReferences": {}
+ }
+ },
+ "warningProperties": {
+ "warnAsError": [
+ "NU1605"
+ ]
+ },
+ "restoreAuditProperties": {
+ "enableAudit": "true",
+ "auditLevel": "low",
+ "auditMode": "all"
+ },
+ "SdkAnalysisLevel": "9.0.100"
+ },
+ "frameworks": {
+ "net8.0": {
+ "targetAlias": "net8.0",
+ "imports": [
+ "net461",
+ "net462",
+ "net47",
+ "net471",
+ "net472",
+ "net48",
+ "net481"
+ ],
+ "assetTargetFallback": true,
+ "warn": true,
+ "frameworkReferences": {
+ "Microsoft.NETCore.App": {
+ "privateAssets": "all"
+ }
+ },
+ "runtimeIdentifierGraphPath": "C:\\Program Files\\dotnet\\sdk\\9.0.100\\RuntimeIdentifierGraph.json"
+ },
+ "netstandard2.0": {
+ "targetAlias": "netstandard2.0",
+ "dependencies": {
+ "NETStandard.Library": {
+ "suppressParent": "All",
+ "target": "Package",
+ "version": "[2.0.3, )",
+ "autoReferenced": true
+ }
+ },
+ "imports": [
+ "net461",
+ "net462",
+ "net47",
+ "net471",
+ "net472",
+ "net48",
+ "net481"
+ ],
+ "assetTargetFallback": true,
+ "warn": true,
+ "runtimeIdentifierGraphPath": "C:\\Program Files\\dotnet\\sdk\\9.0.100\\RuntimeIdentifierGraph.json"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/project.nuget.cache b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/project.nuget.cache
new file mode 100644
index 000000000..aab7970d8
--- /dev/null
+++ b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.NupkgProj/project.nuget.cache
@@ -0,0 +1,11 @@
+{
+ "version": 2,
+ "dgSpecHash": "rM+0M7K4/ZA=",
+ "success": true,
+ "projectFilePath": "K:\\Proyects_Repos\\TorchSharp\\pkg\\FileRestitcher\\FileRestitcher\\FileRestitcher.csproj",
+ "expectedPackageFiles": [
+ "C:\\Users\\Dimitri\\.nuget\\packages\\microsoft.netcore.platforms\\1.1.0\\microsoft.netcore.platforms.1.1.0.nupkg.sha512",
+ "C:\\Users\\Dimitri\\.nuget\\packages\\netstandard.library\\2.0.3\\netstandard.library.2.0.3.nupkg.sha512"
+ ],
+ "logs": []
+}
\ No newline at end of file
diff --git a/pkg/FileRestitcher/FileRestitcher/FileRestitcher.csproj b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.csproj
index 3ab2bb061..0b61b7138 100644
--- a/pkg/FileRestitcher/FileRestitcher/FileRestitcher.csproj
+++ b/pkg/FileRestitcher/FileRestitcher/FileRestitcher.csproj
@@ -1,10 +1,10 @@
-
+
false
Library
- netstandard2.0
+ netstandard2.0;net8.0
false
-
+
diff --git a/pkg/pack.proj b/pkg/pack.proj
index 3c9db2f98..c05c5e610 100644
--- a/pkg/pack.proj
+++ b/pkg/pack.proj
@@ -1,6 +1,6 @@
-
+
diff --git a/src/Examples.Utils/Examples.Utils.csproj b/src/Examples.Utils/Examples.Utils.csproj
index 884b48c18..de3667512 100644
--- a/src/Examples.Utils/Examples.Utils.csproj
+++ b/src/Examples.Utils/Examples.Utils.csproj
@@ -1,9 +1,11 @@
-
+
9.0
+ net8.0
+ net472;$(TargetFrameworks);netstandard2.0
net8.0
@@ -17,7 +19,10 @@
-
+
+
+
+
diff --git a/src/Examples.Utils/Vocab.cs b/src/Examples.Utils/Vocab.cs
index 743e4c55c..7a1deb298 100644
--- a/src/Examples.Utils/Vocab.cs
+++ b/src/Examples.Utils/Vocab.cs
@@ -88,12 +88,17 @@ public void Add(KeyValuePair item)
{
Add(item.Key, item.Value);
}
-
+#if NETSTANDARD2_0
+ public bool TryGetValue(string key, out int value)
+ {
+ return _dict.TryGetValue(key, out value);
+ }
+#else
public bool TryGetValue(string key, [MaybeNullWhen(false)] out int value)
{
return _dict.TryGetValue(key, out value);
}
-
+#endif
private Dictionary _dict = new Dictionary();
private int _last = 0;
}
diff --git a/src/Examples/AdversarialExampleGeneration.cs b/src/Examples/AdversarialExampleGeneration.cs
index 7bfc174b2..49bd10956 100644
--- a/src/Examples/AdversarialExampleGeneration.cs
+++ b/src/Examples/AdversarialExampleGeneration.cs
@@ -34,6 +34,8 @@ public class AdversarialExampleGeneration
{
#if NET472_OR_GREATER
private readonly static string _dataLocation = NSPath.Join(Environment.GetFolderPath(Environment.SpecialFolder.DesktopDirectory), "..", "Downloads", "mnist");
+#elif NETSTANDARD2_0
+ private readonly static string _dataLocation = Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.DesktopDirectory), "..", "Downloads", "mnist");
#else
private readonly static string _dataLocation = Path.Join(Environment.GetFolderPath(Environment.SpecialFolder.DesktopDirectory), "..", "Downloads", "mnist");
#endif // NET472_OR_GREATER
diff --git a/src/Examples/Examples.csproj b/src/Examples/Examples.csproj
index 0d2053a31..cc2fe7824 100644
--- a/src/Examples/Examples.csproj
+++ b/src/Examples/Examples.csproj
@@ -1,11 +1,12 @@
-
+
Exe
true
true
-
+
+ net472;netstandard2.0;$(TargetFrameworks)
9.0
net8.0
true
@@ -23,9 +24,11 @@
+
+
diff --git a/src/Examples/SequenceToSequence.cs b/src/Examples/SequenceToSequence.cs
index 436c05a67..8ff2c6dc5 100644
--- a/src/Examples/SequenceToSequence.cs
+++ b/src/Examples/SequenceToSequence.cs
@@ -6,6 +6,7 @@
using System.Diagnostics;
using static TorchSharp.torch;
using static TorchSharp.torch.nn;
+using System.Text.RegularExpressions;
namespace TorchSharp.Examples
{
@@ -26,6 +27,8 @@ public class SequenceToSequence
// This path assumes that you're running this on Windows.
#if NET472_OR_GREATER
private readonly static string _dataLocation = NSPath.Join(Environment.GetFolderPath(Environment.SpecialFolder.DesktopDirectory), "..", "Downloads", "wikitext-2-v1");
+#elif NETSTANDARD2_0
+ private readonly static string _dataLocation = Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.DesktopDirectory), "..", "Downloads", "wikitext-2-v1");
#else
private readonly static string _dataLocation = Path.Join(Environment.GetFolderPath(Environment.SpecialFolder.DesktopDirectory), "..", "Downloads", "wikitext-2-v1");
#endif // NET472_OR_GREATER
@@ -251,7 +254,11 @@ private void InitWeights()
public override Tensor forward(Tensor t, Tensor mask)
{
+#if !NETSTANDARD2_0
var src = pos_encoder.call(encoder.call(t) * MathF.Sqrt(ninputs));
+#else
+ var src = pos_encoder.call(encoder.call(t) * (float)Math.Sqrt(ninputs));
+#endif
var enc = transformer_encoder.call(src, mask);
return decoder.call(enc);
}
diff --git a/src/Examples/TextClassification.cs b/src/Examples/TextClassification.cs
index 8fb175718..4cdc79bc1 100644
--- a/src/Examples/TextClassification.cs
+++ b/src/Examples/TextClassification.cs
@@ -36,6 +36,8 @@ public class TextClassification
// This path assumes that you're running this on Windows.
#if NET472_OR_GREATER
private readonly static string _dataLocation = NSPath.Join(Environment.GetFolderPath(Environment.SpecialFolder.DesktopDirectory), "..", "Downloads", "AG_NEWS");
+#elif NETSTANDARD2_0
+ private readonly static string _dataLocation = Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.DesktopDirectory), "..", "Downloads", "AG_NEWS");
#else
private readonly static string _dataLocation = Path.Join(Environment.GetFolderPath(Environment.SpecialFolder.DesktopDirectory), "..", "Downloads", "AG_NEWS");
#endif // NET472_OR_GREATER
diff --git a/src/FSharp.Examples/FSharp.Examples.fsproj b/src/FSharp.Examples/FSharp.Examples.fsproj
index 6468ce393..4f0ab0811 100644
--- a/src/FSharp.Examples/FSharp.Examples.fsproj
+++ b/src/FSharp.Examples/FSharp.Examples.fsproj
@@ -1,4 +1,4 @@
-
+
Exe
@@ -23,7 +23,10 @@
+
+
+
diff --git a/src/Native/CMakeSettings.json b/src/Native/CMakeSettings.json
index 9204f06eb..11d28e957 100644
--- a/src/Native/CMakeSettings.json
+++ b/src/Native/CMakeSettings.json
@@ -1,4 +1,4 @@
-{
+{
"configurations": [
{
"name": "x64-Debug",
diff --git a/src/Native/LibTorchSharp/CMakeLists.txt b/src/Native/LibTorchSharp/CMakeLists.txt
index 60b61f049..560fba1a2 100644
--- a/src/Native/LibTorchSharp/CMakeLists.txt
+++ b/src/Native/LibTorchSharp/CMakeLists.txt
@@ -1,15 +1,38 @@
project(LibTorchSharp)
+find_package(CUDA)
+if(CUDA_FOUND)
+ include_directories(${CUDA_INCLUDE_DIRS})
+ link_directories(${CUDA_LIBRARY_DIRS})
+ add_compile_definitions(TORCHSHARP_CUDA_TOOLKIT_FOUND)
+endif()
+
+add_compile_definitions(NOMINMAX)
+
+
+#add_library(CUDA::nvToolsExt INTERFACE IMPORTED)
+# ensure that PyTorch is told to use NVTX3 headers
+#target_compile_definitions(CUDA::nvToolsExt INTERFACE TORCH_CUDA_USE_NVTX3)
+#target_link_libraries(CUDA::nvToolsExt INTERFACE CUDA::nvtx3)
+
+
+
if(APPLE AND NOT LIBTORCH_ARCH STREQUAL "arm64")
include_directories("/usr/local/include" "/usr/local/opt/llvm/include")
link_directories("/usr/local/lib" "/usr/local/opt/llvm/lib")
endif()
+
+#set(LIBTORCH_PATH "K:/FrameworksForC/LibTorch/libtorch-win-shared-with-deps-2.6.0+cu126")
find_package(Torch REQUIRED PATHS ${LIBTORCH_PATH})
+#find_package(Torch CONFIG)
set(SOURCES
cifar10.h
crc32c.h
+ THSAmp.h
THSAutograd.h
+ THSBFloat16.h
+ THSCuda.h
THSData.h
THSJIT.h
THSNN.h
@@ -21,8 +44,12 @@ set(SOURCES
cifar10.cpp
crc32c.c
THSActivation.cpp
+ THSAmp.cpp
THSAutograd.cpp
- THSData.cpp
+ THSBFloat16.cpp
+ THSCuda.cpp
+ THSConvolution.cpp
+ THSData.cpp
THSFFT.cpp
THSJIT.cpp
THSLinearAlgebra.cpp
@@ -70,6 +97,10 @@ include_directories(${TORCH_INCLUDE_DIRS})
add_library(LibTorchSharp SHARED ${SOURCES} ${RESOURCES})
+if(CUDA_FOUND)
+target_link_libraries(LibTorchSharp ${CUDA_LIBRARIES})
+endif()
+
target_link_libraries(LibTorchSharp ${TORCH_LIBRARIES})
set_property(TARGET LibTorchSharp PROPERTY CXX_STANDARD 14)
diff --git a/src/Native/LibTorchSharp/THSActivation.cpp b/src/Native/LibTorchSharp/THSActivation.cpp
index c89beaab6..966e5afc3 100644
--- a/src/Native/LibTorchSharp/THSActivation.cpp
+++ b/src/Native/LibTorchSharp/THSActivation.cpp
@@ -2,3 +2,331 @@
#include "THSNN.h"
#include
+
+NNModule THSNN_CELU_ctor(const double alpha, const bool inplace, NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ auto opts = torch::nn::CELUOptions().alpha(alpha).inplace(inplace);
+ res = create_module(opts, outAsAnyModule);
+ );
+}
+
+Tensor THSNN_CELU_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_ELU_ctor(const double alpha, const bool inplace, NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ auto opts = torch::nn::ELUOptions().alpha(alpha).inplace(inplace);
+ res = create_module(opts, outAsAnyModule);
+ );
+}
+
+Tensor THSNN_ELU_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_GELU_ctor(NNAnyModule* outAsAnyModule, const char* approximate)
+{
+ //res = create_module(outAsAnyModule);
+ CATCH_RETURN_NNModule(
+ res = create_module(torch::nn::GELUOptions().approximate(std::string(approximate)), outAsAnyModule);
+ );
+}
+
+Tensor THSNN_GELU_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_GLU_ctor(const int64_t dim, NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ auto opts = torch::nn::GLUOptions().dim(dim);
+ res = create_module(opts, outAsAnyModule);
+ );
+}
+
+Tensor THSNN_GLU_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_Hardshrink_ctor(const double lambda, NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ auto opts = torch::nn::HardshrinkOptions(lambda);
+ res = create_module(opts, outAsAnyModule);
+ );
+}
+
+Tensor THSNN_Hardshrink_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_Hardtanh_ctor(const double min_val, const double max_val, const bool inplace, NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ auto opts = torch::nn::HardtanhOptions()
+ .min_val(min_val)
+ .max_val(max_val)
+ .inplace(inplace);
+ res = create_module(opts, outAsAnyModule);
+ );
+}
+
+Tensor THSNN_Hardtanh_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+
+NNModule THSNN_LeakyReLU_ctor(const double negative_slope, const bool inplace, NNAnyModule* outAsAnyModule)
+{
+    CATCH_RETURN_NNModule(
+        auto opts = torch::nn::LeakyReLUOptions().negative_slope(negative_slope).inplace(inplace);
+        res = create_module(opts, outAsAnyModule);
+    );
+}
+
+Tensor THSNN_LeakyReLU_forward(const NNModule module, const Tensor tensor)
+{
+    CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_LogSoftmax_ctor(int64_t dim, NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ auto opts = torch::nn::LogSoftmaxOptions(dim);
+ res = create_module(opts, outAsAnyModule);
+ );
+}
+
+Tensor THSNN_LogSoftmax_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_Mish_ctor(NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ res = create_module(outAsAnyModule);
+ );
+}
+
+Tensor THSNN_Mish_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_PReLU_ctor(const int64_t nparams, const double init, NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ auto opts = torch::nn::PReLUOptions().num_parameters(nparams).init(init);
+ res = create_module(opts, outAsAnyModule);
+ );
+}
+
+Tensor THSNN_PReLU_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+Tensor THSNN_PReLU_weight(const NNModule module)
+{
+ return get_weight(module);
+}
+
+void THSNN_PReLU_set_weight(const NNModule module, const Tensor weight)
+{
+ set_weight(module, weight);
+}
+
+NNModule THSNN_ReLU_ctor(bool inplace, NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ auto opts = torch::nn::ReLUOptions(inplace);
+ res = create_module(opts, outAsAnyModule);
+ );
+}
+
+Tensor THSNN_ReLU_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_RReLU_ctor(const double lower, const double upper, const bool inplace, NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ auto opts = torch::nn::RReLUOptions().lower(lower).upper(upper).inplace(inplace);
+ res = create_module(opts, outAsAnyModule);
+ );
+}
+
+Tensor THSNN_RReLU_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_ReLU6_ctor(bool inplace, NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ auto opts = torch::nn::ReLU6Options(inplace);
+ res = create_module(opts, outAsAnyModule);
+ );
+}
+
+Tensor THSNN_ReLU6_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_SELU_ctor(bool inplace, NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ auto opts = torch::nn::SELUOptions(inplace);
+ res = create_module(opts, outAsAnyModule);
+ );
+}
+
+Tensor THSNN_SELU_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_Sigmoid_ctor(NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ res = create_module(outAsAnyModule);
+ );
+}
+
+Tensor THSNN_Sigmoid_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_SiLU_ctor(NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ res = create_module(outAsAnyModule);
+ );
+}
+
+Tensor THSNN_SiLU_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_Softmax2d_ctor(NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ res = create_module(outAsAnyModule);
+ );
+}
+
+Tensor THSNN_Softmax2d_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_Softmax_ctor(const int64_t dim, NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ auto opts = torch::nn::SoftmaxOptions(dim);
+ res = create_module(opts, outAsAnyModule);
+ );
+}
+
+Tensor THSNN_Softmax_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_Softmin_ctor(const int64_t dim, NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ auto opts = torch::nn::SoftminOptions(dim);
+ res = create_module(opts, outAsAnyModule);
+ );
+}
+
+Tensor THSNN_Softmin_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_Softplus_ctor(const double beta, const double threshold, NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ auto opts = torch::nn::SoftplusOptions().beta(beta).threshold(threshold);
+ res = create_module(opts, outAsAnyModule);
+ );
+}
+
+Tensor THSNN_Softplus_forward(const NNModule module, const Tensor tensor) {
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_Softshrink_ctor(const double lambda, NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ auto opts = torch::nn::SoftshrinkOptions().lambda(lambda);
+ res = create_module(opts, outAsAnyModule);
+ );
+}
+
+Tensor THSNN_Softshrink_forward(const NNModule module, const Tensor tensor) {
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_Softsign_ctor(NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ res = create_module(outAsAnyModule);
+ );
+}
+
+Tensor THSNN_Softsign_forward(const NNModule module, const Tensor tensor) {
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_Tanh_ctor(NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ res = create_module(outAsAnyModule);
+ );
+}
+
+Tensor THSNN_Tanh_forward(const NNModule module, const Tensor tensor)
+{
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_Tanhshrink_ctor(NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ res = create_module(outAsAnyModule);
+ );
+}
+
+Tensor THSNN_Tanhshrink_forward(const NNModule module, const Tensor tensor) {
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
+NNModule THSNN_Threshold_ctor(const double threshold, const double value, const bool inplace, NNAnyModule* outAsAnyModule)
+{
+ CATCH_RETURN_NNModule(
+ auto opts = torch::nn::ThresholdOptions(threshold, value).inplace(inplace);
+ res = create_module(opts, outAsAnyModule);
+ );
+}
+
+Tensor THSNN_Threshold_forward(const NNModule module, const Tensor tensor) {
+ CATCH_TENSOR((*module)->as()->forward(*tensor));
+}
+
diff --git a/src/Native/LibTorchSharp/THSAmp.cpp b/src/Native/LibTorchSharp/THSAmp.cpp
new file mode 100644
index 000000000..79c6da9f2
--- /dev/null
+++ b/src/Native/LibTorchSharp/THSAmp.cpp
@@ -0,0 +1,89 @@
+// Copyright (c) .NET Foundation and Contributors. All Rights Reserved. See LICENSE in the project root for license information.
+#include "THSAmp.h"
+
+#include
+#include
+#include "torch/torch.h"
+#include "torch/cuda.h"
+
+/*void THSAmp_amp_foreach_non_finite_check_and_unscale_(const at::TensorList self, at::Tensor& found_inf, const at::Tensor& inv_scale)
+{
+ torch::_amp_foreach_non_finite_check_and_unscale_(self, found_inf, inv_scale);
+}*/
+
+void THSAmp_amp_foreach_non_finite_check_and_unscale_(Tensor* self, const int64_t tLength, at::Tensor& found_inf, const at::Tensor& inv_scale)
+{
+    torch::_amp_foreach_non_finite_check_and_unscale_(toTensors<at::Tensor>((torch::Tensor**)self, tLength), found_inf, inv_scale);
+}
+
+Tensor THSAmp_amp_update_scale_(at::Tensor& self, at::Tensor& growth_tracker, const at::Tensor& found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
+ CATCH_TENSOR(torch::_amp_update_scale_(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);)
+}
+Tensor THSAmp_amp_update_scale_out(at::Tensor& out, const at::Tensor& self, at::Tensor& growth_tracker, const at::Tensor& found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval){
+ CATCH_TENSOR(torch::_amp_update_scale_out(out, self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);)
+}
+Tensor THSAmp_amp_update_scale_outf(const at::Tensor& self, at::Tensor& growth_tracker, const at::Tensor& found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor& out){
+ CATCH_TENSOR(torch::_amp_update_scale_outf(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval, out);)
+}
+
+Tensor THSAMP_amp_update_scale(const at::Tensor& self, const at::Tensor& growth_tracker, const at::Tensor& found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, Tensor* sec)
+{
+    std::tuple<at::Tensor, at::Tensor> res;
+ CATCH(res = torch::_amp_update_scale(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);)
+ *sec = ResultTensor(std::get<1>(res));
+ return ResultTensor(std::get<0>(res));
+}
+
+bool THSAmp_is_torch_function_mode_enabled()
+{
+ return at::impl::torch_function_mode_enabled(); //https://github.com/pytorch/pytorch/blob/2c91e13afc6edcfe0a0e6189a88aae4ecbbf3516/torch/csrc/autograd/init.cpp#L911
+}
+
+bool THSAmp_is_autocast_cache_enabled()
+{
+ return at::autocast::is_autocast_cache_enabled();
+}
+
+bool THSAmp_is_autocast_available(int8_t device)
+{
+ return at::autocast::is_autocast_available((c10::DeviceType)device);
+}
+
+
+bool THSAmp_is_autocast_enabled(int8_t device)
+{
+ return at::autocast::is_autocast_enabled((at::DeviceType)device);
+}
+
+int8_t THSAmp_get_autocast_dtype(int8_t device)
+{
+ return (int8_t)at::autocast::get_autocast_dtype((at::DeviceType)device);
+}
+
+void THSAmp_set_autocast_dtype(int8_t device, int8_t dtype)
+{
+ at::autocast::set_autocast_dtype((at::DeviceType)device, (at::ScalarType)dtype);
+}
+
+void THSAmp_set_autocast_enabled(int8_t device, bool enabled)
+{
+ at::autocast::set_autocast_enabled((at::DeviceType)device, enabled);
+}
+int THSAmp_autocast_increment_nesting()
+{
+ return at::autocast::increment_nesting();
+}
+
+int THSAmp_autocast_decrement_nesting()
+{
+ return at::autocast::decrement_nesting();
+}
+
+void THSAmp_clear_autocast_cache()
+{
+ at::autocast::clear_cache();
+}
+void THSAmp_set_autocast_cache_enabled(bool enabled)
+{
+ at::autocast::set_autocast_cache_enabled(enabled);
+}
\ No newline at end of file
diff --git a/src/Native/LibTorchSharp/THSAmp.h b/src/Native/LibTorchSharp/THSAmp.h
new file mode 100644
index 000000000..4ae115dda
--- /dev/null
+++ b/src/Native/LibTorchSharp/THSAmp.h
@@ -0,0 +1,36 @@
+// Copyright (c) .NET Foundation and Contributors. All Rights Reserved. See LICENSE in the project root for license information.
+#pragma once
+
+#include "../Stdafx.h"
+#include "Utils.h"
+
+//https://github.com/pytorch/pytorch/blob/main/torch/_meta_registrations.py#L5957
+//EXPORT_API(void) THSAmp_amp_foreach_non_finite_check_and_unscale_(const at::TensorList self, at::Tensor& found_inf, const at::Tensor& inv_scale);
+
+EXPORT_API(void) THSAmp_amp_foreach_non_finite_check_and_unscale_(Tensor* self, const int64_t tLength, at::Tensor& found_inf, const at::Tensor& inv_scale);
+
+//EXPORT_API(void) THSAmp_amp_update_scale_(const at::Tensor& self, const at::Tensor& inv_scale);
+
+EXPORT_API(Tensor) THSAmp_amp_update_scale_(at::Tensor& self, at::Tensor& growth_tracker, const at::Tensor& found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval);
+EXPORT_API(Tensor) THSAmp_amp_update_scale_out(at::Tensor& out, const at::Tensor& self, at::Tensor& growth_tracker, const at::Tensor& found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval);
+EXPORT_API(Tensor) THSAmp_amp_update_scale_outf(const at::Tensor& self, at::Tensor& growth_tracker, const at::Tensor& found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor& out);
+EXPORT_API(Tensor) THSAMP_amp_update_scale(const at::Tensor& self, const at::Tensor& growth_tracker, const at::Tensor& found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, Tensor* sec);
+
+EXPORT_API(bool) THSAmp_is_torch_function_mode_enabled();
+
+EXPORT_API(bool) THSAmp_is_autocast_cache_enabled();
+
+EXPORT_API(bool) THSAmp_is_autocast_available(int8_t device);
+
+EXPORT_API(bool) THSAmp_is_autocast_enabled(int8_t device);
+EXPORT_API(int8_t) THSAmp_get_autocast_dtype(int8_t device);
+EXPORT_API(void) THSAmp_set_autocast_enabled(int8_t device, bool enabled);
+EXPORT_API(void) THSAmp_set_autocast_dtype(int8_t device, int8_t dtype);
+
+EXPORT_API(int) THSAmp_autocast_increment_nesting();
+EXPORT_API(int) THSAmp_autocast_decrement_nesting();
+
+EXPORT_API(void) THSAmp_set_autocast_cache_enabled(bool enabled);
+EXPORT_API(void) THSAmp_clear_autocast_cache();
+
+//EXPORT_API(bool) THSTorch_jit_is_scripting();
\ No newline at end of file
diff --git a/src/Native/LibTorchSharp/THSAutograd.cpp b/src/Native/LibTorchSharp/THSAutograd.cpp
index 63059eb95..9fc6b5d12 100644
--- a/src/Native/LibTorchSharp/THSAutograd.cpp
+++ b/src/Native/LibTorchSharp/THSAutograd.cpp
@@ -143,46 +143,57 @@ void THSAutograd_CSharpNode_clearInputMetadata(CSharpNodePtr node) {
}
void THSAutograd_Function_wrapOutputs(TensorArray vars_, TensorArray nonDiff_, TensorArray dirty_, TensorArray outputs_, CSharpNodePtr node, Tensor* (*allocator)(size_t length)) {
- CATCH(
- auto vars = toTensors(vars_.array, vars_.size);
- auto output_tensors = toTensors(outputs_.array, outputs_.size);
- auto outputs = torch::autograd::to_optional(output_tensors);
-
- // Convert the list of Tensor to a set of unsafe impl
- std::unordered_set nonDiff;
- nonDiff.reserve(nonDiff_.size);
- for (int i = 0; i < nonDiff_.size; i++)
- nonDiff.insert(nonDiff_.array[i]->unsafeGetTensorImpl());
-
- // Convert the list of Tensors to a set of unsafe impl, and then apply the behavior of AutogradContext::get_and_bump_dirty()
- std::unordered_set dirty;
- dirty.reserve(dirty_.size);
- for (int i = 0; i < dirty_.size; i++) {
- auto t = dirty_.array[i]->unsafeGetTensorImpl();
- t->bump_version();
- dirty.insert(t);
+ torch_last_err = 0;
+ try {
+ auto vars = toTensors(vars_.array, vars_.size);
+ auto output_tensors = toTensors(outputs_.array, outputs_.size);
+ auto outputs = torch::autograd::to_optional(output_tensors);
+
+ // Convert the list of Tensor to a set of unsafe impl
+        std::unordered_set<at::TensorImpl*> nonDiff;
+ nonDiff.reserve(nonDiff_.size);
+ for (int i = 0; i < nonDiff_.size; i++)
+ nonDiff.insert(nonDiff_.array[i]->unsafeGetTensorImpl());
+
+ // Convert the list of Tensors to a set of unsafe impl, and then apply the behavior of AutogradContext::get_and_bump_dirty()
+        std::unordered_set<at::TensorImpl*> dirty;
+ dirty.reserve(dirty_.size);
+ for (int i = 0; i < dirty_.size; i++) {
+ auto t = dirty_.array[i]->unsafeGetTensorImpl();
+ t->bump_version();
+ dirty.insert(t);
+ }
+
+ // Copied these functions from custom_function.h
+ torch::autograd::_jvp_fn_t jvp_fn = [](const variable_list& inputs,
+ const variable_list& gI) -> variable_list {
+ TORCH_CHECK(
+ false,
+ "jvp is not implemented for the c++ API of custom Function yet.",
+ "Please open a feature request on GitHub if you need this.");
+ };
+
+ auto view_as_self_fn = [](const at::Tensor& x) -> at::Tensor {
+ return x.view_as(x);
+ };
+
+ //auto res = torch::autograd::_wrap_outputs(vars, nonDiff, dirty, outputs, node.weak_ptr == nullptr || node.weak_ptr->expired() ? nullptr : node.weak_ptr->lock(), jvp_fn, {}, view_as_self_fn, false);
+#if (TORCH_VERSION_MAJOR > 2) || (TORCH_VERSION_MAJOR == 2 && TORCH_VERSION_MINOR >= 11)
+ auto res = torch::autograd::_wrap_outputs(vars, nonDiff, dirty, outputs, node.weak_ptr == nullptr || node.weak_ptr->expired() ? nullptr : node.weak_ptr->lock(), jvp_fn, {}, view_as_self_fn, true);
+#else
+ auto res = torch::autograd::_wrap_outputs(vars, nonDiff, dirty, outputs, node.weak_ptr == nullptr || node.weak_ptr->expired() ? nullptr : node.weak_ptr->lock(), jvp_fn, {}, view_as_self_fn);
+#endif
+ auto sz = res.size();
+ Tensor* result = allocator(sz);
+ for (size_t i = 0; i < sz; i++)
+ result[i] = res[i].has_value() ? ResultTensor(res[i].value()) : nullptr;
+ }
+    catch (const c10::Error& e) {
+        torch_last_err = strdup(e.what());
+    }
+    catch (const std::runtime_error& e) {
+        torch_last_err = strdup(e.what());
}
-
- // Copied these functions from custom_function.h
- torch::autograd::_jvp_fn_t jvp_fn = [](const variable_list& inputs,
- const variable_list& gI) -> variable_list {
- TORCH_CHECK(
- false,
- "jvp is not implemented for the c++ API of custom Function yet.",
- "Please open a feature request on GitHub if you need this.");
- };
-
- auto view_as_self_fn = [](const at::Tensor& x) -> at::Tensor {
- return x.view_as(x);
- };
-
- auto res = torch::autograd::_wrap_outputs(vars, nonDiff, dirty, outputs, node.weak_ptr == nullptr || node.weak_ptr->expired() ? nullptr : node.weak_ptr->lock(), jvp_fn, {}, view_as_self_fn, false);
- auto sz = res.size();
-
- Tensor* result = allocator(sz);
- for (size_t i = 0; i < sz; i++)
- result[i] = res[i].has_value() ? ResultTensor(res[i].value()) : nullptr;
- )
}
SavedVariable THSAutograd_SavedVariable_ctor(Tensor variable, CSharpNodePtr node, bool is_inplace_on_view)
diff --git a/src/Native/LibTorchSharp/THSBFloat16.cpp b/src/Native/LibTorchSharp/THSBFloat16.cpp
new file mode 100644
index 000000000..34cecd97d
--- /dev/null
+++ b/src/Native/LibTorchSharp/THSBFloat16.cpp
@@ -0,0 +1,101 @@
+#include "THSBFloat16.h"
+
+c10::BFloat16 THSBFloat16_ctor(float value)
+{
+ c10::BFloat16 bf16(value);
+ return bf16;
+}
+
+float THSBFloat16_op_float(c10::BFloat16 bf16)
+{
+    return static_cast<float>(bf16);
+}
+
+c10::BFloat16 THSBFloat16_op_add(c10::BFloat16 a, c10::BFloat16 b){
+ return a + b;
+}
+c10::BFloat16 THSBFloat16_op_sub(c10::BFloat16 a, c10::BFloat16 b) {
+ return a - b;
+}
+c10::BFloat16 THSBFloat16_op_mul(c10::BFloat16 a, c10::BFloat16 b){
+ return a * b;
+}
+c10::BFloat16 THSBFloat16_op_div(c10::BFloat16 a, c10::BFloat16 b){
+ return a / b;
+}
+float THSBFloat16_op_add_float(c10::BFloat16 a, float b) {
+ return a + b;
+}
+float THSBFloat16_op_sub_float(c10::BFloat16 a, float b) {
+ return a - b;
+}
+float THSBFloat16_op_mul_float(c10::BFloat16 a, float b) {
+ return a * b;
+}
+float THSBFloat16_op_div_float(c10::BFloat16 a, float b) {
+ return a / b;
+}
+float THSBFloat16_op_add_lfloat(float a, c10::BFloat16 b) {
+ return a + b;
+}
+float THSBFloat16_op_sub_lfloat(float a, c10::BFloat16 b) {
+ return a - b;
+}
+float THSBFloat16_op_mul_lfloat(float a, c10::BFloat16 b) {
+ return a * b;
+}
+float THSBFloat16_op_div_lfloat(float a, c10::BFloat16 b) {
+ return a / b;
+}
+double THSBFloat16_op_add_double(c10::BFloat16 a, double b) {
+ return a + b;
+}
+double THSBFloat16_op_sub_double(c10::BFloat16 a, double b) {
+ return a - b;
+}
+double THSBFloat16_op_mul_double(c10::BFloat16 a, double b) {
+ return a * b;
+}
+double THSBFloat16_op_div_double(c10::BFloat16 a, double b) {
+ return a / b;
+}
+double THSBFloat16_op_add_ldouble(double a, c10::BFloat16 b) {
+ return a + b;
+}
+double THSBFloat16_op_sub_ldouble(double a, c10::BFloat16 b) {
+ return a - b;
+}
+double THSBFloat16_op_mul_ldouble(double a, c10::BFloat16 b) {
+ return a * b;
+}
+double THSBFloat16_op_div_ldouble(double a, c10::BFloat16 b) {
+ return a / b;
+}
+
+c10::BFloat16 THSBFloat16_min(c10::BFloat16 bf16) {
+    return std::numeric_limits<c10::BFloat16>::min();
+}
+c10::BFloat16 THSBFloat16_lowest(c10::BFloat16 bf16){
+    return std::numeric_limits<c10::BFloat16>::lowest();
+}
+c10::BFloat16 THSBFloat16_max(c10::BFloat16 bf16){
+    return std::numeric_limits<c10::BFloat16>::max();
+}
+c10::BFloat16 THSBFloat16_epsilon(c10::BFloat16 bf16){
+    return std::numeric_limits<c10::BFloat16>::epsilon();
+}
+c10::BFloat16 THSBFloat16_round_error(c10::BFloat16 bf16) {
+    return std::numeric_limits<c10::BFloat16>::round_error();
+}
+c10::BFloat16 THSBFloat16_infinity(c10::BFloat16 bf16) {
+    return std::numeric_limits<c10::BFloat16>::infinity();
+}
+c10::BFloat16 THSBFloat16_quiet_NaN(c10::BFloat16 bf16) {
+    return std::numeric_limits<c10::BFloat16>::quiet_NaN();
+}
+c10::BFloat16 THSBFloat16_signaling_NaN(c10::BFloat16 bf16) {
+    return std::numeric_limits<c10::BFloat16>::signaling_NaN();
+}
+c10::BFloat16 THSBFloat16_denorm_min(c10::BFloat16 bf16) {
+    return std::numeric_limits<c10::BFloat16>::denorm_min();
+}
\ No newline at end of file
diff --git a/src/Native/LibTorchSharp/THSBFloat16.h b/src/Native/LibTorchSharp/THSBFloat16.h
new file mode 100644
index 000000000..522ebcad7
--- /dev/null
+++ b/src/Native/LibTorchSharp/THSBFloat16.h
@@ -0,0 +1,43 @@
+// Copyright (c) .NET Foundation and Contributors. All Rights Reserved. See LICENSE in the project root for license information.
+#pragma once
+
+#include "../Stdafx.h"
+#include "Utils.h"
+
+#include "c10/util/BFloat16.h"
+//#include "c10/util/BFloat16-inl.h"
+
+EXPORT_API(c10::BFloat16) THSBFloat16_ctor(float value);
+EXPORT_API(float) THSBFloat16_op_float(c10::BFloat16 bf16);
+EXPORT_API(c10::BFloat16) THSBFloat16_op_add(c10::BFloat16 a, c10::BFloat16 b);
+EXPORT_API(c10::BFloat16) THSBFloat16_op_sub(c10::BFloat16 a, c10::BFloat16 b);
+EXPORT_API(c10::BFloat16) THSBFloat16_op_mul(c10::BFloat16 a, c10::BFloat16 b);
+EXPORT_API(c10::BFloat16) THSBFloat16_op_div(c10::BFloat16 a, c10::BFloat16 b);
+
+EXPORT_API(float) THSBFloat16_op_add_float(c10::BFloat16 a, float b);
+EXPORT_API(float) THSBFloat16_op_sub_float(c10::BFloat16 a, float b);
+EXPORT_API(float) THSBFloat16_op_mul_float(c10::BFloat16 a, float b);
+EXPORT_API(float) THSBFloat16_op_div_float(c10::BFloat16 a, float b);
+EXPORT_API(float) THSBFloat16_op_add_lfloat(float a, c10::BFloat16 b);
+EXPORT_API(float) THSBFloat16_op_sub_lfloat(float a, c10::BFloat16 b);
+EXPORT_API(float) THSBFloat16_op_mul_lfloat(float a, c10::BFloat16 b);
+EXPORT_API(float) THSBFloat16_op_div_lfloat(float a, c10::BFloat16 b);
+
+EXPORT_API(double) THSBFloat16_op_add_double(c10::BFloat16 a, double b);
+EXPORT_API(double) THSBFloat16_op_sub_double(c10::BFloat16 a, double b);
+EXPORT_API(double) THSBFloat16_op_mul_double(c10::BFloat16 a, double b);
+EXPORT_API(double) THSBFloat16_op_div_double(c10::BFloat16 a, double b);
+EXPORT_API(double) THSBFloat16_op_add_ldouble(double a, c10::BFloat16 b);
+EXPORT_API(double) THSBFloat16_op_sub_ldouble(double a, c10::BFloat16 b);
+EXPORT_API(double) THSBFloat16_op_mul_ldouble(double a, c10::BFloat16 b);
+EXPORT_API(double) THSBFloat16_op_div_ldouble(double a, c10::BFloat16 b);
+
+EXPORT_API(c10::BFloat16) THSBFloat16_min(c10::BFloat16 bf16);
+EXPORT_API(c10::BFloat16) THSBFloat16_lowest(c10::BFloat16 bf16);
+EXPORT_API(c10::BFloat16) THSBFloat16_max(c10::BFloat16 bf16);
+EXPORT_API(c10::BFloat16) THSBFloat16_epsilon(c10::BFloat16 bf16);
+EXPORT_API(c10::BFloat16) THSBFloat16_round_error(c10::BFloat16 bf16);
+EXPORT_API(c10::BFloat16) THSBFloat16_infinity(c10::BFloat16 bf16);
+EXPORT_API(c10::BFloat16) THSBFloat16_quiet_NaN(c10::BFloat16 bf16);
+EXPORT_API(c10::BFloat16) THSBFloat16_signaling_NaN(c10::BFloat16 bf16);
+EXPORT_API(c10::BFloat16) THSBFloat16_denorm_min(c10::BFloat16 bf16);
\ No newline at end of file
diff --git a/src/Native/LibTorchSharp/THSConvolution.cpp b/src/Native/LibTorchSharp/THSConvolution.cpp
index 621f8935c..3d8ca6aed 100644
--- a/src/Native/LibTorchSharp/THSConvolution.cpp
+++ b/src/Native/LibTorchSharp/THSConvolution.cpp
@@ -66,6 +66,7 @@ void THSNN_Conv1d_set_weight(const NNModule module, const Tensor weight)
set_weight(module, weight);
}
+
NNModule THSNN_Conv2d_ctor(const int64_t inputChannel, const int64_t outputChannel,
const int64_t kernelSize, const int64_t stride, const int64_t padding,
const int64_t dilation, const int64_t paddingMode, const int64_t groups, const bool bias,
@@ -140,6 +141,13 @@ void THSNN_Conv2d_set_weight(const NNModule module, const Tensor weight)
set_weight(module, weight);
}
+/*void THSNN_Conv2d_print_options(const NNModule module) {
+ auto opt = (*module)->as()->options;
+ ::std::cout << "Conv2d (" << std::to_string(opt.in_channels()) << "," << std::to_string(opt.out_channels()) << ")" << std::endl;
+}*/
+
+
+
NNModule THSNN_Conv3d_ctor(const int64_t inputChannel, const int64_t outputChannel,
const int64_t kernelSize, const int64_t stride, const int64_t padding,
const int64_t dilation, const int64_t paddingMode, const int64_t groups, const bool bias,
diff --git a/src/Native/LibTorchSharp/THSCuda.cpp b/src/Native/LibTorchSharp/THSCuda.cpp
new file mode 100644
index 000000000..29ac526a6
--- /dev/null
+++ b/src/Native/LibTorchSharp/THSCuda.cpp
@@ -0,0 +1,104 @@
+// Copyright (c) .NET Foundation and Contributors. All Rights Reserved. See LICENSE in the project root for license information.
+#include "THSCuda.h"
+
+#include
+#include
+
+#ifdef CUDA_TOOLKIT_FOUND
+cudaDeviceProp THSCuda_get_device_prop(int device)
+{
+ cudaDeviceProp cdp;
+ //cudaGetDeviceProperties(&cdp, device);
+ cudaGetDeviceProperties_v2(&cdp, device);
+ return cdp;
+}
+#endif
+
+int THSCuda_get_major_compute_capability(int device)
+{
+#ifdef CUDA_TOOLKIT_FOUND
+ return THSCuda_get_device_prop(device).major;
+#else
+ return -1;
+#endif
+}
+
+int THSCuda_get_minor_compute_capability(int device)
+{
+#ifdef CUDA_TOOLKIT_FOUND
+ return THSCuda_get_device_prop(device).minor;
+#else
+ return -1;
+#endif
+}
+
+
+int THSCuda_get_device_count(int* count)
+{
+#ifdef CUDA_TOOLKIT_FOUND
+ return cudaGetDeviceCount(count);
+#else
+ return -1;
+#endif
+}
+
+int THSCuda_get_free_total(int device, int* id, size_t* free, size_t* total)
+{
+#ifdef CUDA_TOOLKIT_FOUND
+    cudaError_t res = cudaSetDevice(device);
+    if (res != cudaSuccess)
+        return -1;
+    res = cudaGetDevice(id);
+    if (res != cudaSuccess)
+        return -1;
+ return cudaMemGetInfo(free, total);
+#else
+ return -1;
+#endif
+}
+
+size_t THSCuda_get_total_memory(int device)
+{
+#ifdef CUDA_TOOLKIT_FOUND
+ return THSCuda_get_device_prop(device).totalConstMem;
+#else
+ return 0; //Is size_t (unsigned long) so cant be negative.
+#endif
+ //RETURN_CUDA_DEVICE(THSCuda_get_device_prop(device).totalConstMem)
+}
+
+
+size_t THSCuda_get_global_total_memory(int device)
+{
+#ifdef CUDA_TOOLKIT_FOUND
+ return THSCuda_get_device_prop(device).totalGlobalMem;
+#else
+ return 0;
+#endif
+}
+
+const char* THSCuda_get_cuda_version()
+{
+#ifdef CUDA_TOOLKIT_FOUND
+ int runtimeVersion;
+ cudaError_t err = cudaRuntimeGetVersion(&runtimeVersion);
+
+ if (err != cudaSuccess) {
+ std::cerr << "Error getting CUDA runtime version: " << cudaGetErrorString(err) << std::endl;
+ return nullptr;
+ }
+
+ int major = runtimeVersion / 1000;
+ int minor = (runtimeVersion % 1000) / 10;
+ int patch = runtimeVersion % 10;
+
+    static std::string cudaVersionString;
+    cudaVersionString = std::to_string(major) + "." + std::to_string(minor) + "." + std::to_string(patch);
+    return cudaVersionString.c_str();
+#else
+ return nullptr;
+#endif
+}
+
+
+//TODO: implement more function
diff --git a/src/Native/LibTorchSharp/THSCuda.h b/src/Native/LibTorchSharp/THSCuda.h
new file mode 100644
index 000000000..bcc7e2cd6
--- /dev/null
+++ b/src/Native/LibTorchSharp/THSCuda.h
@@ -0,0 +1,49 @@
+// Copyright (c) .NET Foundation and Contributors. All Rights Reserved. See LICENSE in the project root for license information.
+#pragma once
+
+#include "../Stdafx.h"
+#include "Utils.h"
+#include "torch/torch.h"
+
+#ifdef TORCHSHARP_CUDA_TOOLKIT_FOUND
+//#undef CUDA_TOOLKIT_FOUND
+#define CUDA_TOOLKIT_FOUND 1
+#else
+#undef CUDA_TOOLKIT_FOUND
+#endif
+
+/*#define RETURN_CUDA_DEVICE(x) \
+ if(CUDA_TOOLKIT_FOUND) \
+ return x; \
+ else \
+ return -1; */
+
+#ifdef CUDA_TOOLKIT_FOUND
+#include "cuda.h"
+#include "cuda_runtime_api.h"
+
+cudaDeviceProp THSCuda_get_device_prop(int device=0);
+
+inline int show_available_memory()
+{
+ int num_gpus;
+ size_t free, total;
+ cudaGetDeviceCount(&num_gpus);
+ for (int gpu_id = 0; gpu_id < num_gpus; gpu_id++) {
+ cudaSetDevice(gpu_id);
+ int id;
+ cudaGetDevice(&id);
+ cudaMemGetInfo(&free, &total);
+ std::cout << "GPU " << id << " memory: free=" << free << ", total=" << total << std::endl;
+ }
+ return 0;
+}
+#endif
+
+EXPORT_API(int) THSCuda_get_major_compute_capability(int device);
+EXPORT_API(int) THSCuda_get_minor_compute_capability(int device);
+EXPORT_API(int) THSCuda_get_device_count(int* count);
+EXPORT_API(int) THSCuda_get_free_total(int device, int* id, size_t* free, size_t* total);
+EXPORT_API(size_t) THSCuda_get_total_memory(int device);
+EXPORT_API(size_t) THSCuda_get_global_total_memory(int device);
+EXPORT_API(const char*) THSCuda_get_cuda_version();
\ No newline at end of file
diff --git a/src/Native/LibTorchSharp/THSLinearAlgebra.cpp b/src/Native/LibTorchSharp/THSLinearAlgebra.cpp
index 202d3de47..ea0ab8e8e 100644
--- a/src/Native/LibTorchSharp/THSLinearAlgebra.cpp
+++ b/src/Native/LibTorchSharp/THSLinearAlgebra.cpp
@@ -4,9 +4,15 @@
#include
#include
+#define IS_260_OR_NEWER ((TORCH_VERSION_MAJOR > 2) || (TORCH_VERSION_MAJOR == 2 && TORCH_VERSION_MINOR >= 6))
+
Tensor THSLinalg_cholesky(const Tensor tensor)
{
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_cholesky(*tensor))
+#else
+ CATCH_TENSOR(torch::linalg::cholesky(*tensor))
+#endif
}
Tensor THSLinalg_cholesky_ex(const Tensor tensor, bool check_errors, Tensor* info)
@@ -29,7 +35,11 @@ Tensor THSLinalg_cond_float(const Tensor tensor, const double p)
Tensor THSLinalg_cond_str(const Tensor tensor, const char* p)
{
+#if IS_260_OR_NEWER
+ CATCH_TENSOR(p != nullptr ? torch::linalg_cond(*tensor, c10::string_view(p)) : torch::linalg_cond(*tensor))
+#else
CATCH_TENSOR(p != nullptr ? torch::linalg_cond(*tensor, p) : torch::linalg_cond(*tensor))
+#endif
}
Tensor THSLinalg_cond_none(const Tensor tensor)
@@ -44,7 +54,11 @@ Tensor THSLinalg_cross(const Tensor input, const Tensor other, const int64_t dim
Tensor THSLinalg_det(const Tensor tensor)
{
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_det(*tensor))
+#else
+ CATCH_TENSOR(torch::linalg::det(*tensor))
+#endif
}
Tensor THSTensor_logdet(const Tensor tensor)
@@ -55,7 +69,11 @@ Tensor THSTensor_logdet(const Tensor tensor)
Tensor THSLinalg_slogdet(const Tensor tensor, Tensor* logabsdet)
{
std::tuple res;
+#if IS_260_OR_NEWER
CATCH(res = torch::linalg_slogdet(*tensor);)
+#else
+ CATCH(res = torch::linalg::slogdet(*tensor);)
+#endif
*logabsdet = ResultTensor(std::get<1>(res));
return ResultTensor(std::get<0>(res));
}
@@ -63,7 +81,11 @@ Tensor THSLinalg_slogdet(const Tensor tensor, Tensor* logabsdet)
Tensor THSLinalg_eig(const Tensor tensor, Tensor* eigenvectors)
{
std::tuple res;
- CATCH(res = torch::linalg_eig(*tensor););
+#if IS_260_OR_NEWER
+ CATCH(res = torch::linalg_eig(*tensor);)
+#else
+ CATCH(res = torch::linalg::eig(*tensor););
+#endif
*eigenvectors = ResultTensor(std::get<1>(res));
return ResultTensor(std::get<0>(res));
}
@@ -93,31 +115,51 @@ Tensor THSLinalg_eigh(const Tensor tensor, const char UPLO, Tensor* eigenvectors
std::string _uplo;
_uplo.push_back(UPLO);
std::tuple res;
+#if IS_260_OR_NEWER
CATCH(res = torch::linalg_eigh(*tensor, _uplo););
+#else
+ CATCH(res = torch::linalg::eigh(*tensor, _uplo););
+#endif
*eigenvectors = ResultTensor(std::get<1>(res));
return ResultTensor(std::get<0>(res));
}
Tensor THSLinalg_eigvals(const Tensor tensor)
{
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_eigvals(*tensor))
+#else
+ CATCH_TENSOR(torch::linalg::eigvals(*tensor))
+#endif
}
Tensor THSLinalg_eigvalsh(const Tensor tensor, const char UPLO)
{
std::string _uplo;
_uplo.push_back(UPLO);
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_eigvalsh(*tensor, _uplo))
+#else
+ CATCH_TENSOR(torch::linalg::eigvalsh(*tensor, _uplo))
+#endif
}
Tensor THSLinalg_householder_product(const Tensor tensor, const Tensor tau)
{
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_householder_product(*tensor, *tau))
+#else
+ CATCH_TENSOR(torch::linalg::householder_product(*tensor, *tau))
+#endif
}
Tensor THSLinalg_inv(const Tensor tensor)
{
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_inv(*tensor))
+#else
+ CATCH_TENSOR(torch::linalg::inv(*tensor))
+#endif
}
Tensor THSLinalg_inv_ex(const Tensor tensor, bool check_errors, Tensor* info)
@@ -131,7 +173,11 @@ Tensor THSLinalg_inv_ex(const Tensor tensor, bool check_errors, Tensor* info)
Tensor THSLinalg_lstsq_none(const Tensor A, const Tensor B, Tensor* residuals, Tensor* rank, Tensor* singular_values)
{
std::tuple res;
+#if IS_260_OR_NEWER
CATCH(res = torch::linalg_lstsq(*A, *B, c10::nullopt, c10::nullopt);)
+#else
+ CATCH(res = torch::linalg::lstsq(*A, *B, c10::nullopt, c10::nullopt);)
+#endif
*residuals = ResultTensor(std::get<1>(res));
*rank = ResultTensor(std::get<2>(res));
*singular_values = ResultTensor(std::get<3>(res));
@@ -141,7 +187,11 @@ Tensor THSLinalg_lstsq_none(const Tensor A, const Tensor B, Tensor* residuals, T
Tensor THSLinalg_lstsq_rcond(const Tensor A, const Tensor B, const double rcond, Tensor* residuals, Tensor* rank, Tensor* singular_values)
{
std::tuple res;
+#if IS_260_OR_NEWER
CATCH(res = torch::linalg_lstsq(*A, *B, rcond, c10::nullopt);)
+#else
+ CATCH(res = torch::linalg::lstsq(*A, *B, rcond, c10::nullopt);)
+#endif
*residuals = ResultTensor(std::get<1>(res));
*rank = ResultTensor(std::get<2>(res));
*singular_values = ResultTensor(std::get<3>(res));
@@ -151,7 +201,11 @@ Tensor THSLinalg_lstsq_rcond(const Tensor A, const Tensor B, const double rcond,
Tensor THSLinalg_lu(const Tensor A, const bool pivot, Tensor* L, Tensor* U)
{
std::tuple res;
+#if IS_260_OR_NEWER
CATCH(res = torch::linalg_lu(*A, pivot);)
+#else
+ CATCH(res = torch::linalg::lu(*A, pivot);)
+#endif
*L = ResultTensor(std::get<1>(res));
*U = ResultTensor(std::get<2>(res));
return ResultTensor(std::get<0>(res));
@@ -160,7 +214,12 @@ Tensor THSLinalg_lu(const Tensor A, const bool pivot, Tensor* L, Tensor* U)
Tensor THSLinalg_lu_factor(const Tensor A, const bool pivot, Tensor* pivots)
{
std::tuple res;
+#if IS_260_OR_NEWER
CATCH(res = torch::linalg_lu_factor(*A, pivot);)
+#else
+ CATCH(res = torch::linalg::lu_factor(*A, pivot);)
+#endif
+
*pivots = ResultTensor(std::get<1>(res));
return ResultTensor(std::get<0>(res));
}
@@ -190,69 +249,111 @@ Tensor THSLinalg_ldl_solve(const Tensor LD, const Tensor pivots, const Tensor B,
Tensor THSLinalg_matrix_norm(const Tensor tensor, const Scalar ord, const int64_t* dim, const int dim_length, const bool keepdim)
{
auto dims = c10::ArrayRef(dim, dim_length);
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_matrix_norm(*tensor, *ord, dims, keepdim, c10::nullopt))
+#else
+ CATCH_TENSOR(torch::linalg::matrix_norm(*tensor, *ord, dims, keepdim, c10::nullopt))
+#endif
}
Tensor THSLinalg_matrix_norm_fronuc(const Tensor tensor, const int8_t fronuc, const int64_t* dim, const int dim_length, const bool keepdim)
{
auto dims = c10::ArrayRef(dim, dim_length);
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_matrix_norm(*tensor, (fronuc == 0) ? "fro" : "nuc", dims, keepdim, c10::nullopt))
+#else
+ CATCH_TENSOR(torch::linalg::matrix_norm(*tensor, (fronuc == 0) ? "fro" : "nuc", dims, keepdim, c10::nullopt))
+#endif
}
Tensor THSLinalg_vector_norm(const Tensor tensor, const Scalar ord, const int64_t* dim, const int dim_length, const bool keepdim)
{
auto dims = c10::ArrayRef(dim, dim_length);
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_vector_norm(*tensor, *ord, dims, keepdim, c10::nullopt))
+#else
+ CATCH_TENSOR(torch::linalg::vector_norm(*tensor, *ord, dims, keepdim, c10::nullopt))
+#endif
}
Tensor THSLinalg_matrix_rank(const Tensor tensor, const double atol, const bool has_atol, const double rtol, const bool has_rtol, const bool hermitian)
{
auto atol_ = has_atol ? atol : c10::optional();
auto rtol_ = has_rtol ? rtol : c10::optional();
-
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_matrix_rank(*tensor, atol_, rtol_, hermitian))
+#else
+ CATCH_TENSOR(torch::linalg::matrix_rank(*tensor, atol_, rtol_, hermitian))
+#endif
}
Tensor THSLinalg_matrix_rank_tensor(const Tensor tensor, const Tensor atol, const Tensor rtol, const bool hermitian)
{
const c10::optional atol_ = atol != nullptr ? *atol : c10::optional();
const c10::optional rtol_ = rtol != nullptr ? *rtol : c10::optional();
-
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_matrix_rank(*tensor, atol_, rtol_, hermitian))
+#else
+ CATCH_TENSOR(torch::linalg::matrix_rank(*tensor, atol_, rtol_, hermitian))
+#endif
}
Tensor THSLinalg_matrix_power(const Tensor tensor, const int64_t n)
{
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_matrix_power(*tensor, n))
+#else
+ CATCH_TENSOR(torch::linalg::matrix_power(*tensor, n))
+#endif
}
Tensor THSLinalg_multi_dot(const Tensor* tensors, const int length)
{
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_multi_dot(toTensors((torch::Tensor**)tensors, length)))
+#else
+ CATCH_TENSOR(torch::linalg::multi_dot(toTensors((torch::Tensor**)tensors, length)))
+#endif
}
Tensor THSLinalg_norm_str(const Tensor tensor, const char* p, const int64_t* dim, const int dim_length, const bool keepdim)
{
c10::optional dims = (dim == nullptr) ? c10::nullopt : c10::optional(at::ArrayRef(dim, dim_length));
- CATCH_TENSOR(torch::linalg_norm(*tensor, p, dims, keepdim, c10::nullopt))
+#if IS_260_OR_NEWER
+ CATCH_TENSOR(torch::linalg_norm(*tensor, c10::string_view(p), dims, keepdim, c10::nullopt))
+#else
+ CATCH_TENSOR(torch::linalg::norm(*tensor, p, dims, keepdim, c10::nullopt))
+#endif
}
Tensor THSLinalg_norm_float(const Tensor tensor, const double p, const int64_t* dim, const int dim_length, const bool keepdim)
{
c10::optional dims = (dim == nullptr) ? c10::nullopt : c10::optional(at::ArrayRef(dim, dim_length));
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_norm(*tensor, p, dims, keepdim, c10::nullopt))
+#else
+ CATCH_TENSOR(torch::linalg::norm(*tensor, p, dims, keepdim, c10::nullopt))
+#endif
}
Tensor THSLinalg_norm_int(const Tensor tensor, const int p, const int64_t* dim, const int dim_length, const bool keepdim)
{
c10::optional dims = (dim == nullptr) ? c10::nullopt : c10::optional(at::ArrayRef(dim, dim_length));
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_norm(*tensor, p, dims, keepdim, c10::nullopt))
+#else
+ CATCH_TENSOR(torch::linalg::norm(*tensor, p, dims, keepdim, c10::nullopt))
+#endif
}
Tensor THSLinalg_norm_opt(const Tensor tensor, const int64_t* dim, const int dim_length, const bool keepdim)
{
c10::optional dims = (dim == nullptr) ? c10::nullopt : c10::optional(at::ArrayRef(dim, dim_length));
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_norm(*tensor, c10::nullopt, dims, keepdim, c10::nullopt))
+#else
+ CATCH_TENSOR(torch::linalg::norm(*tensor, c10::nullopt, dims, keepdim, c10::nullopt))
+#endif
}
Tensor THSLinalg_pinv(const Tensor tensor, const double atol, const bool has_atol, const double rtol, const bool has_rtol, const bool hermitian)
@@ -273,7 +374,11 @@ Tensor THSLinalg_pinv_tensor(const Tensor tensor, const Tensor atol, const Tenso
Tensor THSLinalg_pinverse(const Tensor tensor, const double rcond, const bool hermitian)
{
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_pinv(*tensor, rcond, hermitian))
+#else
+ CATCH_TENSOR(torch::linalg::pinv(*tensor, rcond, hermitian))
+#endif
}
Tensor THSLinalg_qr(const Tensor tensor, const char mode, Tensor* R)
@@ -295,31 +400,52 @@ Tensor THSLinalg_qr(const Tensor tensor, const char mode, Tensor* R)
Tensor THSLinalg_solve(const Tensor tensor, Tensor other, bool left)
{
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_solve(*tensor, *other, left))
+#else
+ CATCH_TENSOR(torch::linalg::solve(*tensor, *other, left))
+#endif
+
}
Tensor THSLinalg_solve_ex(const Tensor tensor, Tensor other, bool left, bool check_errors, Tensor* S)
{
std::tuple<at::Tensor, at::Tensor> res;
+#if IS_260_OR_NEWER
CATCH(res = torch::linalg_solve_ex(*tensor, *other, left, check_errors););
+#else
+ CATCH(res = torch::linalg::solve_ex(*tensor, *other, left, check_errors););
+#endif
*S = ResultTensor(std::get<1>(res));
return ResultTensor(std::get<0>(res));
}
Tensor THSLinalg_solve_triangular(const Tensor tensor, Tensor other, bool upper, bool left, bool unitriangular)
{
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_solve_triangular(*tensor, *other, upper, left, unitriangular))
+#else
+ CATCH_TENSOR(torch::linalg::solve_triangular(*tensor, *other, upper, left, unitriangular))
+#endif
}
Tensor THSLinalg_solve_triangular_out(const Tensor tensor, Tensor other, bool upper, bool left, bool unitriangular, Tensor result)
{
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_solve_triangular_out(*result, *tensor, *other, upper, left, unitriangular))
+#else
+ CATCH_TENSOR(torch::linalg::solve_triangular_out(*result, *tensor, *other, upper, left, unitriangular))
+#endif
}
Tensor THSLinalg_svd(const Tensor tensor, const bool full_matrices, Tensor* S, Tensor* Vh)
{
std::tuple<at::Tensor, at::Tensor, at::Tensor> res;
+#if IS_260_OR_NEWER
CATCH(res = torch::linalg_svd(*tensor, full_matrices, c10::nullopt););
+#else
+ CATCH(res = torch::linalg::svd(*tensor, full_matrices, c10::nullopt););
+#endif
*S = ResultTensor(std::get<1>(res));
*Vh = ResultTensor(std::get<2>(res));
return ResultTensor(std::get<0>(res));
@@ -327,18 +453,30 @@ Tensor THSLinalg_svd(const Tensor tensor, const bool full_matrices, Tensor* S, T
Tensor THSLinalg_svdvals(const Tensor tensor)
{
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_svdvals(*tensor, c10::nullopt))
+#else
+ CATCH_TENSOR(torch::linalg::svdvals(*tensor, c10::nullopt))
+#endif
}
Tensor THSLinalg_tensorinv(const Tensor tensor, const int64_t ind)
{
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_tensorinv(*tensor, ind))
+#else
+ CATCH_TENSOR(torch::linalg::tensorinv(*tensor, ind))
+#endif
}
Tensor THSLinalg_tensorsolve(const Tensor tensor, Tensor other, const int64_t* dim, const int dim_length)
{
c10::optional<at::IntArrayRef> dims = (dim == nullptr) ? c10::nullopt : c10::optional<at::IntArrayRef>(at::ArrayRef<int64_t>(dim, dim_length));
+#if IS_260_OR_NEWER
CATCH_TENSOR(torch::linalg_tensorsolve(*tensor, *other, dims))
+#else
+ CATCH_TENSOR(torch::linalg::tensorsolve(*tensor, *other, dims))
+#endif
}
Tensor THSLinalg_vander(const Tensor tensor, const int64_t N)
diff --git a/src/Native/LibTorchSharp/THSNN.cpp b/src/Native/LibTorchSharp/THSNN.cpp
index 516b6ce54..2c0af81a0 100644
--- a/src/Native/LibTorchSharp/THSNN.cpp
+++ b/src/Native/LibTorchSharp/THSNN.cpp
@@ -1069,4 +1069,58 @@ Tensor THSNN_scaled_dot_product_attention(const Tensor query, const Tensor key,
auto mask = attention_mask == nullptr ? c10::nullopt : c10::optional(*attention_mask);
CATCH_TENSOR(torch::scaled_dot_product_attention(*query, *key, *value, mask, p, casual));
+}
+
+Tensor THSNN_normalize(Tensor input, float p, const int64_t* dim, float eps, Tensor out)
+{
+ auto opts = torch::nn::functional::NormalizeFuncOptions().p(p).eps(eps).dim(*dim);
+ CATCH_TENSOR(torch::nn::functional::normalize(*input, opts))
+ //CATCH_TENSOR(torch::scaled_dot_product_attention(*query, *key, *value, mask, p, casual));
+}
+
+void THSNN_Print_Module(const NNModule module) {
+ std::ostringstream oss;
+ const std::string name = module->get()->name();
+ oss << name << "(";
+ if (auto* conv2 = (*module)->as<torch::nn::Conv2d>())
+ {
+ const auto opt = &conv2->options;
+ oss << opt->in_channels() << "," << opt->out_channels() << ", K=" << opt->kernel_size();
+ oss << ", S=" << opt->stride() << ", P=" << opt->padding().index() << ", D=" << opt->dilation();
+ oss << ", G=" << opt->groups() << ", B=" << opt->bias();
+ }
+ if (auto* bn2 = (*module)->as<torch::nn::BatchNorm2d>()) {
+ const auto opt = &bn2->options;
+ oss << opt->num_features() << ", Eps=" << opt->eps() << ", M=" << (opt->momentum().has_value() ? std::to_string(opt->momentum().value()) : "NaN");
+ oss << ", A=" << opt->affine() << ", T=" << opt->track_running_stats();
+ }
+ if(auto* ln = (*module)->as<torch::nn::LayerNorm>()) //This not printed because the TorchSharp not have a ctor of LayerNorm
+ {
+ const auto opt = ln->options;
+ oss << opt.eps() << ", Elem=" << opt.elementwise_affine() << ", N=[";
+ for(int64_t i=0;i< static_cast<int64_t>(opt.normalized_shape().size());i++)
+ oss << opt.normalized_shape()[i] << ((i == static_cast<int64_t>(opt.normalized_shape().size()-1)) ? "]" : ",");
+ }
+ if (const auto* d2 = (*module)->as<torch::nn::Dropout2d>()) //This not printed because the TorchSharp not have a ctor of Dropout2d
+ {
+ auto opt = d2->options;
+ oss << opt.p() << ", Inplace=" << opt.inplace();
+ }
+ if(auto* avp2 = (*module)->as<torch::nn::AdaptiveAvgPool2d>())
+ {
+ const auto opt = &avp2->options;
+ oss << "[";
+ for (int64_t i = 0; i < opt->output_size().size(); i++)
+ oss << opt->output_size()->at(i).value() << ((i == opt->output_size().size() - 1) ? "]" : ",");
+ }
+ if (auto* amp2 = (*module)->as<torch::nn::AdaptiveMaxPool2d>())
+ {
+ const auto opt = &amp2->options;
+ oss << "[";
+ for (int64_t i = 0; i < opt->output_size().size(); i++)
+ oss << opt->output_size()->at(i).value() << ((i == opt->output_size().size() - 1) ? "]" : ",");
+ }
+
+ oss << ")";
+ std::cout << oss.str() << std::endl;
}
\ No newline at end of file
diff --git a/src/Native/LibTorchSharp/THSNN.h b/src/Native/LibTorchSharp/THSNN.h
index 6cf1c32c9..021d7af98 100644
--- a/src/Native/LibTorchSharp/THSNN.h
+++ b/src/Native/LibTorchSharp/THSNN.h
@@ -37,9 +37,147 @@ EXPORT_API(void) THSNN_AnyModule_dispose(const NNAnyModule module);
EXPORT_API(NNModule) THSNN_custom_module(const char* name, Tensor(*forward)(Tensor), NNAnyModule* outAsAnyModule);
+// Pooling
+
+EXPORT_API(NNModule) THSNN_MaxPool1d_ctor(const int64_t* kernelSize, const int64_t* stride, const int64_t* padding, const int64_t* dilation, bool ceil_mode, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_MaxPool1d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(Tensor) THSNN_MaxPool1d_forward_with_indices(const NNModule module, const Tensor tensor, Tensor *indices);
+
+EXPORT_API(NNModule) THSNN_MaxPool2d_ctor(const int64_t* kernelSize, const int kernelSizeLength, const int64_t* stride, const int strideLength, const int64_t* padding, const int paddingLength, const int64_t* dilation, const int dilationLength, bool ceil_mode, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_MaxPool2d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(Tensor) THSNN_MaxPool2d_forward_with_indices(const NNModule module, const Tensor tensor, Tensor* indices);
+
+EXPORT_API(NNModule) THSNN_MaxPool3d_ctor(const int64_t* kernelSize, const int kernelSizeLength, const int64_t* stride, const int strideLength, const int64_t* padding, const int paddingLength, const int64_t* dilation, const int dilationLength, bool ceil_mode, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_MaxPool3d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(Tensor) THSNN_MaxPool3d_forward_with_indices(const NNModule module, const Tensor tensor, Tensor* indices);
+
+EXPORT_API(NNModule) THSNN_FractionalMaxPool2d_ctor(const int64_t* kernelSize, const int kernelSizeLength, const int64_t* outputSize, const int outputSizeLength, const double* outputRatio, const int outputRatioLength, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_FractionalMaxPool2d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(Tensor) THSNN_FractionalMaxPool2d_forward_with_indices(const NNModule module, const Tensor tensor, Tensor* indices);
+
+EXPORT_API(NNModule) THSNN_FractionalMaxPool3d_ctor(const int64_t* kernelSize, const int kernelSizeLength, const int64_t* outputSize, const int outputSizeLength, const double* outputRatio, const int outputRatioLength, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_FractionalMaxPool3d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(Tensor) THSNN_FractionalMaxPool3d_forward_with_indices(const NNModule module, const Tensor tensor, Tensor* indices);
+
+EXPORT_API(NNModule) THSNN_MaxUnpool1d_ctor(const int64_t* kernelSize, const int64_t* stride, const int64_t* padding, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_MaxUnpool1d_forward(const NNModule module, const Tensor tensor, const Tensor indices, const int64_t* outputSize);
+
+EXPORT_API(NNModule) THSNN_MaxUnpool2d_ctor(const int64_t* kernelSize, const int kernelSizeLength, const int64_t* stride, const int strideLength, const int64_t* padding, const int paddingLength, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_MaxUnpool2d_forward(const NNModule module, const Tensor tensor, const Tensor indices, const int64_t* outputSize, const int outputSizeLength);
+
+EXPORT_API(NNModule) THSNN_MaxUnpool3d_ctor(const int64_t* kernelSize, const int kernelSizeLength, const int64_t* stride, const int strideLength, const int64_t* padding, const int paddingLength, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_MaxUnpool3d_forward(const NNModule module, const Tensor tensor, const Tensor indices, const int64_t* outputSize, const int outputSizeLength);
+
+EXPORT_API(NNModule) THSNN_AdaptiveAvgPool1d_ctor(const int64_t* sizes, const int length, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_AdaptiveAvgPool1d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_AdaptiveAvgPool2d_ctor(const int64_t* sizes, const int length, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_AdaptiveAvgPool2d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_AdaptiveAvgPool3d_ctor(const int64_t* sizes, const int length, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_AdaptiveAvgPool3d_forward(const NNModule module, const Tensor tensor);
+
+EXPORT_API(NNModule) THSNN_AdaptiveMaxPool1d_ctor(const int64_t* sizes, const int length, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_AdaptiveMaxPool1d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_AdaptiveMaxPool2d_ctor(const int64_t* sizes, const int length, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_AdaptiveMaxPool2d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_AdaptiveMaxPool3d_ctor(const int64_t* sizes, const int length, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_AdaptiveMaxPool3d_forward(const NNModule module, const Tensor tensor);
+
+EXPORT_API(NNModule) THSNN_AvgPool1d_ctor(const int64_t* kernelSize, const int64_t* stride, const int64_t* padding, bool ceil_mode, bool count_include_pad, int64_t divisor_override, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_AvgPool1d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_AvgPool2d_ctor(const int64_t* kernelSize, const int kernelSizeLength, const int64_t* stride, const int strideLength, const int64_t* padding, const int paddingLength, bool ceil_mode, bool count_include_pad, int64_t divisor_override, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_AvgPool2d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_AvgPool3d_ctor(const int64_t* kernelSize, const int kernelSizeLength, const int64_t* stride, const int strideLength, const int64_t* padding, const int paddingLength, bool ceil_mode, bool count_include_pad, int64_t divisor_override, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_AvgPool3d_forward(const NNModule module, const Tensor tensor);
+
+EXPORT_API(NNModule) THSNN_LPPool1d_ctor(double norm_type, const int64_t* kernelSize, const int64_t* stride, bool ceil_mode, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_LPPool1d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_LPPool2d_ctor(double norm_type, const int64_t* kernelSize, const int kernelSizeLength, const int64_t* stride, const int strideLength, bool ceil_mode, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_LPPool2d_forward(const NNModule module, const Tensor tensor);
+
+// Padding
+
+EXPORT_API(NNModule) THSNN_ZeroPad2d_ctor(const int64_t padding, NNAnyModule* outAsAnyModule);
+EXPORT_API(NNModule) THSNN_ZeroPad2d_ctor_tuple(const int64_t padding_left, const int64_t padding_right, const int64_t padding_top, const int64_t padding_bottom, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_ZeroPad2d_forward(const NNModule module, const Tensor tensor);
+
+EXPORT_API(NNModule) THSNN_ConstantPad1d_ctor(const double value, const int64_t padding, NNAnyModule* outAsAnyModule);
+EXPORT_API(NNModule) THSNN_ConstantPad1d_ctor_tuple(const double value, const int64_t padding_left, const int64_t padding_right, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_ConstantPad1d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_ConstantPad2d_ctor(const double value, const int64_t padding, NNAnyModule* outAsAnyModule);
+EXPORT_API(NNModule) THSNN_ConstantPad2d_ctor_tuple(const double value, const int64_t padding_left, const int64_t padding_right, const int64_t padding_top, const int64_t padding_bottom, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_ConstantPad2d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_ConstantPad3d_ctor(const double value, const int64_t padding, NNAnyModule* outAsAnyModule);
+EXPORT_API(NNModule) THSNN_ConstantPad3d_ctor_tuple(const double value, const int64_t padding_left, const int64_t padding_right, const int64_t padding_top, const int64_t padding_bottom, const int64_t padding_front, const int64_t padding_back, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_ConstantPad3d_forward(const NNModule module, const Tensor tensor);
+
+EXPORT_API(NNModule) THSNN_ReplicationPad1d_ctor(const int64_t padding, NNAnyModule* outAsAnyModule);
+EXPORT_API(NNModule) THSNN_ReplicationPad1d_ctor_tuple(const int64_t padding_left, const int64_t padding_right, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_ReplicationPad1d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_ReplicationPad2d_ctor(const int64_t padding, NNAnyModule* outAsAnyModule);
+EXPORT_API(NNModule) THSNN_ReplicationPad2d_ctor_tuple(const int64_t padding_left, const int64_t padding_right, const int64_t padding_top, const int64_t padding_bottom, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_ReplicationPad2d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_ReplicationPad3d_ctor(const int64_t padding, NNAnyModule* outAsAnyModule);
+EXPORT_API(NNModule) THSNN_ReplicationPad3d_ctor_tuple(const int64_t padding_left, const int64_t padding_right, const int64_t padding_top, const int64_t padding_bottom, const int64_t padding_front, const int64_t padding_back, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_ReplicationPad3d_forward(const NNModule module, const Tensor tensor);
+
+EXPORT_API(NNModule) THSNN_ReflectionPad1d_ctor(const int64_t padding, NNAnyModule* outAsAnyModule);
+EXPORT_API(NNModule) THSNN_ReflectionPad1d_ctor_tuple(const int64_t padding_left, const int64_t padding_right, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_ReflectionPad1d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_ReflectionPad2d_ctor(const int64_t padding, NNAnyModule* outAsAnyModule);
+EXPORT_API(NNModule) THSNN_ReflectionPad2d_ctor_tuple(const int64_t padding_left, const int64_t padding_right, const int64_t padding_top, const int64_t padding_bottom, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_ReflectionPad2d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_ReflectionPad3d_ctor(const int64_t padding, NNAnyModule* outAsAnyModule);
+EXPORT_API(NNModule) THSNN_ReflectionPad3d_ctor_tuple(const int64_t padding_left, const int64_t padding_right, const int64_t padding_top, const int64_t padding_bottom, const int64_t padding_front, const int64_t padding_back, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_ReflectionPad3d_forward(const NNModule module, const Tensor tensor);
+
+// Convolution
+
+EXPORT_API(NNModule) THSNN_Conv1d_ctor(const int64_t inputChannel, const int64_t outputChannel, const int64_t kernelSize, const int64_t stride, const int64_t padding, const int64_t dilation, const int64_t paddingMode, const int64_t groups, const bool bias, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_Conv1d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(Tensor) THSNN_Conv1d_bias(const NNModule module);
+EXPORT_API(void) THSNN_Conv1d_set_bias(const NNModule module, const Tensor bias);
+EXPORT_API(Tensor) THSNN_Conv1d_weight(const NNModule module);
+EXPORT_API(void) THSNN_Conv1d_set_weight(const NNModule module, const Tensor weight);
+EXPORT_API(NNModule) THSNN_Conv2d_ctor(const int64_t inputChannel, const int64_t outputChannel, const int64_t kernelSize, const int64_t stride, const int64_t padding, const int64_t dilation, const int64_t paddingMode, const int64_t groups, const bool bias, NNAnyModule* outAsAnyModule);
+EXPORT_API(NNModule) THSNN_Conv2d_ctor_1(const int64_t inputChannel, const int64_t outputChannel, const int64_t kernelX, const int64_t kernelY, const int64_t strideX, const int64_t strideY, const int64_t paddingX, const int64_t paddingY, const int64_t dilationX, const int64_t dilationY, const int64_t paddingMode, const int64_t groups, const bool bias, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_Conv2d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(Tensor) THSNN_Conv2d_weight(const NNModule module);
+EXPORT_API(void) THSNN_Conv2d_set_weight(const NNModule module, const Tensor weight);
+EXPORT_API(Tensor) THSNN_Conv2d_bias(const NNModule module);
+EXPORT_API(void) THSNN_Conv2d_set_bias(const NNModule module, const Tensor bias);
+//EXPORT_API(void) THSNN_Conv2d_print_options(const NNModule module);
+EXPORT_API(NNModule) THSNN_Conv3d_ctor(const int64_t inputChannel, const int64_t outputChannel, const int64_t kernelSize, const int64_t stride, const int64_t padding, const int64_t dilation, const int64_t paddingMode, const int64_t groups, const bool bias, NNAnyModule* outAsAnyModule);
+EXPORT_API(NNModule) THSNN_Conv3d_ctor_1(const int64_t inputChannel, const int64_t outputChannel, const int64_t kernelX, const int64_t kernelY, const int64_t kernelZ, const int64_t strideX, const int64_t strideY, const int64_t strideZ, const int64_t paddingX, const int64_t paddingY, const int64_t paddingZ, const int64_t dilationX, const int64_t dilationY, const int64_t dilationZ, const int64_t paddingMode, const int64_t groups, const bool bias, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_Conv3d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(Tensor) THSNN_Conv3d_weight(const NNModule module);
+EXPORT_API(void) THSNN_Conv3d_set_weight(const NNModule module, const Tensor weight);
+EXPORT_API(Tensor) THSNN_Conv3d_bias(const NNModule module);
+EXPORT_API(void) THSNN_Conv3d_set_bias(const NNModule module, const Tensor bias);
+
+EXPORT_API(NNModule) THSNN_ConvTranspose1d_ctor(const int64_t inputChannel, const int64_t outputChannel, const int64_t kernelSize, const int64_t stride, const int64_t padding, const int64_t output_padding, const int64_t dilation, const int64_t paddingMode, const int64_t groups, const bool bias, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_ConvTranspose1d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(Tensor) THSNN_ConvTranspose1d_bias(const NNModule module);
+EXPORT_API(void) THSNN_ConvTranspose1d_set_bias(const NNModule module, const Tensor bias);
+EXPORT_API(Tensor) THSNN_ConvTranspose1d_weight(const NNModule module);
+EXPORT_API(void) THSNN_ConvTranspose1d_set_weight(const NNModule module, const Tensor weight);
+EXPORT_API(NNModule) THSNN_ConvTranspose2d_ctor(const int64_t inputChannel, const int64_t outputChannel, const int64_t kernelSize, const int64_t stride, const int64_t padding, const int64_t output_padding, const int64_t dilation, const int64_t paddingMode, const int64_t groups, const bool bias, NNAnyModule* outAsAnyModule);
+EXPORT_API(NNModule) THSNN_ConvTranspose2d_ctor_1(const int64_t inputChannel, const int64_t outputChannel, const int64_t kernelX, const int64_t kernelY, const int64_t strideX, const int64_t strideY, const int64_t paddingX, const int64_t paddingY, const int64_t output_paddingX, const int64_t output_paddingY, const int64_t dilationX, const int64_t dilationY, const int64_t paddingMode, const int64_t groups, const bool bias, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_ConvTranspose2d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(Tensor) THSNN_ConvTranspose2d_weight(const NNModule module);
+EXPORT_API(void) THSNN_ConvTranspose2d_set_weight(const NNModule module, const Tensor weight);
+EXPORT_API(Tensor) THSNN_ConvTranspose2d_bias(const NNModule module);
+EXPORT_API(void) THSNN_ConvTranspose2d_set_bias(const NNModule module, const Tensor bias);
+EXPORT_API(NNModule) THSNN_ConvTranspose3d_ctor(const int64_t inputChannel, const int64_t outputChannel, const int64_t kernelSize, const int64_t stride, const int64_t padding, const int64_t output_padding, const int64_t dilation, const int64_t paddingMode, const int64_t groups, const bool bias, NNAnyModule* outAsAnyModule);
+EXPORT_API(NNModule) THSNN_ConvTranspose3d_ctor_1(const int64_t inputChannel, const int64_t outputChannel, const int64_t kernelX, const int64_t kernelY, const int64_t kernelZ, const int64_t strideX, const int64_t strideY, const int64_t strideZ, const int64_t paddingX, const int64_t paddingY, const int64_t paddingZ, const int64_t output_paddingX, const int64_t output_paddingY, const int64_t output_paddingZ, const int64_t dilationX, const int64_t dilationY, const int64_t dilationZ, const int64_t paddingMode, const int64_t groups, const bool bias, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_ConvTranspose3d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(Tensor) THSNN_ConvTranspose3d_weight(const NNModule module);
+EXPORT_API(void) THSNN_ConvTranspose3d_set_weight(const NNModule module, const Tensor weight);
+EXPORT_API(Tensor) THSNN_ConvTranspose3d_bias(const NNModule module);
+EXPORT_API(void) THSNN_ConvTranspose3d_set_bias(const NNModule module, const Tensor bias);
+
// Normalization
-EXPORT_API(Tensor) THSNN_normalize(const Tensor input, const double p, const int64_t dim, const double eps);
+//EXPORT_API(Tensor) THSNN_normalize(const Tensor input, const double p, const int64_t dim, const double eps);
EXPORT_API(Tensor) THSNN_batch_norm(const Tensor input, const Tensor running_mean, const Tensor running_var, const Tensor weight, const Tensor bias, const bool training, const double momentum, const double eps);
EXPORT_API(Tensor) THSNN_group_norm(const Tensor input, int64_t num_groups, const Tensor weight, const Tensor bias, const double eps);
EXPORT_API(Tensor) THSNN_instance_norm(const Tensor input, const Tensor running_mean, const Tensor running_var, const Tensor weight, const Tensor bias, const bool use_input_stats, const double momentum, const double eps);
@@ -75,6 +213,61 @@ EXPORT_API(Tensor) THSNN_interpolate(const Tensor input, const int64_t* size, co
EXPORT_API(Tensor) THSNN_grid_sample(const Tensor input, const Tensor grid, const int8_t mode, const int8_t padding_mode, const int8_t align_corners);
EXPORT_API(Tensor) THSNN_affine_grid(const Tensor theta, const int64_t* size, const int size_len, const bool align_corners);
+// Activation functions
+
+EXPORT_API(NNModule) THSNN_CELU_ctor(const double alpha, const bool inplace, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_CELU_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_ELU_ctor(const double alpha, const bool inplace, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_ELU_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_GELU_ctor(NNAnyModule* outAsAnyModule, const char* approximate);
+EXPORT_API(Tensor) THSNN_GELU_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_GLU_ctor(const int64_t dim, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_GLU_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_Hardshrink_ctor(const double lambda, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_Hardshrink_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_Hardtanh_ctor(const double min_val, const double max_val, const bool inplace, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_Hardtanh_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_LeakyReLU_ctor(const double negative_sloope, const bool inplace, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_LeakyReLU_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_Mish_ctor(NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_Mish_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_PReLU_ctor(const int64_t nparams, const double init, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_PReLU_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(Tensor) THSNN_PReLU_weight(const NNModule module);
+EXPORT_API(void) THSNN_PReLU_set_weight(const NNModule module, const Tensor weight);
+EXPORT_API(NNModule) THSNN_ReLU_ctor(bool inplace, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_ReLU_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_ReLU6_ctor(bool inplace, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_ReLU6_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_RReLU_ctor(const double lower, const double upper, const bool inplace, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_RReLU_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_LogSoftmax_ctor(int64_t dim, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_LogSoftmax_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_SELU_ctor(bool inplace, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_SELU_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_Sigmoid_ctor(NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_Sigmoid_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_SiLU_ctor(NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_SiLU_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_Softmax_ctor(const int64_t dim, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_Softmax_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_Softmax2d_ctor(NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_Softmax2d_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_Softmin_ctor(const int64_t dim, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_Softmin_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_Softplus_ctor(const double beta, const double threshold, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_Softplus_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_Softshrink_ctor(const double lambda, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_Softshrink_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_Softsign_ctor(NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_Softsign_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_Tanh_ctor(NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_Tanh_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_Tanhshrink_ctor(NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_Tanhshrink_forward(const NNModule module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_Threshold_ctor(const double threshold, const double value, const bool inplace, NNAnyModule* outAsAnyModule);
+EXPORT_API(Tensor) THSNN_Threshold_forward(const NNModule module, const Tensor tensor);
+
// Sparse
EXPORT_API(NNModule) THSNN_Embedding_ctor(const int64_t num_embeddings, const int64_t embedding_dims, const int64_t padding_idx, bool has_pi, const double max_norm, const bool has_mn, const double norm_type, const bool scale_grad_by_freq, const bool sparse, NNAnyModule* outAsAnyModule);
@@ -230,6 +423,7 @@ EXPORT_API(Tensor) THSNN_pairwise_distance(const Tensor input1, const Tensor inp
EXPORT_API(Tensor) THSNN_scaled_dot_product_attention(const Tensor query, const Tensor key, const Tensor value, const Tensor attention_mask, double p, bool casual);
+EXPORT_API(Tensor) THSNN_normalize(const Tensor input, float p, const int64_t* dim, float eps, Tensor out);
// Initializers
EXPORT_API(void) THSNN_initUniform(Tensor twrapper, double low, double high);
@@ -246,3 +440,7 @@ EXPORT_API(PackedSequence) THSNN_pack_padded_sequence(Tensor input, Tensor lengt
EXPORT_API(void) THSNN_pad_packed_sequence(PackedSequence sequence, bool batch_first, double padding_value, int64_t total_length, Tensor* res1, Tensor* res2);
EXPORT_API(Tensor) THSNN_pad_sequence(const Tensor* sequences, const int sequences_len, bool batch_first, double padding_value);
EXPORT_API(PackedSequence) THSNN_pack_sequence(const Tensor* sequences, int sequences_len, bool enforce_sorted);
+
+
+// Printer Modules
+EXPORT_API(void) THSNN_Print_Module(const NNModule module);
diff --git a/src/Native/LibTorchSharp/THSStorage.cpp b/src/Native/LibTorchSharp/THSStorage.cpp
index c966e0e97..4bc8b84e9 100644
--- a/src/Native/LibTorchSharp/THSStorage.cpp
+++ b/src/Native/LibTorchSharp/THSStorage.cpp
@@ -23,3 +23,26 @@ void* THSStorage_data_ptr(const Tensor tensor)
return dp.get();
}
+/*
+int* THSStorage_tensor_to_array_int(const Tensor tensor)
+{
+ return THSStorage_tensor_array<int>(tensor);
+}
+long* THSStorage_tensor_to_array_long(const Tensor tensor)
+{
+ return THSStorage_tensor_array<long>(tensor);
+}
+
+float* THSStorage_tensor_to_array_float(const Tensor tensor)
+{
+ return THSStorage_tensor_array<float>(tensor);
+}
+
+double* THSStorage_tensor_to_array_double(const Tensor tensor)
+{
+ return THSStorage_tensor_array<double>(tensor);
+}
+char* THSStorage_tensor_to_array_char(const Tensor tensor)
+{
+ return THSStorage_tensor_array<char>(tensor);
+}*/
\ No newline at end of file
diff --git a/src/Native/LibTorchSharp/THSStorage.h b/src/Native/LibTorchSharp/THSStorage.h
index e66492e11..53a335921 100644
--- a/src/Native/LibTorchSharp/THSStorage.h
+++ b/src/Native/LibTorchSharp/THSStorage.h
@@ -14,3 +14,19 @@ EXPORT_API(size_t) THSStorage_nbytes(const Tensor tensor);
EXPORT_API(void) THSStorage_set_nbytes(const Tensor tensor, size_t nbytes);
EXPORT_API(void*) THSStorage_data_ptr(const Tensor tensor);
+/*
+template <typename T>
+T* THSStorage_tensor_array(const Tensor tensor)
+{
+#if TORCH_VERSION_MAJOR > 2 || (TORCH_VERSION_MAJOR == 2 && TORCH_VERSION_MINOR >= 4)
+ return tensor->data_ptr<T>();
+#else
+ return tensor->data<T>();
+#endif
+}
+
+EXPORT_API(int*) THSStorage_tensor_to_array_int(const Tensor tensor);
+EXPORT_API(long*) THSStorage_tensor_to_array_long(const Tensor tensor);
+EXPORT_API(float*) THSStorage_tensor_to_array_float(const Tensor tensor);
+EXPORT_API(double*) THSStorage_tensor_to_array_double(const Tensor tensor);
+EXPORT_API(char*) THSStorage_tensor_to_array_char(const Tensor tensor);*/
\ No newline at end of file
diff --git a/src/Native/LibTorchSharp/THSTensor.cpp b/src/Native/LibTorchSharp/THSTensor.cpp
index a001045fc..4bb35a6ad 100644
--- a/src/Native/LibTorchSharp/THSTensor.cpp
+++ b/src/Native/LibTorchSharp/THSTensor.cpp
@@ -404,6 +404,11 @@ void* THSTensor_data(const Tensor tensor)
CATCH_RETURN(void*, nullptr, tensor->data_ptr());
}
+void* THSTensor_raw_data(const Tensor tensor)
+{
+ return THSTensor_data(tensor);
+}
+
float THSTensor_data_idx_float16(const Tensor tensor, const int64_t i)
{
CATCH_RETURN(float, 0.0f, (float)(tensor->data_ptr())[i]);
@@ -832,6 +837,21 @@ void THSTensor_index_put_(Tensor tensor,
auto indices = at::ArrayRef(indicesVec.data(), indicesVec.size());
CATCH(tensor->index_put_(indices, *value););
}
+/*void THSTensor_index_put_accumulate_(Tensor tensor,
+ const int64_t* indexStarts,
+ const int64_t* indexEnds,
+ const int64_t* indexSteps,
+ const Tensor* indexTensors,
+ const int indicesLength,
+ const Tensor value,
+ bool accumulate)
+{
+ at::indexing::TensorIndex* indicesArray = (at::indexing::TensorIndex*)alloca(indicesLength * sizeof(at::indexing::TensorIndex));
+ memset(indicesArray, 0, indicesLength * sizeof(at::indexing::TensorIndex));
+ completeTensorIndices(indexStarts, indexEnds, indexSteps, indexTensors, indicesArray, indicesLength);
+ auto indices = at::ArrayRef(indicesArray, indicesLength);
+ CATCH(tensor->index_put_({ indices }, *value, accumulate););
+}*/
void THSTensor_index_put_(Tensor tensor,
const int64_t* indexStarts,
@@ -869,6 +889,37 @@ void THSTensor_index_put_scalar_(Tensor tensor,
CATCH(tensor->index_put_(indices, *value););
}
+/*Tensor THSTensor_index_put(Tensor tensor,
+ const int64_t* indexStarts,
+ const int64_t* indexEnds,
+ const int64_t* indexSteps,
+ const Tensor* indexTensors,
+ const int indicesLength,
+ const Tensor value)
+{
+ at::indexing::TensorIndex* indicesArray = (at::indexing::TensorIndex*)alloca(indicesLength * sizeof(at::indexing::TensorIndex));
+ memset(indicesArray, 0, indicesLength * sizeof(at::indexing::TensorIndex));
+ completeTensorIndices(indexStarts, indexEnds, indexSteps, indexTensors, indicesArray, indicesLength);
+ auto indices = at::ArrayRef(indicesArray, indicesLength);
+ CATCH_TENSOR(tensor->index_put(indices, *value););
+}*/
+
+/*Tensor THSTensor_index_put_accumulate(Tensor tensor,
+ const int64_t* indexStarts,
+ const int64_t* indexEnds,
+ const int64_t* indexSteps,
+ const Tensor* indexTensors,
+ const int indicesLength,
+ const Tensor value,
+ bool accumulate)
+{
+ at::indexing::TensorIndex* indicesArray = (at::indexing::TensorIndex*)alloca(indicesLength * sizeof(at::indexing::TensorIndex));
+ memset(indicesArray, 0, indicesLength * sizeof(at::indexing::TensorIndex));
+ completeTensorIndices(indexStarts, indexEnds, indexSteps, indexTensors, indicesArray, indicesLength);
+ auto indices = at::ArrayRef(indicesArray, indicesLength);
+ CATCH_TENSOR(tensor->index_put({ indices }, *value, accumulate););
+}*/
+
Tensor THSTensor_index_select(Tensor tensor, int64_t dim, Tensor index)
{
CATCH_TENSOR(tensor->index_select(dim, *index));
@@ -1267,6 +1318,11 @@ Tensor THSTensor_reshape(const Tensor tensor, const int64_t* shape, const int le
CATCH_TENSOR(tensor->reshape(at::ArrayRef(shape, length)));
}
+void THSTensor_resize_(const Tensor tensor, const int64_t* shape, const int length)
+{
+ CATCH(tensor->resize_(at::ArrayRef(shape, length)););
+}
+
Tensor THSTensor_rot90(const Tensor tensor, const int64_t k, const int64_t dim1, const int64_t dim2)
{
CATCH_TENSOR(tensor->rot90(k, { dim1, dim2 }));
@@ -1897,6 +1953,21 @@ Tensor THSTensor_to_type_and_device(const Tensor tensor, int8_t scalar_type, con
);
}
+/*Tensor THSTensor_device_and_non_blocking(const Tensor tensor, const int device_type, const int device_index, const bool non_blocking)
+{
+ CATCH_RETURN_Tensor(
+ auto device = c10::Device((c10::DeviceType)device_type, (c10::DeviceIndex)device_index);
+ res = ResultTensor(tensor->to(device, non_blocking, at::ScalarType(scalar_type), false));
+ );
+}*/
+Tensor THSTensor_to_type_and_device_and_non_blocking(const Tensor tensor, int8_t scalar_type, const int device_type, const int device_index,const bool non_blocking)
+{
+ CATCH_RETURN_Tensor(
+ auto device = c10::Device((c10::DeviceType)device_type, (c10::DeviceIndex)device_index);
+ res = ResultTensor(tensor->to(device, at::ScalarType(scalar_type),non_blocking, false));
+ );
+}
+
Tensor THSTensor_triu(const Tensor tensor, const int64_t diagonal, const bool inplace)
{
CATCH_TENSOR(inplace ? tensor->triu_(diagonal) : tensor->triu(diagonal));
@@ -2284,6 +2355,19 @@ Tensor THSTensor_unflatten_names(Tensor tensor, const char** names, const int64_
return nullptr;
}
+bool THSTensor_is_coalesce(Tensor tensor)
+{
+ return tensor->is_coalesced();
+}
+
+Tensor THSTensor_coalesce(Tensor tensor)
+{
+ CATCH(
+ return ResultTensor(tensor->coalesce());
+ );
+ return nullptr;
+}
+
Tensor THSTensor_quantize_per_tensor(const Tensor tensor, double scale, int64_t zero_point, int8_t scalar_type)
{
CATCH_TENSOR(torch::quantize_per_tensor(*tensor, scale, zero_point, at::ScalarType(scalar_type)));
diff --git a/src/Native/LibTorchSharp/THSTensor.h b/src/Native/LibTorchSharp/THSTensor.h
index 73bff0403..ea55732e2 100644
--- a/src/Native/LibTorchSharp/THSTensor.h
+++ b/src/Native/LibTorchSharp/THSTensor.h
@@ -1,4 +1,4 @@
-// Copyright (c) .NET Foundation and Contributors. All Rights Reserved. See LICENSE in the project root for license information.
+// Copyright (c) .NET Foundation and Contributors. All Rights Reserved. See LICENSE in the project root for license information.
#pragma once
#include "../Stdafx.h"
@@ -395,6 +395,8 @@ EXPORT_API(Tensor) THSTensor_cumsum(const Tensor tensor, const int64_t dim, bool
EXPORT_API(void*) THSTensor_data(const Tensor tensor);
+EXPORT_API(void*) THSTensor_raw_data(const Tensor tensor);
+
EXPORT_API(float) THSTensor_data_idx_float16(const Tensor tensor, const int64_t i);
EXPORT_API(float) THSTensor_data_idx_bfloat16(const Tensor tensor, const int64_t i);
@@ -672,6 +674,7 @@ EXPORT_API(void) THSTensor_index_copy_(const Tensor tensor, const int64_t dim, c
EXPORT_API(Tensor) THSTensor_index_fill(const Tensor tensor, const int64_t dim, const Tensor index, const Scalar value);
EXPORT_API(void) THSTensor_index_fill_(const Tensor tensor, const int64_t dim, const Tensor index, const Scalar value);
+
EXPORT_API(Tensor) THSTensor_indices(Tensor tensor);
EXPORT_API(Tensor) THSTensor_index(Tensor tensor,
@@ -681,6 +684,14 @@ EXPORT_API(Tensor) THSTensor_index(Tensor tensor,
const Tensor* indexTensors,
const int indicesLength);
+EXPORT_API(void) THSTensor_index_put_(Tensor tensor,
+ const int64_t* indexStarts,
+ const int64_t* indexEnds,
+ const int64_t* indexSteps,
+ const Tensor* indexTensors,
+ const int indicesLength,
+ const Tensor value);
+
EXPORT_API(void) THSTensor_index_put_scalar_(Tensor tensor,
const int64_t* indexStarts,
const int64_t* indexEnds,
@@ -689,14 +700,31 @@ EXPORT_API(void) THSTensor_index_put_scalar_(Tensor tensor,
const int indicesLength,
const Scalar value);
-EXPORT_API(void) THSTensor_index_put_(Tensor tensor,
+/*EXPORT_API(void) THSTensor_index_put_accumulate_(Tensor tensor,
const int64_t* indexStarts,
const int64_t* indexEnds,
const int64_t* indexSteps,
const Tensor* indexTensors,
const int indicesLength,
const Tensor value,
- const bool accumulate = false);
+ bool accumulate);*/
+
+/*EXPORT_API(Tensor) THSTensor_index_put(Tensor tensor,
+ const int64_t* indexStarts,
+ const int64_t* indexEnds,
+ const int64_t* indexSteps,
+ const Tensor* indexTensors,
+ const int indicesLength,
+ const Tensor value);
+*/
+/*EXPORT_API(Tensor) THSTensor_index_put_accumulate(Tensor tensor,
+ const int64_t* indexStarts,
+ const int64_t* indexEnds,
+ const int64_t* indexSteps,
+ const Tensor* indexTensors,
+ const int indicesLength,
+ const Tensor value,
+ bool accumulate);*/
EXPORT_API(Tensor) THSTensor_index_select(Tensor tensor, int64_t dim, Tensor index);
@@ -1167,6 +1195,8 @@ EXPORT_API(int) THSTensor_requires_grad(const Tensor tensor);
EXPORT_API(Tensor) THSTensor_reshape(const Tensor tensor, const int64_t* shape, const int length);
+EXPORT_API(void) THSTensor_resize_(const Tensor tensor, const int64_t* shape, const int length);
+
EXPORT_API(Tensor) THSTensor_roll(const Tensor tensor, const int64_t* shifts, const int shLength, const int64_t* dims, const int dimLength);
EXPORT_API(Tensor) THSTensor_rot90(const Tensor tensor, const int64_t k, const int64_t dim1, const int64_t dim2);
@@ -1402,6 +1432,10 @@ EXPORT_API(Tensor) THSTensor_to_type(const Tensor tensor, int8_t scalar_type, co
EXPORT_API(Tensor) THSTensor_to_type_and_device(const Tensor tensor, int8_t scalar_type, const int device_type, const int device_index, const bool copy, const bool non_blocking);
+//EXPORT_API(Tensor) THSTensor_device_and_non_blocking(const Tensor tensor, const int device_type, const int device_index, const bool non_blocking);
+
+EXPORT_API(Tensor) THSTensor_to_type_and_device_and_non_blocking(const Tensor tensor, int8_t scalar_type, const int device_type, const int device_index, const bool non_blocking);
+
EXPORT_API(void) THSTensor_topk(const Tensor tensor, Tensor* (*allocator)(size_t length), const int k, const int64_t dim, const bool largest, const bool sorted);
EXPORT_API(Tensor) THSTensor_trunc(const Tensor tensor);
@@ -1797,7 +1831,6 @@ EXPORT_API(Tensor) THSTensor_fftshift(const Tensor tensor, const int64_t* dim, c
EXPORT_API(Tensor) THSTensor_ifftshift(const Tensor tensor, const int64_t* dim, const int dim_length);
-
// Spectral Ops
EXPORT_API(Tensor) THSTensor_bartlett_window(const int64_t len, bool periodic, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad);
@@ -1820,3 +1853,6 @@ EXPORT_API(Tensor) THSTensor_int_repr(const Tensor tensor);
EXPORT_API(Tensor) THSTensor_q_per_channel_scales(const Tensor tensor);
EXPORT_API(Tensor) THSTensor_q_per_channel_zero_points(const Tensor tensor);
EXPORT_API(int64_t) THSTensor_q_per_channel_axis(const Tensor tensor);
+
+EXPORT_API(Tensor) THSTensor_coalesce(const Tensor x);
+EXPORT_API(bool) THSTensor_is_coalesce(const Tensor x);
\ No newline at end of file
diff --git a/src/Native/LibTorchSharp/THSTorch.cpp b/src/Native/LibTorchSharp/THSTorch.cpp
index ef27842c6..d439421c7 100644
--- a/src/Native/LibTorchSharp/THSTorch.cpp
+++ b/src/Native/LibTorchSharp/THSTorch.cpp
@@ -4,6 +4,11 @@
#include "torch/torch.h"
#include "torch/cuda.h"
+const char* THSTorch_libtorch_version()
+{
+ return TORCH_VERSION;
+}
+
void THSTorch_manual_seed(const int64_t seed)
{
torch::manual_seed(seed);
@@ -53,7 +58,12 @@ void THSBackend_cudnn_set_allow_tf32(const bool flag)
bool THSBackend_cuda_get_allow_fp16_reduced_precision_reduction()
{
auto result = false;
- CATCH(result = at::globalContext().allowFP16ReductionCuBLAS() == at::CuBLASReductionOption::AllowReducedPrecisionWithSplitK;);
+#if TORCH_VERSION_MAJOR >= 2 && TORCH_VERSION_MINOR >= 11
+ CATCH(result = at::globalContext().allowFP16ReductionCuBLAS()==at::CuBLASReductionOption::AllowReducedPrecisionWithSplitK;);
+#else
+ CATCH(result = at::globalContext().allowFP16ReductionCuBLAS(););
+#endif
+
return result;
}
@@ -117,6 +127,7 @@ Generator THSGenerator_new(uint64_t seed, int64_t device, int64_t index)
{
// TODO: Support creation of GPU RNGs. 'device' and 'index' are in the
// function signature in preparation thereof.
+ //auto dl = std::make_shared(c10::Device(c10::DeviceType::CUDA, device), c10::DispatchKeySet()).get();
return new at::Generator(at::detail::createCPUGenerator(seed));
}
@@ -207,6 +218,7 @@ Scalar THSTorch_int32_to_scalar(int32_t value)
Scalar THSTorch_int64_to_scalar(int64_t value)
{
return new torch::Scalar(value);
+ //return new torch::Scalar(static_cast(value));
}
Scalar THSTorch_float32_to_scalar(float value)
@@ -221,12 +233,12 @@ Scalar THSTorch_float64_to_scalar(double value)
Scalar THSTorch_float16_to_scalar(float value)
{
- return new torch::Scalar((c10::Half)value);
+ return new torch::Scalar(static_cast(value));
}
Scalar THSTorch_bfloat16_to_scalar(float value)
{
- return new torch::Scalar((c10::BFloat16)value);
+ return new torch::Scalar(static_cast(value));
}
Scalar THSTorch_bool_to_scalar(bool value)
@@ -289,6 +301,12 @@ void THSTorch_scalar_to_float16(Scalar value, unsigned short *res)
*res = value->toHalf().x;
}
+
+/*void THSTorch_scalar_to_bfloat16(Scalar value, c10::BFloat16* res)
+{
+ *res = value->toBFloat16();
+}*/
+
void THSTorch_scalar_to_complex32(Scalar value, float* real, float* imaginary)
{
auto result = value->toComplexFloat();
@@ -326,4 +344,10 @@ double THSSpecial_erf_scalar(const double x)
double THSSpecial_erfc_scalar(const double x)
{
return erfc(x);
-}
\ No newline at end of file
+}
+
+
+/*bool THSTorch_jit_is_scripting()
+{
+
+}*/
\ No newline at end of file
diff --git a/src/Native/LibTorchSharp/THSTorch.h b/src/Native/LibTorchSharp/THSTorch.h
index bad8e073a..9e6acb0eb 100644
--- a/src/Native/LibTorchSharp/THSTorch.h
+++ b/src/Native/LibTorchSharp/THSTorch.h
@@ -4,9 +4,11 @@
#include "../Stdafx.h"
#include "Utils.h"
-
+#include
+//#include
// API.
+EXPORT_API(const char*) THSTorch_libtorch_version();
// Sets manually the seed.
EXPORT_API(void) THSTorch_manual_seed(const int64_t seed);
EXPORT_API(void) THSCuda_manual_seed(const int64_t seed);
@@ -79,6 +81,7 @@ EXPORT_API(bool) THSTorch_scalar_to_bool(Scalar value);
EXPORT_API(void) THSTorch_scalar_to_bfloat16(Scalar value, unsigned short* res);
EXPORT_API(void) THSTorch_scalar_to_float16(Scalar value, unsigned short* res);
+//EXPORT_API(void) THSTorch_scalar_to_bfloat16(Scalar value, c10::BFloat16* res);
EXPORT_API(void) THSTorch_scalar_to_complex32(Scalar value, float* real, float* imaginary);
EXPORT_API(void) THSTorch_scalar_to_complex64(Scalar value, double* real, double* imaginary);
@@ -92,3 +95,4 @@ EXPORT_API(void) THSTorch_dispose_scalar(Scalar scalar);
EXPORT_API(double) THSSpecial_erf_scalar(const double x);
EXPORT_API(double) THSSpecial_erfc_scalar(const double x);
+
diff --git a/src/Native/LibTorchSharp/THSVision.cpp b/src/Native/LibTorchSharp/THSVision.cpp
index 5fd3ecdcf..532362556 100644
--- a/src/Native/LibTorchSharp/THSVision.cpp
+++ b/src/Native/LibTorchSharp/THSVision.cpp
@@ -51,7 +51,7 @@ void _hsv_to_rgb(at::Tensor& h, at::Tensor& s, at::Tensor& v, at::Tensor& img)
auto i = torch::floor(h6);
auto f = h6 - i;
i = i.to(at::ScalarType::Int) % 6;
-
+
auto p = torch::clamp((v * (1.0f - s)), 0.0, 1.0);
auto q = torch::clamp((v * (1.0 - s * f)), 0.0, 1.0);
auto t = torch::clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0);
diff --git a/src/Native/LibTorchSharp/Utils.h b/src/Native/LibTorchSharp/Utils.h
index 4c3606491..42573753b 100644
--- a/src/Native/LibTorchSharp/Utils.h
+++ b/src/Native/LibTorchSharp/Utils.h
@@ -2,9 +2,8 @@
#pragma once
#include
-
#include "torch/torch.h"
-
+#include
extern thread_local char *torch_last_err;
typedef torch::Tensor *Tensor;
@@ -59,8 +58,24 @@ struct TensorArray {
// Return undefined tensors as nullptr to C#
inline Tensor ResultTensor(const at::Tensor & res)
{
- if (res.defined())
+ if (res.defined()) {
+
+ //TODO: autocast here only if this is an inner scope
+
+ /*at::Tensor* resT = new torch::Tensor(res);
+ if (at::autocast::is_autocast_cache_enabled()){
+ if (res.is_cuda()) {
+ ::std::cout << "IS CUDA" << std::endl;
+ resT->to(at::autocast::get_autocast_gpu_dtype());
+ }
+ if (res.is_cpu()) {
+ ::std::cout << "IS CPU" << std::endl;
+ resT->to(at::autocast::get_autocast_cpu_dtype());
+ }
+ }
+ return resT;*/
return new torch::Tensor(res);
+ }
else
return nullptr;
}
diff --git a/src/Native/build.cmd b/src/Native/build.cmd
index c0c26c600..9b3b901d1 100644
--- a/src/Native/build.cmd
+++ b/src/Native/build.cmd
@@ -160,4 +160,4 @@ exit /B 0
:Failure
:: Build failed
echo Failed to generate native component build project!
-exit /b 1
+exit /b 1
\ No newline at end of file
diff --git a/src/Native/build.proj b/src/Native/build.proj
index 6dbbc70a9..a6898465d 100644
--- a/src/Native/build.proj
+++ b/src/Native/build.proj
@@ -31,7 +31,6 @@
Condition="'$(OS)' != 'Windows_NT'">
-
--stripsymbols
--configuration $(NativeConfiguration) --arch $(TargetArchitecture) $(StripArgs) --libtorchpath $(LibTorchCmakePath)
@@ -44,9 +43,13 @@
-
+
$(NativeConfiguration) $(TargetArchitecture) --libtorchpath $(LibTorchCmakePath)
+
+
+ $(NativeConfiguration) $(TargetArchitecture) --libtorchpath $(CustomLibTorchFullPath)
+
@@ -57,8 +60,7 @@
-
+
diff --git a/src/TorchSharp/Amp/AMPManager.cs b/src/TorchSharp/Amp/AMPManager.cs
new file mode 100644
index 000000000..11bc1aaa2
--- /dev/null
+++ b/src/TorchSharp/Amp/AMPManager.cs
@@ -0,0 +1,215 @@
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Runtime.CompilerServices;
+using TorchSharp.PInvoke;
+
+namespace TorchSharp.Amp
+{
+ [Obsolete("Use AutocastMode instaed", true)]
+ public class AMPManager : IDisposable
+ {
+
+ //TODO: Make Singleton THREADSAFE
+ public class TensorConverter
+ {
+ //public torch.Tensor Tensor;
+ public IntPtr PrevHandle;
+ public IntPtr Handle;
+ public torch.ScalarType Dtype;
+ public torch.ScalarType FastDtype = torch.ScalarType.Float32;
+ public TensorCalledIn Called, Status;
+ public enum TensorCalledIn
+ {
+ OutSide,
+ InsideEnter
+ }
+
+ public TensorConverter(IntPtr handle)
+ {
+ this.PrevHandle = handle;
+ this.Handle = handle;
+ this.Dtype = (torch.ScalarType)NativeMethods.THSTensor_type(handle);
+ this.FastDtype = AutocastMode.GetInstance().GetFastType();
+
+ Status = TensorConverter.TensorCalledIn.InsideEnter;
+ }
+ /*public TensorConverter(torch.Tensor tensor) : this(tensor.handle)
+ {
+ this.Tensor = tensor;
+ }*/
+ }
+
+ public IList TensorsCasts = new List();
+ public bool IsEnter = false;
+ public bool IsDisposed = false;
+ /*public UnorderedMap TensorPtrs= new UnorderedMap();
+ public UnorderedMap TensorMap= new UnorderedMap();*/
+ private AutocastMode autocastMode=null;
+ public bool IsEnabled {
+ get {
+ if (autocastMode == null)
+ return false;
+ return autocastMode.IsEnabled;
+ }
+ }
+
+ private AMPManager(bool enabled)
+ {
+ if (!torch.cuda_is_available())
+ return;
+ autocastMode = AutocastMode.GetInstance(enabled);
+ }
+
+ private static AMPManager Instance;
+ public static AMPManager GetInstance(bool enabled = false)
+ {
+ return Instance ??= new AMPManager(enabled);
+ }
+
+ private torch.ScalarType GetType(IntPtr handle)
+ {
+ return (torch.ScalarType)NativeMethods.THSTensor_type(handle);
+ }
+
+ public IntPtr AutoCast(IntPtr handle)
+ {
+ return ToIf(handle, AutocastMode.GetInstance().GetFastType());
+ }
+
+ public torch.Tensor AutoCast(torch.Tensor tensor)
+ {
+ return new torch.Tensor(AutoCast(tensor.Handle));
+ //return tensor.to(AutocastMode.GetInstance().GetFastType());
+ }
+ public static IntPtr To(IntPtr ptr, torch.ScalarType type)
+ {
+ Debug.WriteLine($"{nameof(AMPManager)} Tensor converting from: {(torch.ScalarType)NativeMethods.THSTensor_type(ptr)} to: {type}");
+ var res = NativeMethods.THSTensor_to_type(ptr, (sbyte)type);
+ if (res == IntPtr.Zero)
+ torch.CheckForErrors();
+ return res;
+ }
+ public static IntPtr ToIf(IntPtr ptr, torch.ScalarType type)
+ {
+ if (!AMPManager.GetInstance().IsEnabled)
+ return ptr;
+ var res = NativeMethods.THSTensor_to_type(ptr, (sbyte)type);
+ if (res == IntPtr.Zero)
+ torch.CheckForErrors();
+ return res;
+ }
+ private void Revert()
+ {
+ for (int i = 0; i < TensorsCasts.Count; i++) {
+ var tc = TensorsCasts[i];
+ //var tt = new torch.Tensor(tc.Handle);
+ //var t = new torch.Tensor(tc.Handle) { handle = To(tc.Handle, tc.Dtype) };
+ //var t = new torch.Tensor(tc.Handle).to(tc.Dtype);
+ tc.Handle= To(tc.Handle, tc.Dtype);
+ if (tc.Handle != tc.PrevHandle)
+ tc.PrevHandle = To(tc.PrevHandle, tc.Dtype);
+ }
+ //Casting works well, but un-casting when the scope is exited does not work (reason unknown).
+ //TensorsCasts.Clear();
+ }
+
+
+ private int ExistsHandle(IntPtr handle)
+ {
+ for (int i = 0; i < TensorsCasts.Count; i++)
+ if (TensorsCasts[i].PrevHandle == handle || TensorsCasts[i].Handle == handle)
+ return i;
+ return -1;
+ }
+
+ public IntPtr Work(IntPtr handle, IntPtr prev)
+ {
+ if (!this.IsEnabled)
+ return handle;
+ /*if (IsDisposed && !IsEnter) {
+ Revert(); //Is for cleaned all
+ return IntPtr.Zero;
+ }*/
+ var idx = ExistsHandle(handle);
+ Console.WriteLine($"PTR: {handle}, PREV: {prev}, IDX: {idx}, {GetType(handle)}");
+ if (idx == -1) {
+ var tc = new TensorConverter(handle) { Called = IsEnter
+ ? TensorConverter.TensorCalledIn.InsideEnter
+ : TensorConverter.TensorCalledIn.OutSide
+ };
+
+ if (IsEnter)
+ tc.Handle = To(tc.Handle, tc.FastDtype);
+ TensorsCasts.Add(tc);
+ return tc.Handle;
+ }
+ var tcidx = TensorsCasts[idx];
+ tcidx.Handle = handle;
+ return tcidx.Handle;
+ /*if (!IsEnter && IsDisposed) {
+ if (tcidx.Called == TensorConverter.TensorCalledIn.OutSide) { //Is created outside so this can revert
+ //Is From Outside and is disposed, the tensor is created Outside so i will revert this
+ tcidx.PrevHandle = tcidx.Handle;
+ tcidx.Handle = To(tcidx.Handle, tcidx.Dtype);
+ }
+ return tcidx.Handle;
+ }
+ if (GetType(tcidx.Handle) == tcidx.FastDtype)
+ return tcidx.Handle;
+
+ if (IsEnter) {
+ tcidx.PrevHandle = tcidx.Handle;
+ tcidx.Handle = To(tcidx.Handle, tcidx.FastDtype);
+ }
+ return tcidx.Handle;*/
+ }
+
+ public IDisposable Enter()
+ {
+ if (!torch.cuda_is_available())
+ return this;
+ IsEnter = true;
+ IsDisposed = false;
+ autocastMode.SetEnabled(true, torch.CUDA);
+ Debug.WriteLine($"{nameof(AMPManager)} Enter call");
+ return this;
+ }
+ protected virtual void Dispose(bool disposing)
+ {
+ Debug.WriteLine($"{nameof(AMPManager)} Disposed call");
+ IsDisposed = true;
+ IsEnter = false;
+ Revert();
+ //Work(IntPtr.Zero, IntPtr.Zero);
+ autocastMode.Dispose();
+ //Revert();
+ /*TensorPtrs.Dispose();
+ TensorMap.Dispose();*/
+ /*if (!disposedValue) {
+ if (disposing) {
+
+
+ // TODO: dispose managed state (managed objects)
+ }
+
+ // TODO: free unmanaged resources (unmanaged objects) and override finalizer
+ // TODO: set large fields to null
+ disposedValue = true;
+ }*/
+ }
+
+ // // TODO: override finalizer only if 'Dispose(bool disposing)' has code to free unmanaged resources
+ /*~AMPManager()
+ {
+ Dispose(false);
+ }*/
+
+ public void Dispose()
+ {
+ // Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method
+ Dispose(disposing: true);
+ GC.SuppressFinalize(this);
+ }
+ }
+}
diff --git a/src/TorchSharp/Amp/AutocastMode.cs b/src/TorchSharp/Amp/AutocastMode.cs
new file mode 100644
index 000000000..9186ac913
--- /dev/null
+++ b/src/TorchSharp/Amp/AutocastMode.cs
@@ -0,0 +1,222 @@
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Linq;
+using System.Runtime.CompilerServices;
+using System.Security.Cryptography;
+using System.Text;
+using System.Threading.Tasks;
+using TorchSharp.PInvoke;
+using TorchSharp.Utils;
+
+namespace TorchSharp.Amp
+{
+ /*public static class Autocast
+ {
+ public static torch.Tensor AutoCast(this torch.Tensor input)
+ {
+ return AutocastMode.GetInstance().CastTensor(input);
+ }
+ }*/
+ //TODO: Should make Singleton and IDisposable on ENTER
+ public sealed class AutocastMode : IDisposable
+ {
+ public bool _enabled=false;
+ public bool IsEnter { private set; get; }=false;
+ public bool IsDisposed = false;
+ private bool prev_cache_enabled, prev;
+ private torch.ScalarType prev_fastdtype;
+ //internal bool Prev;
+ private bool _cache_enabled=false;
+ internal torch.ScalarType fast_dtype = torch.ScalarType.Float32;
+ internal torch.ScalarType? dtype = torch.ScalarType.Float32;
+ public DeviceType device = DeviceType.CUDA;
+ private static AutocastMode instance;
+ public static AutocastMode GetInstance(bool enabled=false)
+ {
+ //https://github.com/pytorch/pytorch/blob/e6ff07f00e04a9b58efb86a3dd70ed7280ae8522/torch/fx/experimental/proxy_tensor.py#L1251
+ return instance ??= new AutocastMode(torch.cuda_is_available() ? torch.CUDA : torch.CPU, enabled:enabled,cache_enabled:true);
+ }
+
+ private AutocastMode(torch.Device dev, torch.ScalarType? dtype = null, bool enabled=true, bool? cache_enabled = null)
+ {
+ //https://pytorch.org/docs/stable/amp.html#cuda-ops-that-can-autocast-to-float16
+ if (dtype == null)
+ dtype = torch.get_autocast_dtype(dev.type);
+ this.device = dev.type;
+ if (!torch.is_autocast_available(device))
+ throw new Exception($"User specified an unsupported autocast device_type {device}");
+ fast_dtype = torch.get_autocast_dtype(device); //If device is CPU this may return as BFloat16
+ _cache_enabled = torch.is_autocast_cache_enabled();
+ if (enabled && !torch.cuda_is_available() && dev.type == DeviceType.CUDA) //Is not available for doing multicast
+ enabled = false;
+ if (this.dtype.HasValue)
+ fast_dtype = dtype.Value;
+ if (cache_enabled.HasValue)
+ _cache_enabled = cache_enabled.Value;
+ if (dev.type != DeviceType.CPU && dev.type != DeviceType.CUDA && enabled)
+ throw new Exception($"Currently autocast does not support {dev.type} only CPU or CUDA");
+ /*if (dev.type == DeviceType.CPU) {
+ if (torch.get_autocast_dtype(device) != torch.ScalarType.Float32) {
+ Debug.WriteLine($"Currently is not support {torch.get_autocast_dtype(device)} on CPU, that feature will be add.");
+ }
+ fast_dtype = torch.ScalarType.Float32;
+ }*/
+ if (dev.type == DeviceType.CPU) {
+ //https://github.com/pytorch/pytorch/blob/e6ff07f00e04a9b58efb86a3dd70ed7280ae8522/torch/amp/autocast_mode.py#L277
+ if (enabled && (fast_dtype != torch.ScalarType.Float16 || fast_dtype != torch.ScalarType.BFloat16)) {
+ Debug.WriteLine($"In CPU autocast, but the target dtype is not suported. Disabling autocast. CPU autocast only supports dtype of {torch.ScalarType.Float16} or {torch.ScalarType.BFloat16}");
+ enabled = false;
+ }
+ } else if (dev.type == DeviceType.CUDA) {
+ if (enabled && fast_dtype == torch.ScalarType.BFloat16 && !torch.cuda.is_bf16_supported())
+ throw new Exception("Current CUDA Device does not support bfloat16. Please switch dtype to float16.");
+ }
+
+ torch.set_autocast_enabled(dev.type, true);
+ this._enabled = enabled;
+ }
+
+ public torch.ScalarType GetFastType()
+ {
+ return torch.get_autocast_dtype(device);
+ }
+ private static torch.ScalarType GetDtype(IntPtr handle)
+ {
+ return (torch.ScalarType)NativeMethods.THSTensor_type(handle);
+ }
+
+ public static IntPtr AutoCast(IntPtr handle)
+ {
+ return ToIf(handle, GetInstance().GetFastType());
+ }
+ public static (IntPtr h1, IntPtr h2) AutoCast(IntPtr handle1, IntPtr handle2)
+ {
+ var ft = GetInstance().GetFastType();
+ return (ToIf(handle1, ft), ToIf(handle2, ft));
+ }
+ public static (IntPtr h1, IntPtr h2, IntPtr h3) AutoCast(IntPtr handle1, IntPtr handle2, IntPtr handle3)
+ {
+ var ft = GetInstance().GetFastType();
+ return (ToIf(handle1, ft), ToIf(handle2, ft), ToIf(handle3, ft));
+ }
+ public static (IntPtr h1, IntPtr h2) AutoCast(IntPtr handle1, IntPtr handle2, torch.ScalarType dtype)
+ {
+ return (ToIf(handle1, dtype), ToIf(handle2, dtype));
+ }
+
+ public static (IntPtr h1, IntPtr h2, IntPtr h3) AutoCast(IntPtr handle1, IntPtr handle2, IntPtr handle3, torch.ScalarType dtype)
+ {
+ return (ToIf(handle1, dtype), ToIf(handle2, dtype), ToIf(handle3, dtype));
+ }
+
+ public static IntPtr AutoCast(IntPtr handle, torch.ScalarType dtype)
+ {
+ return ToIf(handle, dtype);
+ }
+
+ public static torch.Tensor AutoCast(torch.Tensor tensor)
+ {
+ return new torch.Tensor(AutoCast(tensor.Handle));
+ //return tensor.to(AutocastMode.GetInstance().GetFastType());
+ }
+ public static IntPtr To(IntPtr ptr, torch.ScalarType type)
+ {
+ Debug.WriteLine($"{nameof(AutocastMode)} Tensor converting from: {GetDtype(ptr)} to: {type}");
+ var res = NativeMethods.THSTensor_to_type(ptr, (sbyte)type, false, false);
+ if (res == IntPtr.Zero)
+ torch.CheckForErrors();
+ return res;
+ }
+
+ private static DeviceType GetDeviceType(IntPtr ptr)
+ {
+ return (DeviceType)NativeMethods.THSTensor_device_type(ptr);
+ }
+ public static IntPtr ToIf(IntPtr ptr, torch.ScalarType type)
+ {
+ if(GetInstance().device != DeviceType.CPU) //Warning: remove this once the C10 BFloat16 struct is finished and working
+ if (!IsAutocastEnabled() || !GetInstance().IsEnter)
+ return ptr;
+ if (GetDtype(ptr) == type) //if the tensor already has the target dtype, no conversion is necessary
+ return ptr;
+
+ //TODO: Check if is from CPU to passing BFloat16 if support
+ /*if (!NativeMethods.THSAmp_is_autocast_enabled(NativeMethods.THSTensor_device_type(ptr)))
+ return ptr;*/
+ var res = NativeMethods.THSTensor_to_type(ptr, (sbyte)type, false, false);
+ if (res == IntPtr.Zero)
+ torch.CheckForErrors();
+ return res;
+ }
+ public static IntPtr ToIf(IntPtr ptr, torch.ScalarType type, DeviceType device_type)
+ {
+ bool is_elegible = GetDtype(ptr) != torch.ScalarType.Float64 && GetDeviceType(ptr) == device_type;
+
+ if (!NativeMethods.THSAmp_is_autocast_enabled(NativeMethods.THSTensor_device_type(ptr)))
+ return ptr;
+ var res = NativeMethods.THSTensor_to_type(ptr, (sbyte)type, false,false);
+ if (res == IntPtr.Zero)
+ torch.CheckForErrors();
+ return res;
+ }
+
+ public static bool IsAutocastEnabled(DeviceType device = DeviceType.CUDA)
+ {
+ return torch.is_autocast_enabled(!torch.cuda_is_available() ? DeviceType.CPU : device);
+ }
+
+ public IDisposable Enter()
+ {
+ prev_cache_enabled = torch.is_autocast_cache_enabled();
+ prev = torch.is_autocast_enabled(device);
+ prev_fastdtype = torch.get_autocast_dtype(device);
+ torch.set_autocast_enabled(device, _enabled);
+ torch.set_autocast_dtype(device, fast_dtype);
+ torch.autocast_increment_nesting();
+ torch.set_autocast_cache_enabled(_cache_enabled);
+ IsEnter = true;
+ /*if (!_enabled) //Research this; it may be a bad idea
+ return new AutocastMode(new torch.Device(DeviceType.CUDA));*/
+ return this;
+ }
+
+ public static IDisposable AutoCastEnter()
+ {
+ return AutocastMode.GetInstance().Enter();
+ }
+
+ public void Disabled()
+ {
+ _enabled = false;
+ Dispose();
+ }
+ private void Dispose(bool disposing)
+ {
+ IsEnter = false;
+ if (torch.autocast_decrement_nesting() == 0)
+ torch.clear_autocast_cache();
+ torch.set_autocast_enabled(device, prev);
+ torch.set_autocast_dtype(device, prev_fastdtype);
+ torch.set_autocast_cache_enabled(prev_cache_enabled);
+ }
+
+ public void Dispose()
+ {
+ Dispose(disposing: true);
+ GC.SuppressFinalize(this);
+ }
+ }
+ ///
+ /// Trying to make Custom Autocast forwarded that mean in Pytorch
+ /// like this @torch.autocast(device_type="cuda")
+ ///
+ public class AutocastAttribute : Attribute
+ {
+ private DeviceType Dev;
+ public AutocastAttribute(DeviceType dev)
+ {
+ Dev = dev;
+ }
+ }
+}
diff --git a/src/TorchSharp/Amp/GradScaler.cs b/src/TorchSharp/Amp/GradScaler.cs
new file mode 100644
index 000000000..cff0bcf2e
--- /dev/null
+++ b/src/TorchSharp/Amp/GradScaler.cs
@@ -0,0 +1,574 @@
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Linq;
+using System.Runtime.CompilerServices;
+using TorchSharp.Modules;
+using TorchSharp.Utils;
+
+namespace TorchSharp.Amp
+{
+ public class GradScaler : IDisposable
+ {
+ // Master switch: when false, scale() returns its input unchanged.
+ private bool Enabled;
+ // Device this scaler manages (constructor asserts CPU or CUDA).
+ public torch.Device device;
+ // Loss-scale tensor and the step counter driving scale growth; both created lazily on first scale() call.
+ private torch.Tensor _scale, _growth_tracker;
+ // Initial values used when the tensors above are lazily created.
+ private double _init_scale;
+ private long _init_growth_tracker;
+ // Scale-adjustment knobs mirroring torch.amp.GradScaler (growth/backoff factors,
+ // growth interval). NOTE(review): public fields with underscore names — consider
+ // properties named per C# conventions.
+ public double _growth_factor;
+ public double _backoff_factor;
+ private int _growth_interval;
+ // Per-optimizer bookkeeping (stage + found_inf tensors); see _refresh_per_optimizer_state().
+ // NOTE(review): generic type arguments appear stripped in this patch text — verify against the committed file.
+ //private UnorderedMap> _per_optimizer_states = new UnorderedMap>();
+ private UnorderedMap> _per_optimizer_states = new UnorderedMap>();
+ // Standard dispose-pattern guard flag.
+ bool disposedValue;
+
+ /// <summary>
+ /// Per-optimizer lifecycle stage tracked by the scaler within one
+ /// iteration (mirrors torch.amp.grad_scaler.OptState).
+ /// </summary>
+ public enum OptState
+ {
+ Ready,
+ Unscaled,
+ Stepped
+ }
+
+ /// <summary>
+ /// Builds the default bookkeeping entry for an optimizer: stage is
+ /// <see cref="OptState.Ready"/> and no per-device "found_inf" value
+ /// has been recorded yet (null placeholder).
+ /// </summary>
+ private UnorderedMap _refresh_per_optimizer_state()
+ {
+ return new UnorderedMap() {
+ { "stage", OptState.Ready }, { "found_inf_per_device", null}
+ };
+ }
+ // Port of https://github.com/pytorch/pytorch/blob/main/torch/amp/grad_scaler.py
+ /// <summary>
+ /// Creates a gradient scaler for automatic mixed precision training.
+ /// Asserts the device is CPU or CUDA. Sanity checks on the growth and
+ /// backoff factors are only performed when <paramref name="enabled"/>
+ /// is true, matching the PyTorch implementation.
+ /// </summary>
+ /// <param name="dev">Target device (must be CPU or CUDA).</param>
+ /// <param name="init_scale">Initial loss-scale value (default 65536).</param>
+ /// <param name="growth_factor">Multiplier applied when growing the scale; must be &gt; 1.</param>
+ /// <param name="backoff_factor">Multiplier applied when backing off the scale; must be &lt; 1.</param>
+ /// <param name="growth_interval">Steps without inf/NaN before the scale is grown.</param>
+ /// <param name="enabled">When false, the scaler is a pass-through.</param>
+ public GradScaler(torch.Device dev, double init_scale = 65536, double growth_factor = 2.0,
+ double backoff_factor = 0.5, int growth_interval = 2000, bool enabled = true)
+ {
+ // Reference port consulted during development:
+ //https://gist.github.com/dorpxam/67ad2bc222b2cf567d4a6fc298375e13
+ Debug.Assert(dev.type == DeviceType.CPU || dev.type== DeviceType.CUDA);
+ device = dev;
+ Enabled = enabled;
+ _init_scale = init_scale;
+ if (Enabled) {
+ Debug.Assert(growth_factor > 1.0);
+ Debug.Assert(backoff_factor < 1.0);
+ }
+ this._growth_factor = growth_factor;
+ _backoff_factor = backoff_factor;
+ _growth_interval = growth_interval;
+ _init_growth_tracker = 0;
+
+ // NOTE(review): leftover scaffolding from the port — either wire up the
+ // default-dict behavior or delete these lines before release.
+ //_per_optimizer_states.SetDefaultDict(_refresh_per_optimizer_state());
+ //throw new NotImplementedException("This need to finish");
+ }
+
+
+ /// <summary>
+ /// Asserts that the scale and growth-tracker tensors have been
+ /// initialized (i.e. scale() was called earlier in the iteration) and
+ /// returns them as a pair.
+ /// </summary>
+ /// <param name="name">Name of the operation being attempted; used in the assertion messages.</param>
+ private Tuple check_scale_growth_tracker(string name)
+ {
+ var fix = "This may indicate your script did not use scaler.scale(loss or outputs) earlier in the iteration.";
+ Debug.Assert(!(_scale is null), $"Attempted {name} but {nameof(_scale)} is None {fix}");
+ Debug.Assert(!(_growth_tracker is null), $"Attempted {name} but {nameof(_growth_tracker)} is None {fix}");
+ return new Tuple(_scale, _growth_tracker);
+ }
+
+
+ /// <summary>
+ /// First-use initialization of the scale and growth-tracker tensors on
+ /// the given device. Asserts the tracker was not somehow created before
+ /// the scale tensor.
+ /// NOTE(review): _init_growth_tracker is a long but the tracker tensor
+ /// is created as Int32 — confirm the narrowing is intended.
+ /// </summary>
+ private void LazyInitScaleGrowthTracker(torch.Device dev)
+ {
+ Debug.Assert(_growth_tracker is null, "_growth_tracker initialized before _scale");
+
+ _scale = torch.full(1, _init_scale, torch.ScalarType.Float32, device: dev);
+ _growth_tracker = torch.full(1, _init_growth_tracker, torch.ScalarType.Int32, device: dev);
+ }
+ /// <summary>
+ /// Multiplies <paramref name="output"/> by the current loss scale and
+ /// returns the result. Returns the input unchanged when the scaler is
+ /// disabled. The scale/growth-tracker tensors are lazily created on the
+ /// output's device on first use, and the scale tensor is converted to
+ /// the output's device and dtype before the multiply.
+ /// </summary>
+ public torch.Tensor scale(torch.Tensor output)
+ {
+ if (!Enabled)
+ return output;
+ if (_scale is null)
+ LazyInitScaleGrowthTracker(output.device);
+ Debug.Assert(!(_scale is null));
+ return output * _scale.to(output.device, output.dtype, true);
+ }
+
+ public IList scale(IList outputs)
+ {
+ List stash = new List();
+
+ object ApplyScale(object value)
+ {
+ if (value is torch.Tensor tensor) {
+ Debug.Assert(tensor.device_type == DeviceType.CUDA || tensor.device_type == DeviceType.XLA);
+
+ if (stash.Count == 0) // if (stash.empty())
+ {
+ if (_scale is null || _scale.IsInvalid) {
+ LazyInitScaleGrowthTracker(tensor.device);
+ //_lazy_init_scale_growth_tracker(tensor.device);
+ }
+
+ Debug.Assert(_scale is not null && !_scale.IsInvalid);
+
+ stash.Add(new MultiDeviceReplicator(_scale)); // stash.push_back(...)
+ }
+
+ // stash.front().get(...)
+ return tensor * stash[0].Get(tensor.device_type);
+ }
+
+ if (value is IEnumerable innerIenumer) {
+ var res = new List