Add a feature for reading LaTeX articles; add test samples
- crazy_functions/test_project/Cpp/libJPG/JpegLibrary.tps +15 -0
- crazy_functions/test_project/Cpp/libJPG/UElibJPG.Build.cs +17 -0
- crazy_functions/test_project/Cpp/libJPG/jpeg-compressor.tps +15 -0
- crazy_functions/test_project/Cpp/libJPG/jpgd.cpp +3276 -0
- crazy_functions/test_project/Cpp/libJPG/jpgd.h +316 -0
- crazy_functions/test_project/Cpp/libJPG/jpge.cpp +1049 -0
- crazy_functions/test_project/Cpp/libJPG/jpge.h +172 -0
- crazy_functions/test_project/Cpp/libJPG/来源 +3 -0
- crazy_functions/test_project/latex/attention/background.tex +58 -0
- crazy_functions/test_project/latex/attention/introduction.tex +18 -0
- crazy_functions/test_project/latex/attention/model_architecture.tex +155 -0
- crazy_functions/test_project/latex/attention/parameter_attention.tex +45 -0
- crazy_functions/test_project/latex/attention/results.tex +166 -0
- crazy_functions/test_project/latex/attention/sqrt_d_trick.tex +28 -0
- crazy_functions/test_project/latex/attention/training.tex +42 -0
- crazy_functions/test_project/latex/attention/visualizations.tex +18 -0
- crazy_functions/test_project/latex/attention/why_self_attention.tex +98 -0
- crazy_functions/test_project/latex/attention/来源 +8 -0
- crazy_functions/test_project/python/dqn/__init__.py +2 -0
- crazy_functions/test_project/python/dqn/dqn.py +245 -0
- crazy_functions/test_project/python/dqn/policies.py +237 -0
- crazy_functions/test_project/python/dqn/来源 +2 -0
- crazy_functions/读文章写摘要.py +98 -0
- functional_crazy.py +29 -5
- predict.py +1 -0
crazy_functions/test_project/Cpp/libJPG/JpegLibrary.tps
ADDED
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8"?>
+<TpsData xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+  <Name>Jpeg Library</Name>
+  <Location>/Engine/Source/ThirdParty/libJPG/</Location>
+  <Date>2016-06-10T14:04:17.9005402-04:00</Date>
+  <Function>We need it because it is a 3rd party lib in GFx</Function>
+  <Justification />
+  <Eula>See license in download: http://www.ijg.org/</Eula>
+  <RedistributeTo>
+    <EndUserGroup>Licensees</EndUserGroup>
+    <EndUserGroup>Git</EndUserGroup>
+    <EndUserGroup>P4</EndUserGroup>
+  </RedistributeTo>
+  <LicenseFolder>/Engine/Source/ThirdParty/Licenses/JPEG_License.txt</LicenseFolder>
+</TpsData>
crazy_functions/test_project/Cpp/libJPG/UElibJPG.Build.cs
ADDED
@@ -0,0 +1,17 @@
+// Copyright Epic Games, Inc. All Rights Reserved.
+
+using UnrealBuildTool;
+
+public class UElibJPG : ModuleRules
+{
+  public UElibJPG(ReadOnlyTargetRules Target) : base(Target)
+  {
+    Type = ModuleType.External;
+
+    string libJPGPath = Target.UEThirdPartySourceDirectory + "libJPG";
+    PublicIncludePaths.Add(libJPGPath);
+
+    ShadowVariableWarningLevel = WarningLevel.Off;
+  }
+}
+
crazy_functions/test_project/Cpp/libJPG/jpeg-compressor.tps
ADDED
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8"?>
+<TpsData xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+  <Name>jpeg-compressor</Name>
+  <Location>/Engine/Source/ThirdParty/libJPG/</Location>
+  <Date>2016-06-10T14:07:13.8351319-04:00</Date>
+  <Function>Allows JPEG compression and decompression.</Function>
+  <Justification>Compressing video frames at runtime for reduced memory usage. Decompression to access the data afterwards.</Justification>
+  <Eula>https://code.google.com/archive/p/jpeg-compressor/</Eula>
+  <RedistributeTo>
+    <EndUserGroup>Licensees</EndUserGroup>
+    <EndUserGroup>Git</EndUserGroup>
+    <EndUserGroup>P4</EndUserGroup>
+  </RedistributeTo>
+  <LicenseFolder>None</LicenseFolder>
+</TpsData>
crazy_functions/test_project/Cpp/libJPG/jpgd.cpp
ADDED
@@ -0,0 +1,3276 @@
+// jpgd.cpp - C++ class for JPEG decompression.
+// Public domain, Rich Geldreich <richgel99@gmail.com>
+// Last updated Apr. 16, 2011
+// Alex Evans: Linear memory allocator (taken from jpge.h).
+//
+// Supports progressive and baseline sequential JPEG image files, and the most common chroma subsampling factors: Y, H1V1, H2V1, H1V2, and H2V2.
+//
+// Chroma upsampling quality: H2V2 is upsampled in the frequency domain, H2V1 and H1V2 are upsampled using point sampling.
+// Chroma upsampling reference: "Fast Scheme for Image Size Change in the Compressed Domain"
+// http://vision.ai.uiuc.edu/~dugad/research/dct/index.html
+
+#include "jpgd.h"
+#include <string.h>
+
+#include <assert.h>
+// BEGIN EPIC MOD
+#define JPGD_ASSERT(x) { assert(x); CA_ASSUME(x); } (void)0
+// END EPIC MOD
+
+#ifdef _MSC_VER
+#pragma warning (disable : 4611) // warning C4611: interaction between '_setjmp' and C++ object destruction is non-portable
+#endif
+
+// Set to 1 to enable freq. domain chroma upsampling on images using H2V2 subsampling (0=faster nearest neighbor sampling).
+// This is slower, but results in higher quality on images with highly saturated colors.
+#define JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING 1
+
+#define JPGD_TRUE (1)
+#define JPGD_FALSE (0)
+
+#define JPGD_MAX(a,b) (((a)>(b)) ? (a) : (b))
+#define JPGD_MIN(a,b) (((a)<(b)) ? (a) : (b))
+
+namespace jpgd {
+
+static inline void *jpgd_malloc(size_t nSize) { return FMemory::Malloc(nSize); }
+static inline void jpgd_free(void *p) { FMemory::Free(p); }
+
+// BEGIN EPIC MOD
+//@UE3 - use UE3 BGRA encoding instead of assuming RGBA
+// stolen from IImageWrapper.h
+enum ERGBFormatJPG
+{
+  Invalid = -1,
+  RGBA = 0,
+  BGRA = 1,
+  Gray = 2,
+};
+static ERGBFormatJPG jpg_format;
+// END EPIC MOD
+
+// DCT coefficients are stored in this sequence.
+static int g_ZAG[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 };
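
The g_ZAG table maps scan order back to raster order: entry i is the position, inside the 8x8 block, of the i-th coefficient of the JPEG zig-zag scan. A minimal, self-contained sketch of how such a table de-zigzags a block (illustrative only, not part of this commit):

// Illustrative only -- de-zigzags a 64-entry coefficient scan into an
// 8x8 raster-order block using the same table as jpgd's g_ZAG.
#include <cstdio>

static const int kZag[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 };

int main()
{
  short scan[64], block[64] = { 0 };
  for (int i = 0; i < 64; i++) scan[i] = (short)i;        // pretend coefficients
  for (int i = 0; i < 64; i++) block[kZag[i]] = scan[i];  // de-zigzag
  // block[0] is the DC term; block[1] and block[8] are the two
  // lowest-frequency AC terms, which arrive 2nd and 3rd in the scan.
  printf("%d %d %d\n", block[0], block[1], block[8]);     // prints: 0 1 2
  return 0;
}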
+
+enum JPEG_MARKER
+{
+  M_SOF0 = 0xC0, M_SOF1 = 0xC1, M_SOF2 = 0xC2, M_SOF3 = 0xC3, M_SOF5 = 0xC5, M_SOF6 = 0xC6, M_SOF7 = 0xC7, M_JPG = 0xC8,
+  M_SOF9 = 0xC9, M_SOF10 = 0xCA, M_SOF11 = 0xCB, M_SOF13 = 0xCD, M_SOF14 = 0xCE, M_SOF15 = 0xCF, M_DHT = 0xC4, M_DAC = 0xCC,
+  M_RST0 = 0xD0, M_RST1 = 0xD1, M_RST2 = 0xD2, M_RST3 = 0xD3, M_RST4 = 0xD4, M_RST5 = 0xD5, M_RST6 = 0xD6, M_RST7 = 0xD7,
+  M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_DNL = 0xDC, M_DRI = 0xDD, M_DHP = 0xDE, M_EXP = 0xDF,
+  M_APP0 = 0xE0, M_APP15 = 0xEF, M_JPG0 = 0xF0, M_JPG13 = 0xFD, M_COM = 0xFE, M_TEM = 0x01, M_ERROR = 0x100, RST0 = 0xD0
+};
+
+enum JPEG_SUBSAMPLING { JPGD_GRAYSCALE = 0, JPGD_YH1V1, JPGD_YH2V1, JPGD_YH1V2, JPGD_YH2V2 };
+
+#define CONST_BITS 13
+#define PASS1_BITS 2
+#define SCALEDONE ((int32)1)
+
+#define FIX_0_298631336 ((int32)2446)  /* FIX(0.298631336) */
+#define FIX_0_390180644 ((int32)3196)  /* FIX(0.390180644) */
+#define FIX_0_541196100 ((int32)4433)  /* FIX(0.541196100) */
+#define FIX_0_765366865 ((int32)6270)  /* FIX(0.765366865) */
+#define FIX_0_899976223 ((int32)7373)  /* FIX(0.899976223) */
+#define FIX_1_175875602 ((int32)9633)  /* FIX(1.175875602) */
+#define FIX_1_501321110 ((int32)12299) /* FIX(1.501321110) */
+#define FIX_1_847759065 ((int32)15137) /* FIX(1.847759065) */
+#define FIX_1_961570560 ((int32)16069) /* FIX(1.961570560) */
+#define FIX_2_053119869 ((int32)16819) /* FIX(2.053119869) */
+#define FIX_2_562915447 ((int32)20995) /* FIX(2.562915447) */
+#define FIX_3_072711026 ((int32)25172) /* FIX(3.072711026) */
+
+#define DESCALE(x,n) (((x) + (SCALEDONE << ((n)-1))) >> (n))
+#define DESCALE_ZEROSHIFT(x,n) (((x) + (128 << (n)) + (SCALEDONE << ((n)-1))) >> (n))
+
+#define MULTIPLY(var, cnst) ((var) * (cnst))
+
+#define CLAMP(i) ((static_cast<uint>(i) > 255) ? (((~i) >> 31) & 0xFF) : (i))
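
CLAMP(i) is a branchless clamp to 0..255: the unsigned cast makes both i < 0 and i > 255 fall into one comparison, and ((~i) >> 31) & 0xFF then yields 0 for negative i and 255 for i > 255, assuming a 32-bit int with arithmetic right shift. A standalone sketch of the same trick (illustrative only, not part of this commit):

// Illustrative only: the branchless clamp behind CLAMP(i), assuming 32-bit
// int and arithmetic right shift of negative values.
#include <cstdio>

static int clamp255(int i)
{
  if (static_cast<unsigned>(i) > 255)  // catches i < 0 and i > 255 in one test
    i = ((~i) >> 31) & 0xFF;           // i < 0:   ~i >= 0, shift gives  0 -> 0
                                       // i > 255: ~i <  0, shift gives -1 -> 0xFF
  return i;
}

int main()
{
  printf("%d %d %d\n", clamp255(-7), clamp255(100), clamp255(300)); // 0 100 255
  return 0;
}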
+
+// Compiler creates a fast path 1D IDCT for X non-zero columns
+template <int NONZERO_COLS>
+struct Row
+{
+  static void idct(int* pTemp, const jpgd_block_t* pSrc)
+  {
+    // ACCESS_COL() will be optimized at compile time to either an array access, or 0.
+    #define ACCESS_COL(x) (((x) < NONZERO_COLS) ? (int)pSrc[x] : 0)
+
+    const int z2 = ACCESS_COL(2), z3 = ACCESS_COL(6);
+
+    const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
+    const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065);
+    const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865);
+
+    const int tmp0 = (ACCESS_COL(0) + ACCESS_COL(4)) << CONST_BITS;
+    const int tmp1 = (ACCESS_COL(0) - ACCESS_COL(4)) << CONST_BITS;
+
+    const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2;
+
+    const int atmp0 = ACCESS_COL(7), atmp1 = ACCESS_COL(5), atmp2 = ACCESS_COL(3), atmp3 = ACCESS_COL(1);
+
+    const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3;
+    const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602);
+
+    const int az1 = MULTIPLY(bz1, - FIX_0_899976223);
+    const int az2 = MULTIPLY(bz2, - FIX_2_562915447);
+    const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5;
+    const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5;
+
+    const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3;
+    const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4;
+    const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3;
+    const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4;
+
+    pTemp[0] = DESCALE(tmp10 + btmp3, CONST_BITS-PASS1_BITS);
+    pTemp[7] = DESCALE(tmp10 - btmp3, CONST_BITS-PASS1_BITS);
+    pTemp[1] = DESCALE(tmp11 + btmp2, CONST_BITS-PASS1_BITS);
+    pTemp[6] = DESCALE(tmp11 - btmp2, CONST_BITS-PASS1_BITS);
+    pTemp[2] = DESCALE(tmp12 + btmp1, CONST_BITS-PASS1_BITS);
+    pTemp[5] = DESCALE(tmp12 - btmp1, CONST_BITS-PASS1_BITS);
+    pTemp[3] = DESCALE(tmp13 + btmp0, CONST_BITS-PASS1_BITS);
+    pTemp[4] = DESCALE(tmp13 - btmp0, CONST_BITS-PASS1_BITS);
+  }
+};
+
+template <>
+struct Row<0>
+{
+  static void idct(int* pTemp, const jpgd_block_t* pSrc)
+  {
+#ifdef _MSC_VER
+    pTemp; pSrc;
+#endif
+  }
+};
+
+template <>
+struct Row<1>
+{
+  static void idct(int* pTemp, const jpgd_block_t* pSrc)
+  {
+    const int dcval = (pSrc[0] << PASS1_BITS);
+
+    pTemp[0] = dcval;
+    pTemp[1] = dcval;
+    pTemp[2] = dcval;
+    pTemp[3] = dcval;
+    pTemp[4] = dcval;
+    pTemp[5] = dcval;
+    pTemp[6] = dcval;
+    pTemp[7] = dcval;
+  }
+};
+
+// Compiler creates a fast path 1D IDCT for X non-zero rows
+template <int NONZERO_ROWS>
+struct Col
+{
+  static void idct(uint8* pDst_ptr, const int* pTemp)
+  {
+    // ACCESS_ROW() will be optimized at compile time to either an array access, or 0.
+    #define ACCESS_ROW(x) (((x) < NONZERO_ROWS) ? pTemp[x * 8] : 0)
+
+    const int z2 = ACCESS_ROW(2);
+    const int z3 = ACCESS_ROW(6);
+
+    const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
+    const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065);
+    const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865);
+
+    const int tmp0 = (ACCESS_ROW(0) + ACCESS_ROW(4)) << CONST_BITS;
+    const int tmp1 = (ACCESS_ROW(0) - ACCESS_ROW(4)) << CONST_BITS;
+
+    const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2;
+
+    const int atmp0 = ACCESS_ROW(7), atmp1 = ACCESS_ROW(5), atmp2 = ACCESS_ROW(3), atmp3 = ACCESS_ROW(1);
+
+    const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3;
+    const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602);
+
+    const int az1 = MULTIPLY(bz1, - FIX_0_899976223);
+    const int az2 = MULTIPLY(bz2, - FIX_2_562915447);
+    const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5;
+    const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5;
+
+    const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3;
+    const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4;
+    const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3;
+    const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4;
+
+    int i = DESCALE_ZEROSHIFT(tmp10 + btmp3, CONST_BITS+PASS1_BITS+3);
+    pDst_ptr[8*0] = (uint8)CLAMP(i);
+
+    i = DESCALE_ZEROSHIFT(tmp10 - btmp3, CONST_BITS+PASS1_BITS+3);
+    pDst_ptr[8*7] = (uint8)CLAMP(i);
+
+    i = DESCALE_ZEROSHIFT(tmp11 + btmp2, CONST_BITS+PASS1_BITS+3);
+    pDst_ptr[8*1] = (uint8)CLAMP(i);
+
+    i = DESCALE_ZEROSHIFT(tmp11 - btmp2, CONST_BITS+PASS1_BITS+3);
+    pDst_ptr[8*6] = (uint8)CLAMP(i);
+
+    i = DESCALE_ZEROSHIFT(tmp12 + btmp1, CONST_BITS+PASS1_BITS+3);
+    pDst_ptr[8*2] = (uint8)CLAMP(i);
+
+    i = DESCALE_ZEROSHIFT(tmp12 - btmp1, CONST_BITS+PASS1_BITS+3);
+    pDst_ptr[8*5] = (uint8)CLAMP(i);
+
+    i = DESCALE_ZEROSHIFT(tmp13 + btmp0, CONST_BITS+PASS1_BITS+3);
+    pDst_ptr[8*3] = (uint8)CLAMP(i);
+
+    i = DESCALE_ZEROSHIFT(tmp13 - btmp0, CONST_BITS+PASS1_BITS+3);
+    pDst_ptr[8*4] = (uint8)CLAMP(i);
+  }
+};
+
+template <>
+struct Col<1>
+{
+  static void idct(uint8* pDst_ptr, const int* pTemp)
+  {
+    int dcval = DESCALE_ZEROSHIFT(pTemp[0], PASS1_BITS+3);
+    const uint8 dcval_clamped = (uint8)CLAMP(dcval);
+    pDst_ptr[0*8] = dcval_clamped;
+    pDst_ptr[1*8] = dcval_clamped;
+    pDst_ptr[2*8] = dcval_clamped;
+    pDst_ptr[3*8] = dcval_clamped;
+    pDst_ptr[4*8] = dcval_clamped;
+    pDst_ptr[5*8] = dcval_clamped;
+    pDst_ptr[6*8] = dcval_clamped;
+    pDst_ptr[7*8] = dcval_clamped;
+  }
+};
+
+static const uint8 s_idct_row_table[] =
+{
+  1,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0, 2,1,0,0,0,0,0,0, 2,1,1,0,0,0,0,0, 2,2,1,0,0,0,0,0, 3,2,1,0,0,0,0,0, 4,2,1,0,0,0,0,0, 4,3,1,0,0,0,0,0,
+  4,3,2,0,0,0,0,0, 4,3,2,1,0,0,0,0, 4,3,2,1,1,0,0,0, 4,3,2,2,1,0,0,0, 4,3,3,2,1,0,0,0, 4,4,3,2,1,0,0,0, 5,4,3,2,1,0,0,0, 6,4,3,2,1,0,0,0,
+  6,5,3,2,1,0,0,0, 6,5,4,2,1,0,0,0, 6,5,4,3,1,0,0,0, 6,5,4,3,2,0,0,0, 6,5,4,3,2,1,0,0, 6,5,4,3,2,1,1,0, 6,5,4,3,2,2,1,0, 6,5,4,3,3,2,1,0,
+  6,5,4,4,3,2,1,0, 6,5,5,4,3,2,1,0, 6,6,5,4,3,2,1,0, 7,6,5,4,3,2,1,0, 8,6,5,4,3,2,1,0, 8,7,5,4,3,2,1,0, 8,7,6,4,3,2,1,0, 8,7,6,5,3,2,1,0,
+  8,7,6,5,4,2,1,0, 8,7,6,5,4,3,1,0, 8,7,6,5,4,3,2,0, 8,7,6,5,4,3,2,1, 8,7,6,5,4,3,2,2, 8,7,6,5,4,3,3,2, 8,7,6,5,4,4,3,2, 8,7,6,5,5,4,3,2,
+  8,7,6,6,5,4,3,2, 8,7,7,6,5,4,3,2, 8,8,7,6,5,4,3,2, 8,8,8,6,5,4,3,2, 8,8,8,7,5,4,3,2, 8,8,8,7,6,4,3,2, 8,8,8,7,6,5,3,2, 8,8,8,7,6,5,4,2,
+  8,8,8,7,6,5,4,3, 8,8,8,7,6,5,4,4, 8,8,8,7,6,5,5,4, 8,8,8,7,6,6,5,4, 8,8,8,7,7,6,5,4, 8,8,8,8,7,6,5,4, 8,8,8,8,8,6,5,4, 8,8,8,8,8,7,5,4,
+  8,8,8,8,8,7,6,4, 8,8,8,8,8,7,6,5, 8,8,8,8,8,7,6,6, 8,8,8,8,8,7,7,6, 8,8,8,8,8,8,7,6, 8,8,8,8,8,8,8,6, 8,8,8,8,8,8,8,7, 8,8,8,8,8,8,8,8,
+};
+
+static const uint8 s_idct_col_table[] = { 1, 1, 2, 3, 3, 3, 3, 3, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 };
+
+void idct(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr, int block_max_zag)
+{
+  JPGD_ASSERT(block_max_zag >= 1);
+  JPGD_ASSERT(block_max_zag <= 64);
+
+  if (block_max_zag == 1)
+  {
+    int k = ((pSrc_ptr[0] + 4) >> 3) + 128;
+    k = CLAMP(k);
+    k = k | (k<<8);
+    k = k | (k<<16);
+
+    for (int i = 8; i > 0; i--)
+    {
+      *(int*)&pDst_ptr[0] = k;
+      *(int*)&pDst_ptr[4] = k;
+      pDst_ptr += 8;
+    }
+    return;
+  }
+
+  int temp[64];
+
+  const jpgd_block_t* pSrc = pSrc_ptr;
+  int* pTemp = temp;
+
+  const uint8* pRow_tab = &s_idct_row_table[(block_max_zag - 1) * 8];
+  int i;
+  for (i = 8; i > 0; i--, pRow_tab++)
+  {
+    switch (*pRow_tab)
+    {
+      case 0: Row<0>::idct(pTemp, pSrc); break;
+      case 1: Row<1>::idct(pTemp, pSrc); break;
+      case 2: Row<2>::idct(pTemp, pSrc); break;
+      case 3: Row<3>::idct(pTemp, pSrc); break;
+      case 4: Row<4>::idct(pTemp, pSrc); break;
+      case 5: Row<5>::idct(pTemp, pSrc); break;
+      case 6: Row<6>::idct(pTemp, pSrc); break;
+      case 7: Row<7>::idct(pTemp, pSrc); break;
+      case 8: Row<8>::idct(pTemp, pSrc); break;
+    }
+
+    pSrc += 8;
+    pTemp += 8;
+  }
+
+  pTemp = temp;
+
+  const int nonzero_rows = s_idct_col_table[block_max_zag - 1];
+  for (i = 8; i > 0; i--)
+  {
+    switch (nonzero_rows)
+    {
+      case 1: Col<1>::idct(pDst_ptr, pTemp); break;
+      case 2: Col<2>::idct(pDst_ptr, pTemp); break;
+      case 3: Col<3>::idct(pDst_ptr, pTemp); break;
+      case 4: Col<4>::idct(pDst_ptr, pTemp); break;
+      case 5: Col<5>::idct(pDst_ptr, pTemp); break;
+      case 6: Col<6>::idct(pDst_ptr, pTemp); break;
+      case 7: Col<7>::idct(pDst_ptr, pTemp); break;
+      case 8: Col<8>::idct(pDst_ptr, pTemp); break;
+    }
+
+    pTemp++;
+    pDst_ptr++;
+  }
+}
+
+void idct_4x4(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr)
+{
+  int temp[64];
+  int* pTemp = temp;
+  const jpgd_block_t* pSrc = pSrc_ptr;
+
+  for (int i = 4; i > 0; i--)
+  {
+    Row<4>::idct(pTemp, pSrc);
+    pSrc += 8;
+    pTemp += 8;
+  }
+
+  pTemp = temp;
+  for (int i = 8; i > 0; i--)
+  {
+    Col<4>::idct(pDst_ptr, pTemp);
+    pTemp++;
+    pDst_ptr++;
+  }
+}
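
idct() above tracks how many zig-zag coefficients can be non-zero (block_max_zag) and, via the two lookup tables, dispatches to Row<N>/Col<N> specializations in which the compiler folds every input at index >= N to a constant zero, so sparse blocks skip most of the arithmetic. A toy version of that dispatch pattern (illustrative only, not jpgd's API):

// Illustrative only: runtime switch over compile-time specializations, the
// same shape as idct()'s Row<N>/Col<N> dispatch.
#include <cstdio>

template <int NONZERO> struct SumRow
{
  static int sum(const int* p)
  {
    int s = 0;
    for (int x = 0; x < 8; x++)
      s += (x < NONZERO) ? p[x] : 0;  // constant-folded per specialization
    return s;
  }
};

static int sum_dispatch(const int* p, int nonzero)
{
  switch (nonzero)
  {
    case 0: return SumRow<0>::sum(p);
    case 4: return SumRow<4>::sum(p);
    case 8: return SumRow<8>::sum(p);
    default: return SumRow<8>::sum(p); // fall back to the full path
  }
}

int main()
{
  const int row[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
  printf("%d %d\n", sum_dispatch(row, 4), sum_dispatch(row, 8)); // 10 36
  return 0;
}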
+
+// Retrieve one character from the input stream.
+inline uint jpeg_decoder::get_char()
+{
+  // Any bytes remaining in buffer?
+  if (!m_in_buf_left)
+  {
+    // Try to get more bytes.
+    prep_in_buffer();
+    // Still nothing to get?
+    if (!m_in_buf_left)
+    {
+      // Pad the end of the stream with 0xFF 0xD9 (EOI marker)
+      int t = m_tem_flag;
+      m_tem_flag ^= 1;
+      if (t)
+        return 0xD9;
+      else
+        return 0xFF;
+    }
+  }
+
+  uint c = *m_pIn_buf_ofs++;
+  m_in_buf_left--;
+
+  return c;
+}
+
+// Same as previous method, except can indicate if the character is a pad character or not.
+inline uint jpeg_decoder::get_char(bool *pPadding_flag)
+{
+  if (!m_in_buf_left)
+  {
+    prep_in_buffer();
+    if (!m_in_buf_left)
+    {
+      *pPadding_flag = true;
+      int t = m_tem_flag;
+      m_tem_flag ^= 1;
+      if (t)
+        return 0xD9;
+      else
+        return 0xFF;
+    }
+  }
+
+  *pPadding_flag = false;
+
+  uint c = *m_pIn_buf_ofs++;
+  m_in_buf_left--;
+
+  return c;
+}
+
+// Inserts a previously retrieved character back into the input buffer.
+inline void jpeg_decoder::stuff_char(uint8 q)
+{
+  *(--m_pIn_buf_ofs) = q;
+  m_in_buf_left++;
+}
+
+// Retrieves one character from the input stream, but does not read past markers. Will continue to return 0xFF when a marker is encountered.
+inline uint8 jpeg_decoder::get_octet()
+{
+  bool padding_flag;
+  int c = get_char(&padding_flag);
+
+  if (c == 0xFF)
+  {
+    if (padding_flag)
+      return 0xFF;
+
+    c = get_char(&padding_flag);
+    if (padding_flag)
+    {
+      stuff_char(0xFF);
+      return 0xFF;
+    }
+
+    if (c == 0x00)
+      return 0xFF;
+    else
+    {
+      stuff_char(static_cast<uint8>(c));
+      stuff_char(0xFF);
+      return 0xFF;
+    }
+  }
+
+  return static_cast<uint8>(c);
+}
+
+// Retrieves a variable number of bits from the input stream. Does not recognize markers.
+inline uint jpeg_decoder::get_bits(int num_bits)
+{
+  if (!num_bits)
+    return 0;
+
+  uint i = m_bit_buf >> (32 - num_bits);
+
+  if ((m_bits_left -= num_bits) <= 0)
+  {
+    m_bit_buf <<= (num_bits += m_bits_left);
+
+    uint c1 = get_char();
+    uint c2 = get_char();
+    m_bit_buf = (m_bit_buf & 0xFFFF0000) | (c1 << 8) | c2;
+
+    m_bit_buf <<= -m_bits_left;
+
+    m_bits_left += 16;
+
+    JPGD_ASSERT(m_bits_left >= 0);
+  }
+  else
+    m_bit_buf <<= num_bits;
+
+  return i;
+}
+
+// Retrieves a variable number of bits from the input stream. Markers will not be read into the input bit buffer. Instead, an infinite number of all 1's will be returned when a marker is encountered.
+inline uint jpeg_decoder::get_bits_no_markers(int num_bits)
+{
+  if (!num_bits)
+    return 0;
+
+  uint i = m_bit_buf >> (32 - num_bits);
+
+  if ((m_bits_left -= num_bits) <= 0)
+  {
+    m_bit_buf <<= (num_bits += m_bits_left);
+
+    if ((m_in_buf_left < 2) || (m_pIn_buf_ofs[0] == 0xFF) || (m_pIn_buf_ofs[1] == 0xFF))
+    {
+      uint c1 = get_octet();
+      uint c2 = get_octet();
+      m_bit_buf |= (c1 << 8) | c2;
+    }
+    else
+    {
+      m_bit_buf |= ((uint)m_pIn_buf_ofs[0] << 8) | m_pIn_buf_ofs[1];
+      m_in_buf_left -= 2;
+      m_pIn_buf_ofs += 2;
+    }
+
+    m_bit_buf <<= -m_bits_left;
+
+    m_bits_left += 16;
+
+    JPGD_ASSERT(m_bits_left >= 0);
+  }
+  else
+    m_bit_buf <<= num_bits;
+
+  return i;
+}
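
Both get_bits variants share one contract: the next bits to be consumed sit in the high bits of the 32-bit m_bit_buf, and a refill pulls in 16 bits at a time (get_bits_no_markers routes the refill through get_octet whenever a 0xFF byte might start a marker). A minimal MSB-first bit reader with the same take-from-the-top convention (illustrative only; the 16-bit refill and marker handling are omitted):

// Illustrative only: an MSB-first bit reader. Valid for n in 1..25 per call.
#include <cstdint>
#include <cstdio>

struct BitReader
{
  const uint8_t* p;
  uint32_t buf = 0;   // next bits to consume, MSB-aligned
  int left = 0;       // number of valid bits in buf

  explicit BitReader(const uint8_t* src) : p(src) {}

  uint32_t get_bits(int n)
  {
    while (left < n)                         // refill one byte at a time
    {
      buf |= (uint32_t)*p++ << (24 - left);
      left += 8;
    }
    uint32_t v = buf >> (32 - n);            // take the top n bits
    buf <<= n;
    left -= n;
    return v;
  }
};

int main()
{
  const uint8_t data[] = { 0xAB, 0xCD };     // bits: 1010 1011 1100 1101
  BitReader br(data);
  unsigned a = br.get_bits(4);
  unsigned b = br.get_bits(4);
  unsigned c = br.get_bits(8);
  printf("%X %X %X\n", a, b, c);             // prints: A B CD
  return 0;
}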
+
+// Decodes a Huffman encoded symbol.
+inline int jpeg_decoder::huff_decode(huff_tables *pH)
+{
+  int symbol;
+
+  // Check first 8-bits: do we have a complete symbol?
+  if ((symbol = pH->look_up[m_bit_buf >> 24]) < 0)
+  {
+    // Decode more bits, use a tree traversal to find symbol.
+    int ofs = 23;
+    do
+    {
+      symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))];
+      ofs--;
+    } while (symbol < 0);
+
+    get_bits_no_markers(8 + (23 - ofs));
+  }
+  else
+    get_bits_no_markers(pH->code_size[symbol]);
+
+  return symbol;
+}
+
+// Decodes a Huffman encoded symbol.
+inline int jpeg_decoder::huff_decode(huff_tables *pH, int& extra_bits)
+{
+  int symbol;
+
+  // Check first 8-bits: do we have a complete symbol?
+  if ((symbol = pH->look_up2[m_bit_buf >> 24]) < 0)
+  {
+    // Use a tree traversal to find symbol.
+    int ofs = 23;
+    do
+    {
+      symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))];
+      ofs--;
+    } while (symbol < 0);
+
+    get_bits_no_markers(8 + (23 - ofs));
+
+    extra_bits = get_bits_no_markers(symbol & 0xF);
+  }
+  else
+  {
+    JPGD_ASSERT(((symbol >> 8) & 31) == pH->code_size[symbol & 255] + ((symbol & 0x8000) ? (symbol & 15) : 0));
+
+    if (symbol & 0x8000)
+    {
+      get_bits_no_markers((symbol >> 8) & 31);
+      extra_bits = symbol >> 16;
+    }
+    else
+    {
+      int code_size = (symbol >> 8) & 31;
+      int num_extra_bits = symbol & 0xF;
+      int bits = code_size + num_extra_bits;
+      if (bits <= (m_bits_left + 16))
+        extra_bits = get_bits_no_markers(bits) & ((1 << num_extra_bits) - 1);
+      else
+      {
+        get_bits_no_markers(code_size);
+        extra_bits = get_bits_no_markers(num_extra_bits);
+      }
+    }
+
+    symbol &= 0xFF;
+  }
+
+  return symbol;
+}
+
+// Tables and macro used to fully decode the DPCM differences.
+static const int s_extend_test[16] = { 0, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000 };
+static const int s_extend_offset[16] = { 0, -1, -3, -7, -15, -31, -63, -127, -255, -511, -1023, -2047, -4095, -8191, -16383, -32767 };
+static const int s_extend_mask[] = { 0, (1<<0), (1<<1), (1<<2), (1<<3), (1<<4), (1<<5), (1<<6), (1<<7), (1<<8), (1<<9), (1<<10), (1<<11), (1<<12), (1<<13), (1<<14), (1<<15), (1<<16) };
+#define HUFF_EXTEND(x,s) ((x) < s_extend_test[s] ? (x) + s_extend_offset[s] : (x))
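
HUFF_EXTEND implements JPEG's magnitude coding: a coefficient is transmitted as a size s plus s raw bits x, and when x's top bit is clear the value is negative and must be offset by s_extend_offset[s] = -(2^s - 1). A worked, self-contained version (illustrative only; valid for s in 1..15):

// Illustrative only: turning an s-bit raw value into a signed coefficient,
// as HUFF_EXTEND does with its lookup tables.
#include <cstdio>

static int huff_extend(int x, int s)
{
  const int test = 1 << (s - 1);       // same role as s_extend_test[s]
  const int offset = -((1 << s) - 1);  // same role as s_extend_offset[s]
  return (x < test) ? x + offset : x;
}

int main()
{
  // s = 3 encodes the magnitudes 4..7 with either sign:
  for (int x = 0; x < 8; x++)
    printf("%d ", huff_extend(x, 3));  // prints: -7 -6 -5 -4 4 5 6 7
  printf("\n");
  return 0;
}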
+
+// Clamps a value between 0-255.
+inline uint8 jpeg_decoder::clamp(int i)
+{
+  if (static_cast<uint>(i) > 255)
+    i = (((~i) >> 31) & 0xFF);
+
+  return static_cast<uint8>(i);
+}
+
+namespace DCT_Upsample
+{
+  struct Matrix44
+  {
+    typedef int Element_Type;
+    enum { NUM_ROWS = 4, NUM_COLS = 4 };
+
+    Element_Type v[NUM_ROWS][NUM_COLS];
+
+    inline int rows() const { return NUM_ROWS; }
+    inline int cols() const { return NUM_COLS; }
+
+    inline const Element_Type & at(int r, int c) const { return v[r][c]; }
+    inline Element_Type & at(int r, int c) { return v[r][c]; }
+
+    inline Matrix44() { }
+
+    inline Matrix44& operator += (const Matrix44& a)
+    {
+      for (int r = 0; r < NUM_ROWS; r++)
+      {
+        at(r, 0) += a.at(r, 0);
+        at(r, 1) += a.at(r, 1);
+        at(r, 2) += a.at(r, 2);
+        at(r, 3) += a.at(r, 3);
+      }
+      return *this;
+    }
+
+    inline Matrix44& operator -= (const Matrix44& a)
+    {
+      for (int r = 0; r < NUM_ROWS; r++)
+      {
+        at(r, 0) -= a.at(r, 0);
+        at(r, 1) -= a.at(r, 1);
+        at(r, 2) -= a.at(r, 2);
+        at(r, 3) -= a.at(r, 3);
+      }
+      return *this;
+    }
+
+    friend inline Matrix44 operator + (const Matrix44& a, const Matrix44& b)
+    {
+      Matrix44 ret;
+      for (int r = 0; r < NUM_ROWS; r++)
+      {
+        ret.at(r, 0) = a.at(r, 0) + b.at(r, 0);
+        ret.at(r, 1) = a.at(r, 1) + b.at(r, 1);
+        ret.at(r, 2) = a.at(r, 2) + b.at(r, 2);
+        ret.at(r, 3) = a.at(r, 3) + b.at(r, 3);
+      }
+      return ret;
+    }
+
+    friend inline Matrix44 operator - (const Matrix44& a, const Matrix44& b)
+    {
+      Matrix44 ret;
+      for (int r = 0; r < NUM_ROWS; r++)
+      {
+        ret.at(r, 0) = a.at(r, 0) - b.at(r, 0);
+        ret.at(r, 1) = a.at(r, 1) - b.at(r, 1);
+        ret.at(r, 2) = a.at(r, 2) - b.at(r, 2);
+        ret.at(r, 3) = a.at(r, 3) - b.at(r, 3);
+      }
+      return ret;
+    }
+
+    static inline void add_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b)
+    {
+      for (int r = 0; r < 4; r++)
+      {
+        pDst[0*8 + r] = static_cast<jpgd_block_t>(a.at(r, 0) + b.at(r, 0));
+        pDst[1*8 + r] = static_cast<jpgd_block_t>(a.at(r, 1) + b.at(r, 1));
+        pDst[2*8 + r] = static_cast<jpgd_block_t>(a.at(r, 2) + b.at(r, 2));
+        pDst[3*8 + r] = static_cast<jpgd_block_t>(a.at(r, 3) + b.at(r, 3));
+      }
+    }
+
+    static inline void sub_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b)
+    {
+      for (int r = 0; r < 4; r++)
+      {
+        pDst[0*8 + r] = static_cast<jpgd_block_t>(a.at(r, 0) - b.at(r, 0));
+        pDst[1*8 + r] = static_cast<jpgd_block_t>(a.at(r, 1) - b.at(r, 1));
+        pDst[2*8 + r] = static_cast<jpgd_block_t>(a.at(r, 2) - b.at(r, 2));
+        pDst[3*8 + r] = static_cast<jpgd_block_t>(a.at(r, 3) - b.at(r, 3));
+      }
+    }
+  };
+
+  const int FRACT_BITS = 10;
+  const int SCALE = 1 << FRACT_BITS;
+
+  typedef int Temp_Type;
+  #define D(i) (((i) + (SCALE >> 1)) >> FRACT_BITS)
+  #define F(i) ((int)((i) * SCALE + .5f))
+
+  // Any decent C++ compiler will optimize this at compile time to a 0, or an array access.
+  #define AT(c, r) ((((c)>=NUM_COLS)||((r)>=NUM_ROWS)) ? 0 : pSrc[(c)+(r)*8])
+
+  // NUM_ROWS/NUM_COLS = # of non-zero rows/cols in input matrix
+  template<int NUM_ROWS, int NUM_COLS>
+  struct P_Q
+  {
+    static void calc(Matrix44& P, Matrix44& Q, const jpgd_block_t* pSrc)
+    {
+      // 4x8 = 4x8 times 8x8, matrix 0 is constant
+      const Temp_Type X000 = AT(0, 0);
+      const Temp_Type X001 = AT(0, 1);
+      const Temp_Type X002 = AT(0, 2);
+      const Temp_Type X003 = AT(0, 3);
+      const Temp_Type X004 = AT(0, 4);
+      const Temp_Type X005 = AT(0, 5);
+      const Temp_Type X006 = AT(0, 6);
+      const Temp_Type X007 = AT(0, 7);
+      const Temp_Type X010 = D(F(0.415735f) * AT(1, 0) + F(0.791065f) * AT(3, 0) + F(-0.352443f) * AT(5, 0) + F(0.277785f) * AT(7, 0));
+      const Temp_Type X011 = D(F(0.415735f) * AT(1, 1) + F(0.791065f) * AT(3, 1) + F(-0.352443f) * AT(5, 1) + F(0.277785f) * AT(7, 1));
+      const Temp_Type X012 = D(F(0.415735f) * AT(1, 2) + F(0.791065f) * AT(3, 2) + F(-0.352443f) * AT(5, 2) + F(0.277785f) * AT(7, 2));
+      const Temp_Type X013 = D(F(0.415735f) * AT(1, 3) + F(0.791065f) * AT(3, 3) + F(-0.352443f) * AT(5, 3) + F(0.277785f) * AT(7, 3));
+      const Temp_Type X014 = D(F(0.415735f) * AT(1, 4) + F(0.791065f) * AT(3, 4) + F(-0.352443f) * AT(5, 4) + F(0.277785f) * AT(7, 4));
+      const Temp_Type X015 = D(F(0.415735f) * AT(1, 5) + F(0.791065f) * AT(3, 5) + F(-0.352443f) * AT(5, 5) + F(0.277785f) * AT(7, 5));
+      const Temp_Type X016 = D(F(0.415735f) * AT(1, 6) + F(0.791065f) * AT(3, 6) + F(-0.352443f) * AT(5, 6) + F(0.277785f) * AT(7, 6));
+      const Temp_Type X017 = D(F(0.415735f) * AT(1, 7) + F(0.791065f) * AT(3, 7) + F(-0.352443f) * AT(5, 7) + F(0.277785f) * AT(7, 7));
+      const Temp_Type X020 = AT(4, 0);
+      const Temp_Type X021 = AT(4, 1);
+      const Temp_Type X022 = AT(4, 2);
+      const Temp_Type X023 = AT(4, 3);
+      const Temp_Type X024 = AT(4, 4);
+      const Temp_Type X025 = AT(4, 5);
+      const Temp_Type X026 = AT(4, 6);
+      const Temp_Type X027 = AT(4, 7);
+      const Temp_Type X030 = D(F(0.022887f) * AT(1, 0) + F(-0.097545f) * AT(3, 0) + F(0.490393f) * AT(5, 0) + F(0.865723f) * AT(7, 0));
+      const Temp_Type X031 = D(F(0.022887f) * AT(1, 1) + F(-0.097545f) * AT(3, 1) + F(0.490393f) * AT(5, 1) + F(0.865723f) * AT(7, 1));
+      const Temp_Type X032 = D(F(0.022887f) * AT(1, 2) + F(-0.097545f) * AT(3, 2) + F(0.490393f) * AT(5, 2) + F(0.865723f) * AT(7, 2));
+      const Temp_Type X033 = D(F(0.022887f) * AT(1, 3) + F(-0.097545f) * AT(3, 3) + F(0.490393f) * AT(5, 3) + F(0.865723f) * AT(7, 3));
+      const Temp_Type X034 = D(F(0.022887f) * AT(1, 4) + F(-0.097545f) * AT(3, 4) + F(0.490393f) * AT(5, 4) + F(0.865723f) * AT(7, 4));
+      const Temp_Type X035 = D(F(0.022887f) * AT(1, 5) + F(-0.097545f) * AT(3, 5) + F(0.490393f) * AT(5, 5) + F(0.865723f) * AT(7, 5));
+      const Temp_Type X036 = D(F(0.022887f) * AT(1, 6) + F(-0.097545f) * AT(3, 6) + F(0.490393f) * AT(5, 6) + F(0.865723f) * AT(7, 6));
+      const Temp_Type X037 = D(F(0.022887f) * AT(1, 7) + F(-0.097545f) * AT(3, 7) + F(0.490393f) * AT(5, 7) + F(0.865723f) * AT(7, 7));
+
+      // 4x4 = 4x8 times 8x4, matrix 1 is constant
+      P.at(0, 0) = X000;
+      P.at(0, 1) = D(X001 * F(0.415735f) + X003 * F(0.791065f) + X005 * F(-0.352443f) + X007 * F(0.277785f));
+      P.at(0, 2) = X004;
+      P.at(0, 3) = D(X001 * F(0.022887f) + X003 * F(-0.097545f) + X005 * F(0.490393f) + X007 * F(0.865723f));
+      P.at(1, 0) = X010;
+      P.at(1, 1) = D(X011 * F(0.415735f) + X013 * F(0.791065f) + X015 * F(-0.352443f) + X017 * F(0.277785f));
+      P.at(1, 2) = X014;
+      P.at(1, 3) = D(X011 * F(0.022887f) + X013 * F(-0.097545f) + X015 * F(0.490393f) + X017 * F(0.865723f));
+      P.at(2, 0) = X020;
+      P.at(2, 1) = D(X021 * F(0.415735f) + X023 * F(0.791065f) + X025 * F(-0.352443f) + X027 * F(0.277785f));
+      P.at(2, 2) = X024;
+      P.at(2, 3) = D(X021 * F(0.022887f) + X023 * F(-0.097545f) + X025 * F(0.490393f) + X027 * F(0.865723f));
+      P.at(3, 0) = X030;
+      P.at(3, 1) = D(X031 * F(0.415735f) + X033 * F(0.791065f) + X035 * F(-0.352443f) + X037 * F(0.277785f));
+      P.at(3, 2) = X034;
+      P.at(3, 3) = D(X031 * F(0.022887f) + X033 * F(-0.097545f) + X035 * F(0.490393f) + X037 * F(0.865723f));
+      // 40 muls 24 adds
+
+      // 4x4 = 4x8 times 8x4, matrix 1 is constant
+      Q.at(0, 0) = D(X001 * F(0.906127f) + X003 * F(-0.318190f) + X005 * F(0.212608f) + X007 * F(-0.180240f));
+      Q.at(0, 1) = X002;
+      Q.at(0, 2) = D(X001 * F(-0.074658f) + X003 * F(0.513280f) + X005 * F(0.768178f) + X007 * F(-0.375330f));
+      Q.at(0, 3) = X006;
+      Q.at(1, 0) = D(X011 * F(0.906127f) + X013 * F(-0.318190f) + X015 * F(0.212608f) + X017 * F(-0.180240f));
+      Q.at(1, 1) = X012;
+      Q.at(1, 2) = D(X011 * F(-0.074658f) + X013 * F(0.513280f) + X015 * F(0.768178f) + X017 * F(-0.375330f));
+      Q.at(1, 3) = X016;
+      Q.at(2, 0) = D(X021 * F(0.906127f) + X023 * F(-0.318190f) + X025 * F(0.212608f) + X027 * F(-0.180240f));
+      Q.at(2, 1) = X022;
+      Q.at(2, 2) = D(X021 * F(-0.074658f) + X023 * F(0.513280f) + X025 * F(0.768178f) + X027 * F(-0.375330f));
+      Q.at(2, 3) = X026;
+      Q.at(3, 0) = D(X031 * F(0.906127f) + X033 * F(-0.318190f) + X035 * F(0.212608f) + X037 * F(-0.180240f));
+      Q.at(3, 1) = X032;
+      Q.at(3, 2) = D(X031 * F(-0.074658f) + X033 * F(0.513280f) + X035 * F(0.768178f) + X037 * F(-0.375330f));
+      Q.at(3, 3) = X036;
+      // 40 muls 24 adds
+    }
+  };
+
+  template<int NUM_ROWS, int NUM_COLS>
+  struct R_S
+  {
+    static void calc(Matrix44& R, Matrix44& S, const jpgd_block_t* pSrc)
+    {
+      // 4x8 = 4x8 times 8x8, matrix 0 is constant
+      const Temp_Type X100 = D(F(0.906127f) * AT(1, 0) + F(-0.318190f) * AT(3, 0) + F(0.212608f) * AT(5, 0) + F(-0.180240f) * AT(7, 0));
+      const Temp_Type X101 = D(F(0.906127f) * AT(1, 1) + F(-0.318190f) * AT(3, 1) + F(0.212608f) * AT(5, 1) + F(-0.180240f) * AT(7, 1));
+      const Temp_Type X102 = D(F(0.906127f) * AT(1, 2) + F(-0.318190f) * AT(3, 2) + F(0.212608f) * AT(5, 2) + F(-0.180240f) * AT(7, 2));
+      const Temp_Type X103 = D(F(0.906127f) * AT(1, 3) + F(-0.318190f) * AT(3, 3) + F(0.212608f) * AT(5, 3) + F(-0.180240f) * AT(7, 3));
+      const Temp_Type X104 = D(F(0.906127f) * AT(1, 4) + F(-0.318190f) * AT(3, 4) + F(0.212608f) * AT(5, 4) + F(-0.180240f) * AT(7, 4));
+      const Temp_Type X105 = D(F(0.906127f) * AT(1, 5) + F(-0.318190f) * AT(3, 5) + F(0.212608f) * AT(5, 5) + F(-0.180240f) * AT(7, 5));
+      const Temp_Type X106 = D(F(0.906127f) * AT(1, 6) + F(-0.318190f) * AT(3, 6) + F(0.212608f) * AT(5, 6) + F(-0.180240f) * AT(7, 6));
+      const Temp_Type X107 = D(F(0.906127f) * AT(1, 7) + F(-0.318190f) * AT(3, 7) + F(0.212608f) * AT(5, 7) + F(-0.180240f) * AT(7, 7));
+      const Temp_Type X110 = AT(2, 0);
+      const Temp_Type X111 = AT(2, 1);
+      const Temp_Type X112 = AT(2, 2);
+      const Temp_Type X113 = AT(2, 3);
+      const Temp_Type X114 = AT(2, 4);
+      const Temp_Type X115 = AT(2, 5);
+      const Temp_Type X116 = AT(2, 6);
+      const Temp_Type X117 = AT(2, 7);
+      const Temp_Type X120 = D(F(-0.074658f) * AT(1, 0) + F(0.513280f) * AT(3, 0) + F(0.768178f) * AT(5, 0) + F(-0.375330f) * AT(7, 0));
+      const Temp_Type X121 = D(F(-0.074658f) * AT(1, 1) + F(0.513280f) * AT(3, 1) + F(0.768178f) * AT(5, 1) + F(-0.375330f) * AT(7, 1));
+      const Temp_Type X122 = D(F(-0.074658f) * AT(1, 2) + F(0.513280f) * AT(3, 2) + F(0.768178f) * AT(5, 2) + F(-0.375330f) * AT(7, 2));
+      const Temp_Type X123 = D(F(-0.074658f) * AT(1, 3) + F(0.513280f) * AT(3, 3) + F(0.768178f) * AT(5, 3) + F(-0.375330f) * AT(7, 3));
+      const Temp_Type X124 = D(F(-0.074658f) * AT(1, 4) + F(0.513280f) * AT(3, 4) + F(0.768178f) * AT(5, 4) + F(-0.375330f) * AT(7, 4));
+      const Temp_Type X125 = D(F(-0.074658f) * AT(1, 5) + F(0.513280f) * AT(3, 5) + F(0.768178f) * AT(5, 5) + F(-0.375330f) * AT(7, 5));
+      const Temp_Type X126 = D(F(-0.074658f) * AT(1, 6) + F(0.513280f) * AT(3, 6) + F(0.768178f) * AT(5, 6) + F(-0.375330f) * AT(7, 6));
+      const Temp_Type X127 = D(F(-0.074658f) * AT(1, 7) + F(0.513280f) * AT(3, 7) + F(0.768178f) * AT(5, 7) + F(-0.375330f) * AT(7, 7));
+      const Temp_Type X130 = AT(6, 0);
+      const Temp_Type X131 = AT(6, 1);
+      const Temp_Type X132 = AT(6, 2);
+      const Temp_Type X133 = AT(6, 3);
+      const Temp_Type X134 = AT(6, 4);
+      const Temp_Type X135 = AT(6, 5);
+      const Temp_Type X136 = AT(6, 6);
+      const Temp_Type X137 = AT(6, 7);
+      // 80 muls 48 adds
+
+      // 4x4 = 4x8 times 8x4, matrix 1 is constant
+      R.at(0, 0) = X100;
+      R.at(0, 1) = D(X101 * F(0.415735f) + X103 * F(0.791065f) + X105 * F(-0.352443f) + X107 * F(0.277785f));
+      R.at(0, 2) = X104;
+      R.at(0, 3) = D(X101 * F(0.022887f) + X103 * F(-0.097545f) + X105 * F(0.490393f) + X107 * F(0.865723f));
+      R.at(1, 0) = X110;
+      R.at(1, 1) = D(X111 * F(0.415735f) + X113 * F(0.791065f) + X115 * F(-0.352443f) + X117 * F(0.277785f));
+      R.at(1, 2) = X114;
+      R.at(1, 3) = D(X111 * F(0.022887f) + X113 * F(-0.097545f) + X115 * F(0.490393f) + X117 * F(0.865723f));
+      R.at(2, 0) = X120;
+      R.at(2, 1) = D(X121 * F(0.415735f) + X123 * F(0.791065f) + X125 * F(-0.352443f) + X127 * F(0.277785f));
+      R.at(2, 2) = X124;
+      R.at(2, 3) = D(X121 * F(0.022887f) + X123 * F(-0.097545f) + X125 * F(0.490393f) + X127 * F(0.865723f));
+      R.at(3, 0) = X130;
+      R.at(3, 1) = D(X131 * F(0.415735f) + X133 * F(0.791065f) + X135 * F(-0.352443f) + X137 * F(0.277785f));
+      R.at(3, 2) = X134;
+      R.at(3, 3) = D(X131 * F(0.022887f) + X133 * F(-0.097545f) + X135 * F(0.490393f) + X137 * F(0.865723f));
+      // 40 muls 24 adds
+      // 4x4 = 4x8 times 8x4, matrix 1 is constant
+      S.at(0, 0) = D(X101 * F(0.906127f) + X103 * F(-0.318190f) + X105 * F(0.212608f) + X107 * F(-0.180240f));
+      S.at(0, 1) = X102;
+      S.at(0, 2) = D(X101 * F(-0.074658f) + X103 * F(0.513280f) + X105 * F(0.768178f) + X107 * F(-0.375330f));
+      S.at(0, 3) = X106;
+      S.at(1, 0) = D(X111 * F(0.906127f) + X113 * F(-0.318190f) + X115 * F(0.212608f) + X117 * F(-0.180240f));
+      S.at(1, 1) = X112;
+      S.at(1, 2) = D(X111 * F(-0.074658f) + X113 * F(0.513280f) + X115 * F(0.768178f) + X117 * F(-0.375330f));
+      S.at(1, 3) = X116;
+      S.at(2, 0) = D(X121 * F(0.906127f) + X123 * F(-0.318190f) + X125 * F(0.212608f) + X127 * F(-0.180240f));
+      S.at(2, 1) = X122;
+      S.at(2, 2) = D(X121 * F(-0.074658f) + X123 * F(0.513280f) + X125 * F(0.768178f) + X127 * F(-0.375330f));
+      S.at(2, 3) = X126;
+      S.at(3, 0) = D(X131 * F(0.906127f) + X133 * F(-0.318190f) + X135 * F(0.212608f) + X137 * F(-0.180240f));
+      S.at(3, 1) = X132;
+      S.at(3, 2) = D(X131 * F(-0.074658f) + X133 * F(0.513280f) + X135 * F(0.768178f) + X137 * F(-0.375330f));
+      S.at(3, 3) = X136;
+      // 40 muls 24 adds
+    }
+  };
+} // end namespace DCT_Upsample
+
+// Unconditionally frees all allocated m_blocks.
+void jpeg_decoder::free_all_blocks()
+{
+  m_pStream = NULL;
+  for (mem_block *b = m_pMem_blocks; b; )
+  {
+    mem_block *n = b->m_pNext;
+    jpgd_free(b);
+    b = n;
+  }
+  m_pMem_blocks = NULL;
+}
+
+// This method handles all errors.
+// It could easily be changed to use C++ exceptions.
+void jpeg_decoder::stop_decoding(jpgd_status status)
+{
+  m_error_code = status;
+  free_all_blocks();
+  longjmp(m_jmp_state, status);
+
+  // we shouldn't get here as longjmp shouldn't return, but we put it here to make it explicit
+  // that this function doesn't return, otherwise we get this error:
+  //
+  // error : function declared 'noreturn' should not return
+  exit(1);
+}
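
Because stop_decoding() never returns, every error path in the decoder funnels through one longjmp back to a setjmp armed before decoding begins (which is also why warning C4611 is disabled near the top of the file). A sketch of that guard pattern (illustrative only; jpgd's actual entry points are not shown in this diff):

// Illustrative only: the setjmp/longjmp error pattern stop_decoding() relies on.
#include <csetjmp>
#include <cstdio>

static jmp_buf g_jmp_state;

static void fail(int status)
{
  longjmp(g_jmp_state, status);   // never returns
}

static void decode_something(bool broken)
{
  if (broken)
    fail(7);                      // a status code, playing the role of a jpgd_status
  printf("decoded ok\n");
}

int main()
{
  int status = setjmp(g_jmp_state);
  if (status == 0)
    decode_something(true);       // jumps back here with status == 7
  else
    printf("decode failed, status %d\n", status);
  return 0;
}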
+
+void *jpeg_decoder::alloc(size_t nSize, bool zero)
+{
+  nSize = (JPGD_MAX(nSize, 1) + 3) & ~3;
+  char *rv = NULL;
+  for (mem_block *b = m_pMem_blocks; b; b = b->m_pNext)
+  {
+    if ((b->m_used_count + nSize) <= b->m_size)
+    {
+      rv = b->m_data + b->m_used_count;
+      b->m_used_count += nSize;
+      break;
+    }
+  }
+  if (!rv)
+  {
+    int capacity = JPGD_MAX(32768 - 256, (nSize + 2047) & ~2047);
+    mem_block *b = (mem_block*)jpgd_malloc(sizeof(mem_block) + capacity);
+    if (!b) stop_decoding(JPGD_NOTENOUGHMEM);
+    b->m_pNext = m_pMem_blocks; m_pMem_blocks = b;
+    b->m_used_count = nSize;
+    b->m_size = capacity;
+    rv = b->m_data;
+  }
+  if (zero) memset(rv, 0, nSize);
+  return rv;
+}
908 |
+
|
909 |
+
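// Note on alloc(): request sizes are rounded up to a multiple of 4 bytes
// ((n + 3) & ~3), and a fresh pool block is sized to at least ~32 KB
// (32768 - 256) or the request rounded up to a 2 KB multiple, whichever is
// larger. Individual allocations are never freed; everything is released
// in bulk by free_all_blocks().
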
void jpeg_decoder::word_clear(void *p, uint16 c, uint n)
{
  uint8 *pD = (uint8*)p;
  const uint8 l = c & 0xFF, h = (c >> 8) & 0xFF;
  while (n)
  {
    pD[0] = l; pD[1] = h; pD += 2;
    n--;
  }
}

// Refill the input buffer.
// This method will sit in a loop until (A) the buffer is full or (B)
// the stream's read() method reports an end-of-file condition.
void jpeg_decoder::prep_in_buffer()
{
  m_in_buf_left = 0;
  m_pIn_buf_ofs = m_in_buf;

  if (m_eof_flag)
    return;

  do
  {
    int bytes_read = m_pStream->read(m_in_buf + m_in_buf_left, JPGD_IN_BUF_SIZE - m_in_buf_left, &m_eof_flag);
    if (bytes_read == -1)
      stop_decoding(JPGD_STREAM_READ);

    m_in_buf_left += bytes_read;
  } while ((m_in_buf_left < JPGD_IN_BUF_SIZE) && (!m_eof_flag));

  m_total_bytes_read += m_in_buf_left;

  // Pad the end of the block with M_EOI (prevents the decompressor from going off the rails if the stream is invalid).
  // (This dates way back to when this decompressor was written in C/asm, and the all-asm Huffman decoder did some fancy things to increase perf.)
  word_clear(m_pIn_buf_ofs + m_in_buf_left, 0xD9FF, 64);
}

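// word_clear(p, 0xD9FF, 64) writes the byte pair FF D9 (low byte first)
// 64 times, i.e. 64 consecutive EOI markers, so a decoder that runs off
// the end of a truncated stream keeps reading EOI instead of garbage.
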
// Read a Huffman code table.
void jpeg_decoder::read_dht_marker()
{
  int i, index, count;
  uint8 huff_num[17];
  uint8 huff_val[256];

  uint num_left = get_bits(16);

  if (num_left < 2)
    stop_decoding(JPGD_BAD_DHT_MARKER);

  num_left -= 2;

  while (num_left)
  {
    index = get_bits(8);

    huff_num[0] = 0;

    count = 0;

    for (i = 1; i <= 16; i++)
    {
      huff_num[i] = static_cast<uint8>(get_bits(8));
      count += huff_num[i];
    }

    if (count > 255)
      stop_decoding(JPGD_BAD_DHT_COUNTS);

    for (i = 0; i < count; i++)
      huff_val[i] = static_cast<uint8>(get_bits(8));

    i = 1 + 16 + count;

    if (num_left < (uint)i)
      stop_decoding(JPGD_BAD_DHT_MARKER);

    num_left -= i;

    if ((index & 0x10) > 0x10)
      stop_decoding(JPGD_BAD_DHT_INDEX);

    index = (index & 0x0F) + ((index & 0x10) >> 4) * (JPGD_MAX_HUFF_TABLES >> 1);

    if (index >= JPGD_MAX_HUFF_TABLES)
      stop_decoding(JPGD_BAD_DHT_INDEX);

    if (!m_huff_num[index])
      m_huff_num[index] = (uint8 *)alloc(17);

    if (!m_huff_val[index])
      m_huff_val[index] = (uint8 *)alloc(256);

    m_huff_ac[index] = (index & 0x10) != 0;
    memcpy(m_huff_num[index], huff_num, 17);
    memcpy(m_huff_val[index], huff_val, 256);
  }
}

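// DHT index handling: bit 4 of the raw table index distinguishes AC (set)
// from DC (clear) tables. The remap above stores DC tables in the lower
// half of m_huff_num/m_huff_val and AC tables in the upper half
// (offset by JPGD_MAX_HUFF_TABLES >> 1).
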
// Read a quantization table.
void jpeg_decoder::read_dqt_marker()
{
  int n, i, prec;
  uint num_left;
  uint temp;

  num_left = get_bits(16);

  if (num_left < 2)
    stop_decoding(JPGD_BAD_DQT_MARKER);

  num_left -= 2;

  while (num_left)
  {
    n = get_bits(8);
    prec = n >> 4;
    n &= 0x0F;

    if (n >= JPGD_MAX_QUANT_TABLES)
      stop_decoding(JPGD_BAD_DQT_TABLE);

    if (!m_quant[n])
      m_quant[n] = (jpgd_quant_t *)alloc(64 * sizeof(jpgd_quant_t));

    // read quantization entries, in zag order
    for (i = 0; i < 64; i++)
    {
      temp = get_bits(8);

      if (prec)
        temp = (temp << 8) + get_bits(8);

      m_quant[n][i] = static_cast<jpgd_quant_t>(temp);
    }

    i = 64 + 1;

    if (prec)
      i += 64;

    if (num_left < (uint)i)
      stop_decoding(JPGD_BAD_DQT_LENGTH);

    num_left -= i;
  }
}

// Read the start of frame (SOF) marker.
void jpeg_decoder::read_sof_marker()
{
  int i;
  uint num_left;

  num_left = get_bits(16);

  if (get_bits(8) != 8)   /* precision: sorry, only 8-bit precision is supported right now */
    stop_decoding(JPGD_BAD_PRECISION);

  m_image_y_size = get_bits(16);

  if ((m_image_y_size < 1) || (m_image_y_size > JPGD_MAX_HEIGHT))
    stop_decoding(JPGD_BAD_HEIGHT);

  m_image_x_size = get_bits(16);

  if ((m_image_x_size < 1) || (m_image_x_size > JPGD_MAX_WIDTH))
    stop_decoding(JPGD_BAD_WIDTH);

  m_comps_in_frame = get_bits(8);

  if (m_comps_in_frame > JPGD_MAX_COMPONENTS)
    stop_decoding(JPGD_TOO_MANY_COMPONENTS);

  if (num_left != (uint)(m_comps_in_frame * 3 + 8))
    stop_decoding(JPGD_BAD_SOF_LENGTH);

  for (i = 0; i < m_comps_in_frame; i++)
  {
    m_comp_ident[i] = get_bits(8);
    m_comp_h_samp[i] = get_bits(4);
    m_comp_v_samp[i] = get_bits(4);
    m_comp_quant[i] = get_bits(8);
  }
}

// Used to skip unrecognized markers.
void jpeg_decoder::skip_variable_marker()
{
  uint num_left;

  num_left = get_bits(16);

  if (num_left < 2)
    stop_decoding(JPGD_BAD_VARIABLE_MARKER);

  num_left -= 2;

  while (num_left)
  {
    get_bits(8);
    num_left--;
  }
}

// Read a define restart interval (DRI) marker.
void jpeg_decoder::read_dri_marker()
{
  if (get_bits(16) != 4)
    stop_decoding(JPGD_BAD_DRI_LENGTH);

  m_restart_interval = get_bits(16);
}

// Read a start of scan (SOS) marker.
void jpeg_decoder::read_sos_marker()
{
  uint num_left;
  int i, ci, n, c, cc;

  num_left = get_bits(16);

  n = get_bits(8);

  m_comps_in_scan = n;

  num_left -= 3;

  if ( (num_left != (uint)(n * 2 + 3)) || (n < 1) || (n > JPGD_MAX_COMPS_IN_SCAN) )
    stop_decoding(JPGD_BAD_SOS_LENGTH);

  for (i = 0; i < n; i++)
  {
    cc = get_bits(8);
    c = get_bits(8);
    num_left -= 2;

    for (ci = 0; ci < m_comps_in_frame; ci++)
      if (cc == m_comp_ident[ci])
        break;

    if (ci >= m_comps_in_frame)
      stop_decoding(JPGD_BAD_SOS_COMP_ID);

    m_comp_list[i] = ci;
    m_comp_dc_tab[ci] = (c >> 4) & 15;
    m_comp_ac_tab[ci] = (c & 15) + (JPGD_MAX_HUFF_TABLES >> 1);
  }

  m_spectral_start = get_bits(8);
  m_spectral_end = get_bits(8);
  m_successive_high = get_bits(4);
  m_successive_low = get_bits(4);

  if (!m_progressive_flag)
  {
    m_spectral_start = 0;
    m_spectral_end = 63;
  }

  num_left -= 3;

  while (num_left)    /* read past whatever is num_left */
  {
    get_bits(8);
    num_left--;
  }
}

// Finds the next marker.
int jpeg_decoder::next_marker()
{
  uint c, bytes;

  bytes = 0;

  do
  {
    do
    {
      bytes++;
      c = get_bits(8);
    } while (c != 0xFF);

    do
    {
      c = get_bits(8);
    } while (c == 0xFF);

  } while (c == 0);

  // If bytes > 0 here, there were extra bytes before the marker (not good).

  return c;
}

// Process markers. Returns when an SOFx, SOI, EOI, or SOS marker is
// encountered.
int jpeg_decoder::process_markers()
{
  int c;

  for ( ; ; )
  {
    c = next_marker();

    switch (c)
    {
      case M_SOF0:
      case M_SOF1:
      case M_SOF2:
      case M_SOF3:
      case M_SOF5:
      case M_SOF6:
      case M_SOF7:
      // case M_JPG:
      case M_SOF9:
      case M_SOF10:
      case M_SOF11:
      case M_SOF13:
      case M_SOF14:
      case M_SOF15:
      case M_SOI:
      case M_EOI:
      case M_SOS:
      {
        return c;
      }
      case M_DHT:
      {
        read_dht_marker();
        break;
      }
      // No arithmetic support - dumb patents!
      case M_DAC:
      {
        stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT);
        break;
      }
      case M_DQT:
      {
        read_dqt_marker();
        break;
      }
      case M_DRI:
      {
        read_dri_marker();
        break;
      }
      //case M_APP0:  /* no need to read the JFIF marker */

      case M_JPG:
      case M_RST0:    /* no parameters */
      case M_RST1:
      case M_RST2:
      case M_RST3:
      case M_RST4:
      case M_RST5:
      case M_RST6:
      case M_RST7:
      case M_TEM:
      {
        stop_decoding(JPGD_UNEXPECTED_MARKER);
        break;
      }
      default:    /* must be DNL, DHP, EXP, APPn, JPGn, COM, RESn, or APP0 */
      {
        skip_variable_marker();
        break;
      }
    }
  }
}

// Finds the start of image (SOI) marker.
// This code is rather defensive: it only scans the first 4096 bytes to avoid
// false positives.
void jpeg_decoder::locate_soi_marker()
{
  uint lastchar, thischar;
  uint bytesleft;

  lastchar = get_bits(8);

  thischar = get_bits(8);

  /* ok if it's a normal JPEG file without a special header */

  if ((lastchar == 0xFF) && (thischar == M_SOI))
    return;

  bytesleft = 4096; //512;

  for ( ; ; )
  {
    if (--bytesleft == 0)
      stop_decoding(JPGD_NOT_JPEG);

    lastchar = thischar;

    thischar = get_bits(8);

    if (lastchar == 0xFF)
    {
      if (thischar == M_SOI)
        break;
      else if (thischar == M_EOI) // get_bits will keep returning M_EOI if we read past the end
        stop_decoding(JPGD_NOT_JPEG);
    }
  }

  // Check the next character after marker: if it's not 0xFF, it can't be the start of the next marker, so the file is bad.
  thischar = (m_bit_buf >> 24) & 0xFF;

  if (thischar != 0xFF)
    stop_decoding(JPGD_NOT_JPEG);
}

// Find a start of frame (SOF) marker.
void jpeg_decoder::locate_sof_marker()
{
  locate_soi_marker();

  int c = process_markers();

  switch (c)
  {
    case M_SOF2:
      m_progressive_flag = JPGD_TRUE;
    case M_SOF0:  /* baseline DCT */
    case M_SOF1:  /* extended sequential DCT */
    {
      read_sof_marker();
      break;
    }
    case M_SOF9:  /* Arithmetic coding */
    {
      stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT);
      break;
    }
    default:
    {
      stop_decoding(JPGD_UNSUPPORTED_MARKER);
      break;
    }
  }
}

// Find a start of scan (SOS) marker.
int jpeg_decoder::locate_sos_marker()
{
  int c;

  c = process_markers();

  if (c == M_EOI)
    return JPGD_FALSE;
  else if (c != M_SOS)
    stop_decoding(JPGD_UNEXPECTED_MARKER);

  read_sos_marker();

  return JPGD_TRUE;
}

// Reset everything to default/uninitialized state.
void jpeg_decoder::init(jpeg_decoder_stream *pStream)
{
  m_pMem_blocks = NULL;
  m_error_code = JPGD_SUCCESS;
  m_ready_flag = false;
  m_image_x_size = m_image_y_size = 0;
  m_pStream = pStream;
  m_progressive_flag = JPGD_FALSE;

  memset(m_huff_ac, 0, sizeof(m_huff_ac));
  memset(m_huff_num, 0, sizeof(m_huff_num));
  memset(m_huff_val, 0, sizeof(m_huff_val));
  memset(m_quant, 0, sizeof(m_quant));

  m_scan_type = 0;
  m_comps_in_frame = 0;

  memset(m_comp_h_samp, 0, sizeof(m_comp_h_samp));
  memset(m_comp_v_samp, 0, sizeof(m_comp_v_samp));
  memset(m_comp_quant, 0, sizeof(m_comp_quant));
  memset(m_comp_ident, 0, sizeof(m_comp_ident));
  memset(m_comp_h_blocks, 0, sizeof(m_comp_h_blocks));
  memset(m_comp_v_blocks, 0, sizeof(m_comp_v_blocks));

  m_comps_in_scan = 0;
  memset(m_comp_list, 0, sizeof(m_comp_list));
  memset(m_comp_dc_tab, 0, sizeof(m_comp_dc_tab));
  memset(m_comp_ac_tab, 0, sizeof(m_comp_ac_tab));

  m_spectral_start = 0;
  m_spectral_end = 0;
  m_successive_low = 0;
  m_successive_high = 0;
  m_max_mcu_x_size = 0;
  m_max_mcu_y_size = 0;
  m_blocks_per_mcu = 0;
  m_max_blocks_per_row = 0;
  m_mcus_per_row = 0;
  m_mcus_per_col = 0;
  m_expanded_blocks_per_component = 0;
  m_expanded_blocks_per_mcu = 0;
  m_expanded_blocks_per_row = 0;
  m_freq_domain_chroma_upsample = false;

  memset(m_mcu_org, 0, sizeof(m_mcu_org));

  m_total_lines_left = 0;
  m_mcu_lines_left = 0;
  m_real_dest_bytes_per_scan_line = 0;
  m_dest_bytes_per_scan_line = 0;
  m_dest_bytes_per_pixel = 0;

  memset(m_pHuff_tabs, 0, sizeof(m_pHuff_tabs));

  memset(m_dc_coeffs, 0, sizeof(m_dc_coeffs));
  memset(m_ac_coeffs, 0, sizeof(m_ac_coeffs));
  memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu));

  m_eob_run = 0;

  memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu));

  m_pIn_buf_ofs = m_in_buf;
  m_in_buf_left = 0;
  m_eof_flag = false;
  m_tem_flag = 0;

  memset(m_in_buf_pad_start, 0, sizeof(m_in_buf_pad_start));
  memset(m_in_buf, 0, sizeof(m_in_buf));
  memset(m_in_buf_pad_end, 0, sizeof(m_in_buf_pad_end));

  m_restart_interval = 0;
  m_restarts_left = 0;
  m_next_restart_num = 0;

  m_max_mcus_per_row = 0;
  m_max_blocks_per_mcu = 0;
  m_max_mcus_per_col = 0;

  memset(m_last_dc_val, 0, sizeof(m_last_dc_val));
  m_pMCU_coefficients = NULL;
  m_pSample_buf = NULL;

  m_total_bytes_read = 0;

  m_pScan_line_0 = NULL;
  m_pScan_line_1 = NULL;

  // Ready the input buffer.
  prep_in_buffer();

  // Prime the bit buffer.
  m_bits_left = 16;
  m_bit_buf = 0;

  get_bits(16);
  get_bits(16);

  for (int i = 0; i < JPGD_MAX_BLOCKS_PER_MCU; i++)
    m_mcu_block_max_zag[i] = 64;
}

#define SCALEBITS 16
#define ONE_HALF  ((int) 1 << (SCALEBITS-1))
#define FIX(x)    ((int) ((x) * (1L<<SCALEBITS) + 0.5f))

// Create a few tables that allow us to quickly convert YCbCr to RGB.
void jpeg_decoder::create_look_ups()
{
  for (int i = 0; i <= 255; i++)
  {
    int k = i - 128;
    m_crr[i] = ( FIX(1.40200f)  * k + ONE_HALF) >> SCALEBITS;
    m_cbb[i] = ( FIX(1.77200f)  * k + ONE_HALF) >> SCALEBITS;
    m_crg[i] = (-FIX(0.71414f)) * k;
    m_cbg[i] = (-FIX(0.34414f)) * k + ONE_HALF;
  }
}

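// The look-up tables implement the standard JFIF conversion in 16.16
// fixed point (SCALEBITS == 16):
//   R = Y + 1.40200 * (Cr - 128)
//   G = Y - 0.71414 * (Cr - 128) - 0.34414 * (Cb - 128)
//   B = Y + 1.77200 * (Cb - 128)
// m_crr/m_cbb are pre-shifted down to integers; m_crg/m_cbg keep their 16
// fractional bits so the two green contributions can be summed before a
// single >> 16 in the conversion routines further below.
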
// This method throws back into the stream any bytes that were read
// into the bit buffer during initial marker scanning.
void jpeg_decoder::fix_in_buffer()
{
  // In case any 0xFF's were pulled into the buffer during marker scanning.
  JPGD_ASSERT((m_bits_left & 7) == 0);

  if (m_bits_left == 16)
    stuff_char( (uint8)(m_bit_buf & 0xFF));

  if (m_bits_left >= 8)
    stuff_char( (uint8)((m_bit_buf >> 8) & 0xFF));

  stuff_char((uint8)((m_bit_buf >> 16) & 0xFF));
  stuff_char((uint8)((m_bit_buf >> 24) & 0xFF));

  m_bits_left = 16;
  get_bits_no_markers(16);
  get_bits_no_markers(16);
}

void jpeg_decoder::transform_mcu(int mcu_row)
{
  jpgd_block_t* pSrc_ptr = m_pMCU_coefficients;
  uint8* pDst_ptr = m_pSample_buf + mcu_row * m_blocks_per_mcu * 64;

  for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++)
  {
    idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]);
    pSrc_ptr += 64;
    pDst_ptr += 64;
  }
}

static const uint8 s_max_rc[64] =
{
  17, 18, 34, 50, 50, 51, 52, 52, 52, 68, 84, 84, 84, 84, 85, 86, 86, 86, 86, 86,
  102, 118, 118, 118, 118, 118, 118, 119, 120, 120, 120, 120, 120, 120, 120, 136,
  136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136,
  136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136
};

void jpeg_decoder::transform_mcu_expand(int mcu_row)
{
  jpgd_block_t* pSrc_ptr = m_pMCU_coefficients;
  uint8* pDst_ptr = m_pSample_buf + mcu_row * m_expanded_blocks_per_mcu * 64;

  // Y IDCT
  int mcu_block;
  for (mcu_block = 0; mcu_block < m_expanded_blocks_per_component; mcu_block++)
  {
    idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]);
    pSrc_ptr += 64;
    pDst_ptr += 64;
  }

  // Chroma IDCT, with upsampling
  jpgd_block_t temp_block[64];

  for (int i = 0; i < 2; i++)
  {
    DCT_Upsample::Matrix44 P, Q, R, S;

    JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] >= 1);
    JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] <= 64);

    switch (s_max_rc[m_mcu_block_max_zag[mcu_block++] - 1])
    {
      case 1*16+1:
        DCT_Upsample::P_Q<1, 1>::calc(P, Q, pSrc_ptr);
        DCT_Upsample::R_S<1, 1>::calc(R, S, pSrc_ptr);
        break;
      case 1*16+2:
        DCT_Upsample::P_Q<1, 2>::calc(P, Q, pSrc_ptr);
        DCT_Upsample::R_S<1, 2>::calc(R, S, pSrc_ptr);
        break;
      case 2*16+2:
        DCT_Upsample::P_Q<2, 2>::calc(P, Q, pSrc_ptr);
        DCT_Upsample::R_S<2, 2>::calc(R, S, pSrc_ptr);
        break;
      case 3*16+2:
        DCT_Upsample::P_Q<3, 2>::calc(P, Q, pSrc_ptr);
        DCT_Upsample::R_S<3, 2>::calc(R, S, pSrc_ptr);
        break;
      case 3*16+3:
        DCT_Upsample::P_Q<3, 3>::calc(P, Q, pSrc_ptr);
        DCT_Upsample::R_S<3, 3>::calc(R, S, pSrc_ptr);
        break;
      case 3*16+4:
        DCT_Upsample::P_Q<3, 4>::calc(P, Q, pSrc_ptr);
        DCT_Upsample::R_S<3, 4>::calc(R, S, pSrc_ptr);
        break;
      case 4*16+4:
        DCT_Upsample::P_Q<4, 4>::calc(P, Q, pSrc_ptr);
        DCT_Upsample::R_S<4, 4>::calc(R, S, pSrc_ptr);
        break;
      case 5*16+4:
        DCT_Upsample::P_Q<5, 4>::calc(P, Q, pSrc_ptr);
        DCT_Upsample::R_S<5, 4>::calc(R, S, pSrc_ptr);
        break;
      case 5*16+5:
        DCT_Upsample::P_Q<5, 5>::calc(P, Q, pSrc_ptr);
        DCT_Upsample::R_S<5, 5>::calc(R, S, pSrc_ptr);
        break;
      case 5*16+6:
        DCT_Upsample::P_Q<5, 6>::calc(P, Q, pSrc_ptr);
        DCT_Upsample::R_S<5, 6>::calc(R, S, pSrc_ptr);
        break;
      case 6*16+6:
        DCT_Upsample::P_Q<6, 6>::calc(P, Q, pSrc_ptr);
        DCT_Upsample::R_S<6, 6>::calc(R, S, pSrc_ptr);
        break;
      case 7*16+6:
        DCT_Upsample::P_Q<7, 6>::calc(P, Q, pSrc_ptr);
        DCT_Upsample::R_S<7, 6>::calc(R, S, pSrc_ptr);
        break;
      case 7*16+7:
        DCT_Upsample::P_Q<7, 7>::calc(P, Q, pSrc_ptr);
        DCT_Upsample::R_S<7, 7>::calc(R, S, pSrc_ptr);
        break;
      case 7*16+8:
        DCT_Upsample::P_Q<7, 8>::calc(P, Q, pSrc_ptr);
        DCT_Upsample::R_S<7, 8>::calc(R, S, pSrc_ptr);
        break;
      case 8*16+8:
        DCT_Upsample::P_Q<8, 8>::calc(P, Q, pSrc_ptr);
        DCT_Upsample::R_S<8, 8>::calc(R, S, pSrc_ptr);
        break;
      default:
        JPGD_ASSERT(false);
    }

    DCT_Upsample::Matrix44 a(P + Q); P -= Q;
    DCT_Upsample::Matrix44& b = P;
    DCT_Upsample::Matrix44 c(R + S); R -= S;
    DCT_Upsample::Matrix44& d = R;

    DCT_Upsample::Matrix44::add_and_store(temp_block, a, c);
    idct_4x4(temp_block, pDst_ptr);
    pDst_ptr += 64;

    DCT_Upsample::Matrix44::sub_and_store(temp_block, a, c);
    idct_4x4(temp_block, pDst_ptr);
    pDst_ptr += 64;

    DCT_Upsample::Matrix44::add_and_store(temp_block, b, d);
    idct_4x4(temp_block, pDst_ptr);
    pDst_ptr += 64;

    DCT_Upsample::Matrix44::sub_and_store(temp_block, b, d);
    idct_4x4(temp_block, pDst_ptr);
    pDst_ptr += 64;

    pSrc_ptr += 64;
  }
}

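// Sketch of the frequency-domain upsampling above, as read from the code:
// each 8x8 chroma coefficient block is folded into four 4x4 matrices
// P, Q, R, S by constant 4x8 matrix products, then the butterfly
// a = P + Q, b = P - Q, c = R + S, d = R - S produces the 4x4 coefficient
// sets (a+c, a-c, b+d, b-d) that idct_4x4() expands into four 8x8 spatial
// blocks -- a 2x upsample with no separate spatial filter. s_max_rc maps
// the highest nonzero zig-zag index to a packed (row*16 + col) bound so
// the switch only multiplies the coefficient rows/columns that can be
// nonzero.
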
// Loads and dequantizes the next row of (already decoded) coefficients.
// Progressive images only.
void jpeg_decoder::load_next_row()
{
  int i;
  jpgd_block_t *p;
  jpgd_quant_t *q;
  int mcu_row, mcu_block, row_block = 0;
  int component_num, component_id;
  int block_x_mcu[JPGD_MAX_COMPONENTS];

  memset(block_x_mcu, 0, JPGD_MAX_COMPONENTS * sizeof(int));

  for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++)
  {
    int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0;

    for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++)
    {
      component_id = m_mcu_org[mcu_block];
      q = m_quant[m_comp_quant[component_id]];

      p = m_pMCU_coefficients + 64 * mcu_block;

      jpgd_block_t* pAC = coeff_buf_getp(m_ac_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs);
      jpgd_block_t* pDC = coeff_buf_getp(m_dc_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs);
      p[0] = pDC[0];
      memcpy(&p[1], &pAC[1], 63 * sizeof(jpgd_block_t));

      for (i = 63; i > 0; i--)
        if (p[g_ZAG[i]])
          break;

      m_mcu_block_max_zag[mcu_block] = i + 1;

      for ( ; i >= 0; i--)
        if (p[g_ZAG[i]])
          p[g_ZAG[i]] = static_cast<jpgd_block_t>(p[g_ZAG[i]] * q[i]);

      row_block++;

      if (m_comps_in_scan == 1)
        block_x_mcu[component_id]++;
      else
      {
        if (++block_x_mcu_ofs == m_comp_h_samp[component_id])
        {
          block_x_mcu_ofs = 0;

          if (++block_y_mcu_ofs == m_comp_v_samp[component_id])
          {
            block_y_mcu_ofs = 0;

            block_x_mcu[component_id] += m_comp_h_samp[component_id];
          }
        }
      }
    }

    if (m_freq_domain_chroma_upsample)
      transform_mcu_expand(mcu_row);
    else
      transform_mcu(mcu_row);
  }

  if (m_comps_in_scan == 1)
    m_block_y_mcu[m_comp_list[0]]++;
  else
  {
    for (component_num = 0; component_num < m_comps_in_scan; component_num++)
    {
      component_id = m_comp_list[component_num];

      m_block_y_mcu[component_id] += m_comp_v_samp[component_id];
    }
  }
}

// Restart interval processing.
void jpeg_decoder::process_restart()
{
  int i;
  int c = 0;

  // Align to a byte boundary
  // FIXME: Is this really necessary? get_bits_no_markers() never reads in markers!
  //get_bits_no_markers(m_bits_left & 7);

  // Let's scan a little bit to find the marker, but not _too_ far.
  // 1536 is a "fudge factor" that determines how much to scan.
  for (i = 1536; i > 0; i--)
    if (get_char() == 0xFF)
      break;

  if (i == 0)
    stop_decoding(JPGD_BAD_RESTART_MARKER);

  for ( ; i > 0; i--)
    if ((c = get_char()) != 0xFF)
      break;

  if (i == 0)
    stop_decoding(JPGD_BAD_RESTART_MARKER);

  // Is it the expected marker? If not, something bad happened.
  if (c != (m_next_restart_num + M_RST0))
    stop_decoding(JPGD_BAD_RESTART_MARKER);

  // Reset each component's DC prediction values.
  memset(&m_last_dc_val, 0, m_comps_in_frame * sizeof(uint));

  m_eob_run = 0;

  m_restarts_left = m_restart_interval;

  m_next_restart_num = (m_next_restart_num + 1) & 7;

  // Get the bit buffer going again...

  m_bits_left = 16;
  get_bits_no_markers(16);
  get_bits_no_markers(16);
}

static inline int dequantize_ac(int c, int q) { c *= q; return c; }

// Decodes and dequantizes the next row of coefficients.
void jpeg_decoder::decode_next_row()
{
  int row_block = 0;

  for (int mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++)
  {
    if ((m_restart_interval) && (m_restarts_left == 0))
      process_restart();

    jpgd_block_t* p = m_pMCU_coefficients;
    for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++, p += 64)
    {
      int component_id = m_mcu_org[mcu_block];
      jpgd_quant_t* q = m_quant[m_comp_quant[component_id]];

      int r, s;
      s = huff_decode(m_pHuff_tabs[m_comp_dc_tab[component_id]], r);
      s = HUFF_EXTEND(r, s);

      m_last_dc_val[component_id] = (s += m_last_dc_val[component_id]);

      p[0] = static_cast<jpgd_block_t>(s * q[0]);

      int prev_num_set = m_mcu_block_max_zag[mcu_block];

      huff_tables *pH = m_pHuff_tabs[m_comp_ac_tab[component_id]];

      int k;
      for (k = 1; k < 64; k++)
      {
        int extra_bits;
        s = huff_decode(pH, extra_bits);

        r = s >> 4;
        s &= 15;

        if (s)
        {
          if (r)
          {
            if ((k + r) > 63)
              stop_decoding(JPGD_DECODE_ERROR);

            if (k < prev_num_set)
            {
              int n = JPGD_MIN(r, prev_num_set - k);
              int kt = k;
              while (n--)
                p[g_ZAG[kt++]] = 0;
            }

            k += r;
          }

          s = HUFF_EXTEND(extra_bits, s);

          JPGD_ASSERT(k < 64);

          p[g_ZAG[k]] = static_cast<jpgd_block_t>(dequantize_ac(s, q[k])); //s * q[k];
        }
        else
        {
          if (r == 15)
          {
            if ((k + 16) > 64)
              stop_decoding(JPGD_DECODE_ERROR);

            if (k < prev_num_set)
            {
              int n = JPGD_MIN(16, prev_num_set - k);
              int kt = k;
              while (n--)
              {
                JPGD_ASSERT(kt <= 63);
                p[g_ZAG[kt++]] = 0;
              }
            }

            k += 16 - 1; // - 1 because the loop counter is k
            // BEGIN EPIC MOD
            JPGD_ASSERT(k < 64 && p[g_ZAG[k]] == 0);
            // END EPIC MOD
          }
          else
            break;
        }
      }

      if (k < prev_num_set)
      {
        int kt = k;
        while (kt < prev_num_set)
          p[g_ZAG[kt++]] = 0;
      }

      m_mcu_block_max_zag[mcu_block] = k;

      row_block++;
    }

    if (m_freq_domain_chroma_upsample)
      transform_mcu_expand(mcu_row);
    else
      transform_mcu(mcu_row);

    m_restarts_left--;
  }
}

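// Baseline entropy decoding, for reference: each Huffman symbol s above
// packs a run/size pair -- the high nibble (r = s >> 4) is a run of zero
// coefficients to skip and the low nibble is the bit-length of the next
// nonzero coefficient, which HUFF_EXTEND() sign-extends. (15, 0) means a
// run of 16 zeros (ZRL) and (0, 0) is end-of-block.
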
// YCbCr H1V1 (1x1:1:1, 3 m_blocks per MCU) to RGB
void jpeg_decoder::H1V1Convert()
{
  int row = m_max_mcu_y_size - m_mcu_lines_left;
  uint8 *d = m_pScan_line_0;
  uint8 *s = m_pSample_buf + row * 8;

  for (int i = m_max_mcus_per_row; i > 0; i--)
  {
    for (int j = 0; j < 8; j++)
    {
      int y = s[j];
      int cb = s[64+j];
      int cr = s[128+j];

      if (jpg_format == ERGBFormatJPG::BGRA)
      {
        d[0] = clamp(y + m_cbb[cb]);
        d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16));
        d[2] = clamp(y + m_crr[cr]);
        d[3] = 255;
      }
      else
      {
        d[0] = clamp(y + m_crr[cr]);
        d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16));
        d[2] = clamp(y + m_cbb[cb]);
        d[3] = 255;
      }
      d += 4;
    }

    s += 64*3;
  }
}

// YCbCr H2V1 (2x1:1:1, 4 m_blocks per MCU) to RGB
void jpeg_decoder::H2V1Convert()
{
  int row = m_max_mcu_y_size - m_mcu_lines_left;
  uint8 *d0 = m_pScan_line_0;
  uint8 *y = m_pSample_buf + row * 8;
  uint8 *c = m_pSample_buf + 2*64 + row * 8;

  for (int i = m_max_mcus_per_row; i > 0; i--)
  {
    for (int l = 0; l < 2; l++)
    {
      for (int j = 0; j < 4; j++)
      {
        int cb = c[0];
        int cr = c[64];

        int rc = m_crr[cr];
        int gc = ((m_crg[cr] + m_cbg[cb]) >> 16);
        int bc = m_cbb[cb];

        int yy = y[j<<1];
        if (jpg_format == ERGBFormatJPG::BGRA)
        {
          d0[0] = clamp(yy+bc);
          d0[1] = clamp(yy+gc);
          d0[2] = clamp(yy+rc);
          d0[3] = 255;
          yy = y[(j<<1)+1];
          d0[4] = clamp(yy+bc);
          d0[5] = clamp(yy+gc);
          d0[6] = clamp(yy+rc);
          d0[7] = 255;
        }
        else
        {
          d0[0] = clamp(yy+rc);
          d0[1] = clamp(yy+gc);
          d0[2] = clamp(yy+bc);
          d0[3] = 255;
          yy = y[(j<<1)+1];
          d0[4] = clamp(yy+rc);
          d0[5] = clamp(yy+gc);
          d0[6] = clamp(yy+bc);
          d0[7] = 255;
        }

        d0 += 8;

        c++;
      }
      y += 64;
    }

    y += 64*4 - 64*2;
    c += 64*4 - 8;
  }
}

// YCbCr H1V2 (1x2:1:1, 4 m_blocks per MCU) to RGB
void jpeg_decoder::H1V2Convert()
{
  int row = m_max_mcu_y_size - m_mcu_lines_left;
  uint8 *d0 = m_pScan_line_0;
  uint8 *d1 = m_pScan_line_1;
  uint8 *y;
  uint8 *c;

  if (row < 8)
    y = m_pSample_buf + row * 8;
  else
    y = m_pSample_buf + 64*1 + (row & 7) * 8;

  c = m_pSample_buf + 64*2 + (row >> 1) * 8;

  for (int i = m_max_mcus_per_row; i > 0; i--)
  {
    for (int j = 0; j < 8; j++)
    {
      int cb = c[0+j];
      int cr = c[64+j];

      int rc = m_crr[cr];
      int gc = ((m_crg[cr] + m_cbg[cb]) >> 16);
      int bc = m_cbb[cb];

      int yy = y[j];
      if (jpg_format == ERGBFormatJPG::BGRA)
      {
        d0[0] = clamp(yy+bc);
        d0[1] = clamp(yy+gc);
        d0[2] = clamp(yy+rc);
        d0[3] = 255;
        yy = y[8+j];
        d1[0] = clamp(yy+bc);
        d1[1] = clamp(yy+gc);
        d1[2] = clamp(yy+rc);
        d1[3] = 255;
      }
      else
      {
        d0[0] = clamp(yy+rc);
        d0[1] = clamp(yy+gc);
        d0[2] = clamp(yy+bc);
        d0[3] = 255;
        yy = y[8+j];
        d1[0] = clamp(yy+rc);
        d1[1] = clamp(yy+gc);
        d1[2] = clamp(yy+bc);
        d1[3] = 255;
      }

      d0 += 4;
      d1 += 4;
    }

    y += 64*4;
    c += 64*4;
  }
}

// YCbCr H2V2 (2x2:1:1, 6 m_blocks per MCU) to RGB
void jpeg_decoder::H2V2Convert()
{
  int row = m_max_mcu_y_size - m_mcu_lines_left;
  uint8 *d0 = m_pScan_line_0;
  uint8 *d1 = m_pScan_line_1;
  uint8 *y;
  uint8 *c;

  if (row < 8)
    y = m_pSample_buf + row * 8;
  else
    y = m_pSample_buf + 64*2 + (row & 7) * 8;

  c = m_pSample_buf + 64*4 + (row >> 1) * 8;

  for (int i = m_max_mcus_per_row; i > 0; i--)
  {
    for (int l = 0; l < 2; l++)
    {
      for (int j = 0; j < 8; j += 2)
      {
        int cb = c[0];
        int cr = c[64];

        int rc = m_crr[cr];
        int gc = ((m_crg[cr] + m_cbg[cb]) >> 16);
        int bc = m_cbb[cb];

        int yy = y[j];
        if (jpg_format == ERGBFormatJPG::BGRA)
        {
          d0[0] = clamp(yy+bc);
          d0[1] = clamp(yy+gc);
          d0[2] = clamp(yy+rc);
          d0[3] = 255;
          yy = y[j+1];
          d0[4] = clamp(yy+bc);
          d0[5] = clamp(yy+gc);
          d0[6] = clamp(yy+rc);
          d0[7] = 255;
          yy = y[j+8];
          d1[0] = clamp(yy+bc);
          d1[1] = clamp(yy+gc);
          d1[2] = clamp(yy+rc);
          d1[3] = 255;
          yy = y[j+8+1];
          d1[4] = clamp(yy+bc);
          d1[5] = clamp(yy+gc);
          d1[6] = clamp(yy+rc);
          d1[7] = 255;
        }
        else
        {
          d0[0] = clamp(yy+rc);
          d0[1] = clamp(yy+gc);
          d0[2] = clamp(yy+bc);
          d0[3] = 255;
          yy = y[j+1];
          d0[4] = clamp(yy+rc);
          d0[5] = clamp(yy+gc);
          d0[6] = clamp(yy+bc);
          d0[7] = 255;
          yy = y[j+8];
          d1[0] = clamp(yy+rc);
          d1[1] = clamp(yy+gc);
          d1[2] = clamp(yy+bc);
          d1[3] = 255;
          yy = y[j+8+1];
          d1[4] = clamp(yy+rc);
          d1[5] = clamp(yy+gc);
          d1[6] = clamp(yy+bc);
          d1[7] = 255;
        }

        d0 += 8;
        d1 += 8;

        c++;
      }
      y += 64;
    }

    y += 64*6 - 64*2;
    c += 64*6 - 8;
  }
}

// Y (1 block per MCU) to 8-bit grayscale
void jpeg_decoder::gray_convert()
{
  int row = m_max_mcu_y_size - m_mcu_lines_left;
  uint8 *d = m_pScan_line_0;
  uint8 *s = m_pSample_buf + row * 8;

  for (int i = m_max_mcus_per_row; i > 0; i--)
  {
    *(uint *)d = *(uint *)s;
    *(uint *)(&d[4]) = *(uint *)(&s[4]);

    s += 64;
    d += 8;
  }
}

void jpeg_decoder::expanded_convert()
{
  int row = m_max_mcu_y_size - m_mcu_lines_left;

  uint8* Py = m_pSample_buf + (row / 8) * 64 * m_comp_h_samp[0] + (row & 7) * 8;

  uint8* d = m_pScan_line_0;

  for (int i = m_max_mcus_per_row; i > 0; i--)
  {
    for (int k = 0; k < m_max_mcu_x_size; k += 8)
    {
      const int Y_ofs = k * 8;
      const int Cb_ofs = Y_ofs + 64 * m_expanded_blocks_per_component;
      const int Cr_ofs = Y_ofs + 64 * m_expanded_blocks_per_component * 2;
      for (int j = 0; j < 8; j++)
      {
        int y = Py[Y_ofs + j];
        int cb = Py[Cb_ofs + j];
        int cr = Py[Cr_ofs + j];

        if (jpg_format == ERGBFormatJPG::BGRA)
        {
          d[0] = clamp(y + m_cbb[cb]);
          d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16));
          d[2] = clamp(y + m_crr[cr]);
          d[3] = 255;
        }
        else
        {
          d[0] = clamp(y + m_crr[cr]);
          d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16));
          d[2] = clamp(y + m_cbb[cb]);
          d[3] = 255;
        }

        d += 4;
      }
    }

    Py += 64 * m_expanded_blocks_per_mcu;
  }
}

// Find end of image (EOI) marker, so we can return to the user the exact size of the input stream.
void jpeg_decoder::find_eoi()
{
  if (!m_progressive_flag)
  {
    // Attempt to read the EOI marker.
    //get_bits_no_markers(m_bits_left & 7);

    // Prime the bit buffer
    m_bits_left = 16;
    get_bits(16);
    get_bits(16);

    // The next marker _should_ be EOI
    process_markers();
  }

  m_total_bytes_read -= m_in_buf_left;
}

int jpeg_decoder::decode(const void** pScan_line, uint* pScan_line_len)
{
  if ((m_error_code) || (!m_ready_flag))
    return JPGD_FAILED;

  if (m_total_lines_left == 0)
    return JPGD_DONE;

  if (m_mcu_lines_left == 0)
  {
    if (setjmp(m_jmp_state))
      return JPGD_FAILED;

    if (m_progressive_flag)
      load_next_row();
    else
      decode_next_row();

    // Find the EOI marker if that was the last row.
    if (m_total_lines_left <= m_max_mcu_y_size)
      find_eoi();

    m_mcu_lines_left = m_max_mcu_y_size;
  }

  if (m_freq_domain_chroma_upsample)
  {
    expanded_convert();
    *pScan_line = m_pScan_line_0;
  }
  else
  {
    switch (m_scan_type)
    {
      case JPGD_YH2V2:
      {
        if ((m_mcu_lines_left & 1) == 0)
        {
          H2V2Convert();
          *pScan_line = m_pScan_line_0;
        }
        else
          *pScan_line = m_pScan_line_1;

        break;
      }
      case JPGD_YH2V1:
      {
        H2V1Convert();
        *pScan_line = m_pScan_line_0;
        break;
      }
      case JPGD_YH1V2:
      {
        if ((m_mcu_lines_left & 1) == 0)
        {
          H1V2Convert();
          *pScan_line = m_pScan_line_0;
        }
        else
          *pScan_line = m_pScan_line_1;

        break;
      }
      case JPGD_YH1V1:
      {
        H1V1Convert();
        *pScan_line = m_pScan_line_0;
        break;
      }
      case JPGD_GRAYSCALE:
      {
        gray_convert();
        *pScan_line = m_pScan_line_0;

        break;
      }
    }
  }

  *pScan_line_len = m_real_dest_bytes_per_scan_line;

  m_mcu_lines_left--;
  m_total_lines_left--;

  return JPGD_SUCCESS;
}

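// A minimal sketch of the intended calling sequence (assuming the decoder
// API declared in jpgd.h -- the constructor, begin_decoding(),
// get_error_code() and get_height() are not defined in this part of the
// file):
//
//   jpeg_decoder d(&stream);
//   if ((d.get_error_code() == JPGD_SUCCESS) && (d.begin_decoding() == JPGD_SUCCESS))
//   {
//     const void* pLine; uint len;
//     for (int yy = 0; yy < d.get_height(); yy++)
//     {
//       if (d.decode(&pLine, &len) != JPGD_SUCCESS) break;
//       // pLine -> len bytes of 32bpp RGBA/BGRA (or 8bpp gray) pixels
//     }
//   }
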
// Creates the tables needed for efficient Huffman decoding.
void jpeg_decoder::make_huff_table(int index, huff_tables *pH)
{
  int p, i, l, si;
  uint8 huffsize[257];
  uint huffcode[257];
  uint code;
  uint subtree;
  int code_size;
  int lastp;
  int nextfreeentry;
  int currententry;

  pH->ac_table = m_huff_ac[index] != 0;

  p = 0;

  for (l = 1; l <= 16; l++)
  {
    for (i = 1; i <= m_huff_num[index][l]; i++)
      huffsize[p++] = static_cast<uint8>(l);
  }

  huffsize[p] = 0;

  lastp = p;

  code = 0;
  si = huffsize[0];
  p = 0;

  while (huffsize[p])
  {
    while (huffsize[p] == si)
    {
      huffcode[p++] = code;
      code++;
    }

    code <<= 1;
    si++;
  }

  memset(pH->look_up, 0, sizeof(pH->look_up));
  memset(pH->look_up2, 0, sizeof(pH->look_up2));
  memset(pH->tree, 0, sizeof(pH->tree));
  memset(pH->code_size, 0, sizeof(pH->code_size));

  nextfreeentry = -1;

  p = 0;

  while (p < lastp)
  {
    i = m_huff_val[index][p];
    code = huffcode[p];
    code_size = huffsize[p];

    pH->code_size[i] = static_cast<uint8>(code_size);

    if (code_size <= 8)
    {
      code <<= (8 - code_size);

      for (l = 1 << (8 - code_size); l > 0; l--)
      {
        JPGD_ASSERT(i < 256);

        pH->look_up[code] = i;

        bool has_extrabits = false;
        int extra_bits = 0;
        int num_extra_bits = i & 15;

        int bits_to_fetch = code_size;
        if (num_extra_bits)
        {
          int total_codesize = code_size + num_extra_bits;
          if (total_codesize <= 8)
          {
            has_extrabits = true;
            extra_bits = ((1 << num_extra_bits) - 1) & (code >> (8 - total_codesize));
            JPGD_ASSERT(extra_bits <= 0x7FFF);
            bits_to_fetch += num_extra_bits;
          }
        }

        if (!has_extrabits)
          pH->look_up2[code] = i | (bits_to_fetch << 8);
        else
          pH->look_up2[code] = i | 0x8000 | (extra_bits << 16) | (bits_to_fetch << 8);

        code++;
      }
    }
    else
    {
      subtree = (code >> (code_size - 8)) & 0xFF;

      currententry = pH->look_up[subtree];

      if (currententry == 0)
      {
        pH->look_up[subtree] = currententry = nextfreeentry;
        pH->look_up2[subtree] = currententry = nextfreeentry;

        nextfreeentry -= 2;
      }

      code <<= (16 - (code_size - 8));

      for (l = code_size; l > 9; l--)
      {
        if ((code & 0x8000) == 0)
          currententry--;

        if (pH->tree[-currententry - 1] == 0)
        {
          pH->tree[-currententry - 1] = nextfreeentry;

          currententry = nextfreeentry;

          nextfreeentry -= 2;
        }
        else
          currententry = pH->tree[-currententry - 1];

        code <<= 1;
      }

      if ((code & 0x8000) == 0)
        currententry--;

      pH->tree[-currententry - 1] = i;
    }

    p++;
  }
}

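// Decoding-table layout (as built above): codes of 8 bits or less are
// expanded into the 256-entry look_up[] array indexed by the next 8 input
// bits; longer codes chain through tree[] via negative indices, consuming
// one bit per level past 8. look_up2[] additionally packs the symbol, the
// total number of bits to fetch, and -- when code plus extra bits fit in
// 8 bits -- the pre-extracted extra bits behind flag 0x8000, letting the
// fast path resolve a symbol and its magnitude bits with one table hit.
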
// Verifies the quantization tables needed for this scan are available.
void jpeg_decoder::check_quant_tables()
{
  for (int i = 0; i < m_comps_in_scan; i++)
    if (m_quant[m_comp_quant[m_comp_list[i]]] == NULL)
      stop_decoding(JPGD_UNDEFINED_QUANT_TABLE);
}

// Verifies that all the Huffman tables needed for this scan are available.
void jpeg_decoder::check_huff_tables()
{
  for (int i = 0; i < m_comps_in_scan; i++)
  {
    if ((m_spectral_start == 0) && (m_huff_num[m_comp_dc_tab[m_comp_list[i]]] == NULL))
      stop_decoding(JPGD_UNDEFINED_HUFF_TABLE);

    if ((m_spectral_end > 0) && (m_huff_num[m_comp_ac_tab[m_comp_list[i]]] == NULL))
      stop_decoding(JPGD_UNDEFINED_HUFF_TABLE);
  }

  for (int i = 0; i < JPGD_MAX_HUFF_TABLES; i++)
    if (m_huff_num[i])
    {
      if (!m_pHuff_tabs[i])
        m_pHuff_tabs[i] = (huff_tables *)alloc(sizeof(huff_tables));

      make_huff_table(i, m_pHuff_tabs[i]);
    }
}

// Determines the component order inside each MCU.
// Also calculates how many MCUs are on each row, etc.
void jpeg_decoder::calc_mcu_block_order()
{
  int component_num, component_id;
  int max_h_samp = 0, max_v_samp = 0;

  for (component_id = 0; component_id < m_comps_in_frame; component_id++)
  {
    if (m_comp_h_samp[component_id] > max_h_samp)
      max_h_samp = m_comp_h_samp[component_id];

    if (m_comp_v_samp[component_id] > max_v_samp)
      max_v_samp = m_comp_v_samp[component_id];
  }

  for (component_id = 0; component_id < m_comps_in_frame; component_id++)
  {
    m_comp_h_blocks[component_id] = ((((m_image_x_size * m_comp_h_samp[component_id]) + (max_h_samp - 1)) / max_h_samp) + 7) / 8;
    m_comp_v_blocks[component_id] = ((((m_image_y_size * m_comp_v_samp[component_id]) + (max_v_samp - 1)) / max_v_samp) + 7) / 8;
  }

  if (m_comps_in_scan == 1)
  {
    m_mcus_per_row = m_comp_h_blocks[m_comp_list[0]];
    m_mcus_per_col = m_comp_v_blocks[m_comp_list[0]];
  }
  else
  {
    m_mcus_per_row = (((m_image_x_size + 7) / 8) + (max_h_samp - 1)) / max_h_samp;
    m_mcus_per_col = (((m_image_y_size + 7) / 8) + (max_v_samp - 1)) / max_v_samp;
  }

  if (m_comps_in_scan == 1)
  {
    m_mcu_org[0] = m_comp_list[0];

    m_blocks_per_mcu = 1;
  }
  else
  {
    m_blocks_per_mcu = 0;

    for (component_num = 0; component_num < m_comps_in_scan; component_num++)
    {
      int num_blocks;

      component_id = m_comp_list[component_num];

      num_blocks = m_comp_h_samp[component_id] * m_comp_v_samp[component_id];

      while (num_blocks--)
        m_mcu_org[m_blocks_per_mcu++] = component_id;
    }
  }
}

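// Example: for a 2x2-sampled Y plane with 1x1 Cb and Cr (H2V2), the loop
// above yields m_mcu_org = { Y, Y, Y, Y, Cb, Cr } and m_blocks_per_mcu = 6,
// matching the interleaved block order of the scan data.
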
// Starts a new scan.
int jpeg_decoder::init_scan()
{
  if (!locate_sos_marker())
    return JPGD_FALSE;

  calc_mcu_block_order();

  check_huff_tables();

  check_quant_tables();

  memset(m_last_dc_val, 0, m_comps_in_frame * sizeof(uint));

  m_eob_run = 0;

  if (m_restart_interval)
  {
    m_restarts_left = m_restart_interval;
    m_next_restart_num = 0;
  }

  fix_in_buffer();

  return JPGD_TRUE;
}

// Starts a frame. Determines if the number of components or sampling factors
// are supported.
void jpeg_decoder::init_frame()
{
  int i;

  if (m_comps_in_frame == 1)
  {
    if ((m_comp_h_samp[0] != 1) || (m_comp_v_samp[0] != 1))
      stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS);

    m_scan_type = JPGD_GRAYSCALE;
    m_max_blocks_per_mcu = 1;
    m_max_mcu_x_size = 8;
    m_max_mcu_y_size = 8;
  }
  else if (m_comps_in_frame == 3)
  {
    if ( ((m_comp_h_samp[1] != 1) || (m_comp_v_samp[1] != 1)) ||
         ((m_comp_h_samp[2] != 1) || (m_comp_v_samp[2] != 1)) )
      stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS);

    if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1))
    {
      m_scan_type = JPGD_YH1V1;

      m_max_blocks_per_mcu = 3;
      m_max_mcu_x_size = 8;
      m_max_mcu_y_size = 8;
    }
    else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1))
    {
      m_scan_type = JPGD_YH2V1;
      m_max_blocks_per_mcu = 4;
      m_max_mcu_x_size = 16;
      m_max_mcu_y_size = 8;
    }
    else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 2))
    {
      m_scan_type = JPGD_YH1V2;
      m_max_blocks_per_mcu = 4;
      m_max_mcu_x_size = 8;
      m_max_mcu_y_size = 16;
    }
    else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2))
    {
      m_scan_type = JPGD_YH2V2;
      m_max_blocks_per_mcu = 6;
      m_max_mcu_x_size = 16;
      m_max_mcu_y_size = 16;
    }
    else
      stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS);
  }
  else
    stop_decoding(JPGD_UNSUPPORTED_COLORSPACE);

  m_max_mcus_per_row = (m_image_x_size + (m_max_mcu_x_size - 1)) / m_max_mcu_x_size;
  m_max_mcus_per_col = (m_image_y_size + (m_max_mcu_y_size - 1)) / m_max_mcu_y_size;

  // These values are for the *destination* pixels: after conversion.
  if (m_scan_type == JPGD_GRAYSCALE)
    m_dest_bytes_per_pixel = 1;
  else
    m_dest_bytes_per_pixel = 4;

  m_dest_bytes_per_scan_line = ((m_image_x_size + 15) & 0xFFF0) * m_dest_bytes_per_pixel;

  m_real_dest_bytes_per_scan_line = (m_image_x_size * m_dest_bytes_per_pixel);

  // Initialize two scan line buffers.
  m_pScan_line_0 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true);
  if ((m_scan_type == JPGD_YH1V2) || (m_scan_type == JPGD_YH2V2))
    m_pScan_line_1 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true);

  m_max_blocks_per_row = m_max_mcus_per_row * m_max_blocks_per_mcu;

  // Should never happen
  if (m_max_blocks_per_row > JPGD_MAX_BLOCKS_PER_ROW)
    stop_decoding(JPGD_ASSERTION_ERROR);

  // Allocate the coefficient buffer, enough for one MCU
  m_pMCU_coefficients = (jpgd_block_t*)alloc(m_max_blocks_per_mcu * 64 * sizeof(jpgd_block_t));

  for (i = 0; i < m_max_blocks_per_mcu; i++)
    m_mcu_block_max_zag[i] = 64;

  m_expanded_blocks_per_component = m_comp_h_samp[0] * m_comp_v_samp[0];
  m_expanded_blocks_per_mcu = m_expanded_blocks_per_component * m_comps_in_frame;
  m_expanded_blocks_per_row = m_max_mcus_per_row * m_expanded_blocks_per_mcu;
  // Freq. domain chroma upsampling is only supported for H2V2 subsampling factor.
  // BEGIN EPIC MOD
#if JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING
  m_freq_domain_chroma_upsample = (m_expanded_blocks_per_mcu == 4*3);
#else
  m_freq_domain_chroma_upsample = 0;
#endif
  // END EPIC MOD

  if (m_freq_domain_chroma_upsample)
    m_pSample_buf = (uint8 *)alloc(m_expanded_blocks_per_row * 64);
  else
    m_pSample_buf = (uint8 *)alloc(m_max_blocks_per_row * 64);

  m_total_lines_left = m_image_y_size;

  m_mcu_lines_left = 0;

  create_look_ups();
}

// The coeff_buf series of methods originally stored the coefficients
// into a "virtual" file which was located in EMS, XMS, or a disk file. A cache
// was used to make this process more efficient. Now, we can store the entire
// thing in RAM.
jpeg_decoder::coeff_buf* jpeg_decoder::coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y)
{
  coeff_buf* cb = (coeff_buf*)alloc(sizeof(coeff_buf));

  cb->block_num_x = block_num_x;
  cb->block_num_y = block_num_y;
  cb->block_len_x = block_len_x;
  cb->block_len_y = block_len_y;
  cb->block_size = (block_len_x * block_len_y) * sizeof(jpgd_block_t);
  cb->pData = (uint8 *)alloc(cb->block_size * block_num_x * block_num_y, true);
  return cb;
}

inline jpgd_block_t *jpeg_decoder::coeff_buf_getp(coeff_buf *cb, int block_x, int block_y)
{
  JPGD_ASSERT((block_x < cb->block_num_x) && (block_y < cb->block_num_y));
  return (jpgd_block_t *)(cb->pData + block_x * cb->block_size + block_y * (cb->block_size * cb->block_num_x));
}

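// coeff_buf_getp() addressing: blocks are stored row-major, so block
// (x, y) lives at pData + (y * block_num_x + x) * block_size.
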
// The following methods decode the various types of m_blocks encountered
// in progressively encoded images.
void jpeg_decoder::decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y)
{
  int s, r;
  jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y);

  if ((s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_dc_tab[component_id]])) != 0)
  {
    r = pD->get_bits_no_markers(s);
    s = HUFF_EXTEND(r, s);
  }

  pD->m_last_dc_val[component_id] = (s += pD->m_last_dc_val[component_id]);

  p[0] = static_cast<jpgd_block_t>(s << pD->m_successive_low);
}

void jpeg_decoder::decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y)
{
  if (pD->get_bits_no_markers(1))
  {
    jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y);

    p[0] |= (1 << pD->m_successive_low);
  }
}

void jpeg_decoder::decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y)
{
  int k, s, r;

  if (pD->m_eob_run)
  {
    pD->m_eob_run--;
    return;
  }

  jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y);

  for (k = pD->m_spectral_start; k <= pD->m_spectral_end; k++)
  {
    s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]);

    r = s >> 4;
    s &= 15;

    if (s)
    {
      if ((k += r) > 63)
        pD->stop_decoding(JPGD_DECODE_ERROR);

      r = pD->get_bits_no_markers(s);
      s = HUFF_EXTEND(r, s);

      p[g_ZAG[k]] = static_cast<jpgd_block_t>(s << pD->m_successive_low);
    }
    else
    {
      if (r == 15)
      {
        if ((k += 15) > 63)
          pD->stop_decoding(JPGD_DECODE_ERROR);
      }
      else
      {
        pD->m_eob_run = 1 << r;

        if (r)
          pD->m_eob_run += pD->get_bits_no_markers(r);

        pD->m_eob_run--;

        break;
      }
    }
  }
}

void jpeg_decoder::decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y)
{
  int s, k, r;
  int p1 = 1 << pD->m_successive_low;
  int m1 = (-1) << pD->m_successive_low;
  jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y);

  k = pD->m_spectral_start;

  if (pD->m_eob_run == 0)
  {
    for ( ; k <= pD->m_spectral_end; k++)
    {
      s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]);

      r = s >> 4;
      s &= 15;

      if (s)
      {
        if (s != 1)
          pD->stop_decoding(JPGD_DECODE_ERROR);

        if (pD->get_bits_no_markers(1))
          s = p1;
        else
          s = m1;
      }
      else
      {
        if (r != 15)
        {
          pD->m_eob_run = 1 << r;

          if (r)
            pD->m_eob_run += pD->get_bits_no_markers(r);

          break;
        }
      }

      do
      {
        // BEGIN EPIC MOD
        JPGD_ASSERT(k < 64);
        // END EPIC MOD

        jpgd_block_t *this_coef = p + g_ZAG[k];

        if (*this_coef != 0)
        {
          if (pD->get_bits_no_markers(1))
          {
            if ((*this_coef & p1) == 0)
            {
              if (*this_coef >= 0)
                *this_coef = static_cast<jpgd_block_t>(*this_coef + p1);
              else
                *this_coef = static_cast<jpgd_block_t>(*this_coef + m1);
            }
          }
        }
        else
        {
          if (--r < 0)
            break;
        }

        k++;

      } while (k <= pD->m_spectral_end);

      if ((s) && (k < 64))
      {
        p[g_ZAG[k]] = static_cast<jpgd_block_t>(s);
      }
    }
  }

  if (pD->m_eob_run > 0)
  {
    for ( ; k <= pD->m_spectral_end; k++)
    {
      // BEGIN EPIC MOD
      JPGD_ASSERT(k < 64);
      // END EPIC MOD

      jpgd_block_t *this_coef = p + g_ZAG[k];

      if (*this_coef != 0)
      {
        if (pD->get_bits_no_markers(1))
        {
          if ((*this_coef & p1) == 0)
          {
            if (*this_coef >= 0)
              *this_coef = static_cast<jpgd_block_t>(*this_coef + p1);
            else
              *this_coef = static_cast<jpgd_block_t>(*this_coef + m1);
          }
        }
      }
    }

    pD->m_eob_run--;
  }
}

// Decode a scan in a progressively encoded image.
void jpeg_decoder::decode_scan(pDecode_block_func decode_block_func)
{
  int mcu_row, mcu_col, mcu_block;
  int block_x_mcu[JPGD_MAX_COMPONENTS], m_block_y_mcu[JPGD_MAX_COMPONENTS];

  memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu));

  for (mcu_col = 0; mcu_col < m_mcus_per_col; mcu_col++)
  {
    int component_num, component_id;

    memset(block_x_mcu, 0, sizeof(block_x_mcu));

    for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++)
    {
      int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0;

      if ((m_restart_interval) && (m_restarts_left == 0))
        process_restart();

      for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++)
      {
        component_id = m_mcu_org[mcu_block];

        decode_block_func(this, component_id, block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs);

        if (m_comps_in_scan == 1)
          block_x_mcu[component_id]++;
        else
        {
          if (++block_x_mcu_ofs == m_comp_h_samp[component_id])
          {
            block_x_mcu_ofs = 0;

            if (++block_y_mcu_ofs == m_comp_v_samp[component_id])
            {
              block_y_mcu_ofs = 0;
              block_x_mcu[component_id] += m_comp_h_samp[component_id];
            }
          }
        }
      }

      m_restarts_left--;
    }

    if (m_comps_in_scan == 1)
      m_block_y_mcu[m_comp_list[0]]++;
    else
    {
      for (component_num = 0; component_num < m_comps_in_scan; component_num++)
      {
        component_id = m_comp_list[component_num];
        m_block_y_mcu[component_id] += m_comp_v_samp[component_id];
      }
    }
  }
}

// Decode a progressively encoded image.
void jpeg_decoder::init_progressive()
{
  int i;

  if (m_comps_in_frame == 4)
    stop_decoding(JPGD_UNSUPPORTED_COLORSPACE);

  // Allocate the coefficient buffers.
  for (i = 0; i < m_comps_in_frame; i++)
  {
    m_dc_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 1, 1);
    m_ac_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 8, 8);
  }

  for ( ; ; )
  {
    int dc_only_scan, refinement_scan;
    pDecode_block_func decode_block_func;

    if (!init_scan())
      break;

    dc_only_scan = (m_spectral_start == 0);
    refinement_scan = (m_successive_high != 0);

    if ((m_spectral_start > m_spectral_end) || (m_spectral_end > 63))
      stop_decoding(JPGD_BAD_SOS_SPECTRAL);

    if (dc_only_scan)
    {
      if (m_spectral_end)
        stop_decoding(JPGD_BAD_SOS_SPECTRAL);
    }
    else if (m_comps_in_scan != 1) /* AC scans can only contain one component */
      stop_decoding(JPGD_BAD_SOS_SPECTRAL);

    if ((refinement_scan) && (m_successive_low != m_successive_high - 1))
      stop_decoding(JPGD_BAD_SOS_SUCCESSIVE);

    if (dc_only_scan)
    {
      if (refinement_scan)
        decode_block_func = decode_block_dc_refine;
      else
        decode_block_func = decode_block_dc_first;
    }
    else
    {
      if (refinement_scan)
        decode_block_func = decode_block_ac_refine;
      else
        decode_block_func = decode_block_ac_first;
    }

    decode_scan(decode_block_func);

    m_bits_left = 16;
    get_bits(16);
    get_bits(16);
  }

  m_comps_in_scan = m_comps_in_frame;

  for (i = 0; i < m_comps_in_frame; i++)
    m_comp_list[i] = i;

  calc_mcu_block_order();
}

void jpeg_decoder::init_sequential()
{
  if (!init_scan())
    stop_decoding(JPGD_UNEXPECTED_MARKER);
}

void jpeg_decoder::decode_start()
{
  init_frame();

  if (m_progressive_flag)
    init_progressive();
  else
    init_sequential();
}

void jpeg_decoder::decode_init(jpeg_decoder_stream *pStream)
{
  init(pStream);
  locate_sof_marker();
}

jpeg_decoder::jpeg_decoder(jpeg_decoder_stream *pStream)
{
  if (setjmp(m_jmp_state))
    return;
  decode_init(pStream);
}

int jpeg_decoder::begin_decoding()
{
  if (m_ready_flag)
    return JPGD_SUCCESS;

  if (m_error_code)
    return JPGD_FAILED;

  if (setjmp(m_jmp_state))
    return JPGD_FAILED;

  decode_start();

  m_ready_flag = true;

  return JPGD_SUCCESS;
}

jpeg_decoder::~jpeg_decoder()
{
  free_all_blocks();
}

jpeg_decoder_file_stream::jpeg_decoder_file_stream()
{
  m_pFile = NULL;
  m_eof_flag = false;
  m_error_flag = false;
}

void jpeg_decoder_file_stream::close()
{
  if (m_pFile)
  {
    fclose(m_pFile);
    m_pFile = NULL;
  }

  m_eof_flag = false;
  m_error_flag = false;
}

jpeg_decoder_file_stream::~jpeg_decoder_file_stream()
{
  close();
}

bool jpeg_decoder_file_stream::open(const char *Pfilename)
{
  close();

  m_eof_flag = false;
  m_error_flag = false;

#if defined(_MSC_VER)
  m_pFile = NULL;
  fopen_s(&m_pFile, Pfilename, "rb");
#else
  m_pFile = fopen(Pfilename, "rb");
#endif
  return m_pFile != NULL;
}

int jpeg_decoder_file_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag)
{
  if (!m_pFile)
    return -1;

  if (m_eof_flag)
  {
    *pEOF_flag = true;
    return 0;
  }

  if (m_error_flag)
    return -1;

  int bytes_read = static_cast<int>(fread(pBuf, 1, max_bytes_to_read, m_pFile));
  if (bytes_read < max_bytes_to_read)
  {
    if (ferror(m_pFile))
    {
      m_error_flag = true;
      return -1;
    }

    m_eof_flag = true;
    *pEOF_flag = true;
  }

  return bytes_read;
}

bool jpeg_decoder_mem_stream::open(const uint8 *pSrc_data, uint size)
{
  close();
  m_pSrc_data = pSrc_data;
  m_ofs = 0;
  m_size = size;
  return true;
}

int jpeg_decoder_mem_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag)
{
  *pEOF_flag = false;

  if (!m_pSrc_data)
    return -1;

  uint bytes_remaining = m_size - m_ofs;
  if ((uint)max_bytes_to_read > bytes_remaining)
  {
    max_bytes_to_read = bytes_remaining;
    *pEOF_flag = true;
  }

  memcpy(pBuf, m_pSrc_data + m_ofs, max_bytes_to_read);
  m_ofs += max_bytes_to_read;

  return max_bytes_to_read;
}

unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps)
{
  if (!actual_comps)
    return NULL;
  *actual_comps = 0;

  if ((!pStream) || (!width) || (!height) || (!req_comps))
    return NULL;

  if ((req_comps != 1) && (req_comps != 3) && (req_comps != 4))
    return NULL;

  jpeg_decoder decoder(pStream);
  if (decoder.get_error_code() != JPGD_SUCCESS)
    return NULL;

  const int image_width = decoder.get_width(), image_height = decoder.get_height();
  *width = image_width;
  *height = image_height;
  *actual_comps = decoder.get_num_components();

  if (decoder.begin_decoding() != JPGD_SUCCESS)
    return NULL;

  const int dst_bpl = image_width * req_comps;

  uint8 *pImage_data = (uint8*)jpgd_malloc(dst_bpl * image_height);
  if (!pImage_data)
    return NULL;

  for (int y = 0; y < image_height; y++)
  {
    const uint8* pScan_line = 0;
    uint scan_line_len;
    if (decoder.decode((const void**)&pScan_line, &scan_line_len) != JPGD_SUCCESS)
    {
      jpgd_free(pImage_data);
      return NULL;
    }

    uint8 *pDst = pImage_data + y * dst_bpl;

    if (((req_comps == 4) && (decoder.get_num_components() == 3)) ||
        ((req_comps == 1) && (decoder.get_num_components() == 1)))
    {
      memcpy(pDst, pScan_line, dst_bpl);
    }
    else if (decoder.get_num_components() == 1)
    {
      if (req_comps == 3)
      {
        for (int x = 0; x < image_width; x++)
        {
          uint8 luma = pScan_line[x];
          pDst[0] = luma;
          pDst[1] = luma;
          pDst[2] = luma;
          pDst += 3;
        }
      }
      else
      {
        for (int x = 0; x < image_width; x++)
        {
          uint8 luma = pScan_line[x];
          pDst[0] = luma;
          pDst[1] = luma;
          pDst[2] = luma;
          pDst[3] = 255;
          pDst += 4;
        }
      }
    }
    else if (decoder.get_num_components() == 3)
    {
      if (req_comps == 1)
      {
        const int YR = 19595, YG = 38470, YB = 7471;
        for (int x = 0; x < image_width; x++)
        {
          int r = pScan_line[x*4+0];
          int g = pScan_line[x*4+1];
          int b = pScan_line[x*4+2];
          *pDst++ = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16);
        }
      }
      else
      {
        for (int x = 0; x < image_width; x++)
        {
          pDst[0] = pScan_line[x*4+0];
          pDst[1] = pScan_line[x*4+1];
          pDst[2] = pScan_line[x*4+2];
          pDst += 3;
        }
      }
    }
  }

  return pImage_data;
}

// BEGIN EPIC MOD
unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format)
{
  jpg_format = (ERGBFormatJPG)format;
  // END EPIC MOD
  jpgd::jpeg_decoder_mem_stream mem_stream(pSrc_data, src_data_size);
  return decompress_jpeg_image_from_stream(&mem_stream, width, height, actual_comps, req_comps);
}

unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps)
{
  jpgd::jpeg_decoder_file_stream file_stream;
  if (!file_stream.open(pSrc_filename))
    return NULL;
  return decompress_jpeg_image_from_stream(&file_stream, width, height, actual_comps, req_comps);
}

} // namespace jpgd
crazy_functions/test_project/Cpp/libJPG/jpgd.h
ADDED
@@ -0,0 +1,316 @@
// jpgd.h - C++ class for JPEG decompression.
// Public domain, Rich Geldreich <richgel99@gmail.com>
#ifndef JPEG_DECODER_H
#define JPEG_DECODER_H

#include <stdlib.h>
#include <stdio.h>
#include <setjmp.h>

namespace jpgd
{
  typedef unsigned char  uint8;
  typedef   signed short int16;
  typedef unsigned short uint16;
  typedef unsigned int   uint;
  typedef   signed int   int32;

  // Loads a JPEG image from a memory buffer or a file.
  // req_comps can be 1 (grayscale), 3 (RGB), or 4 (RGBA).
  // On return, width/height will be set to the image's dimensions, and actual_comps will be set to either 1 (grayscale) or 3 (RGB).
  // Notes: For more control over where and how the source data is read, see the decompress_jpeg_image_from_stream() function below, or call the jpeg_decoder class directly.
  // Requesting an 8 or 32bpp image is currently a little faster than 24bpp because the jpeg_decoder class itself currently always unpacks to either 8 or 32bpp.
  // BEGIN EPIC MOD
  //unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps);
  unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format);
  // END EPIC MOD
  unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps);

  // Success/failure error codes.
  enum jpgd_status
  {
    JPGD_SUCCESS = 0, JPGD_FAILED = -1, JPGD_DONE = 1,
    JPGD_BAD_DHT_COUNTS = -256, JPGD_BAD_DHT_INDEX, JPGD_BAD_DHT_MARKER, JPGD_BAD_DQT_MARKER, JPGD_BAD_DQT_TABLE,
    JPGD_BAD_PRECISION, JPGD_BAD_HEIGHT, JPGD_BAD_WIDTH, JPGD_TOO_MANY_COMPONENTS,
    JPGD_BAD_SOF_LENGTH, JPGD_BAD_VARIABLE_MARKER, JPGD_BAD_DRI_LENGTH, JPGD_BAD_SOS_LENGTH,
    JPGD_BAD_SOS_COMP_ID, JPGD_W_EXTRA_BYTES_BEFORE_MARKER, JPGD_NO_ARITHMITIC_SUPPORT, JPGD_UNEXPECTED_MARKER,
    JPGD_NOT_JPEG, JPGD_UNSUPPORTED_MARKER, JPGD_BAD_DQT_LENGTH, JPGD_TOO_MANY_BLOCKS,
    JPGD_UNDEFINED_QUANT_TABLE, JPGD_UNDEFINED_HUFF_TABLE, JPGD_NOT_SINGLE_SCAN, JPGD_UNSUPPORTED_COLORSPACE,
    JPGD_UNSUPPORTED_SAMP_FACTORS, JPGD_DECODE_ERROR, JPGD_BAD_RESTART_MARKER, JPGD_ASSERTION_ERROR,
    JPGD_BAD_SOS_SPECTRAL, JPGD_BAD_SOS_SUCCESSIVE, JPGD_STREAM_READ, JPGD_NOTENOUGHMEM
  };

  // Input stream interface.
  // Derive from this class to read input data from sources other than files or memory. Set m_eof_flag to true when no more data is available.
  // The decoder is rather greedy: it will keep on calling this method until its internal input buffer is full, or until the EOF flag is set.
  // If the input stream contains data after the JPEG stream's EOI (end of image) marker it will probably be pulled into the internal buffer.
  // Call the get_total_bytes_read() method to determine the actual size of the JPEG stream after successful decoding.
  class jpeg_decoder_stream
  {
  public:
    jpeg_decoder_stream() { }
    virtual ~jpeg_decoder_stream() { }

    // The read() method is called when the internal input buffer is empty.
    // Parameters:
    // pBuf - input buffer
    // max_bytes_to_read - maximum bytes that can be written to pBuf
    // pEOF_flag - set this to true if at end of stream (no more bytes remaining)
    // Returns -1 on error, otherwise return the number of bytes actually written to the buffer (which may be 0).
    // Notes: This method will be called in a loop until you set *pEOF_flag to true or the internal buffer is full.
    virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) = 0;
  };

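Per the comments above, a custom input source only needs to subclass jpeg_decoder_stream and implement read(). A minimal sketch following those documented rules (vector_stream is a hypothetical name, not part of the library):

// Hypothetical example subclass: feeds the decoder from a std::vector.
#include <vector>
#include <cstring>

class vector_stream : public jpgd::jpeg_decoder_stream
{
  const std::vector<jpgd::uint8> &m_buf;
  size_t m_pos;
public:
  explicit vector_stream(const std::vector<jpgd::uint8> &buf) : m_buf(buf), m_pos(0) { }

  virtual int read(jpgd::uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag)
  {
    size_t remaining = m_buf.size() - m_pos;
    size_t n = (size_t)max_bytes_to_read < remaining ? (size_t)max_bytes_to_read : remaining;
    memcpy(pBuf, m_buf.data() + m_pos, n);
    m_pos += n;
    if (m_pos == m_buf.size())
      *pEOF_flag = true;   // no more bytes remaining, per the read() contract
    return (int)n;         // bytes written; -1 would signal an error
  }
};
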
  // stdio FILE stream class.
  class jpeg_decoder_file_stream : public jpeg_decoder_stream
  {
    jpeg_decoder_file_stream(const jpeg_decoder_file_stream &);
    jpeg_decoder_file_stream &operator =(const jpeg_decoder_file_stream &);

    FILE *m_pFile;
    bool m_eof_flag, m_error_flag;

  public:
    jpeg_decoder_file_stream();
    virtual ~jpeg_decoder_file_stream();

    bool open(const char *Pfilename);
    void close();

    virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
  };

  // Memory stream class.
  class jpeg_decoder_mem_stream : public jpeg_decoder_stream
  {
    const uint8 *m_pSrc_data;
    uint m_ofs, m_size;

  public:
    jpeg_decoder_mem_stream() : m_pSrc_data(NULL), m_ofs(0), m_size(0) { }
    jpeg_decoder_mem_stream(const uint8 *pSrc_data, uint size) : m_pSrc_data(pSrc_data), m_ofs(0), m_size(size) { }

    virtual ~jpeg_decoder_mem_stream() { }

    bool open(const uint8 *pSrc_data, uint size);
    void close() { m_pSrc_data = NULL; m_ofs = 0; m_size = 0; }

    virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
  };

  // Loads JPEG file from a jpeg_decoder_stream.
  unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps);

  enum
  {
    JPGD_IN_BUF_SIZE = 8192, JPGD_MAX_BLOCKS_PER_MCU = 10, JPGD_MAX_HUFF_TABLES = 8, JPGD_MAX_QUANT_TABLES = 4,
    JPGD_MAX_COMPONENTS = 4, JPGD_MAX_COMPS_IN_SCAN = 4, JPGD_MAX_BLOCKS_PER_ROW = 8192, JPGD_MAX_HEIGHT = 16384, JPGD_MAX_WIDTH = 16384
  };

  typedef int16 jpgd_quant_t;
  typedef int16 jpgd_block_t;

  class jpeg_decoder
  {
  public:
    // Call get_error_code() after constructing to determine if the stream is valid or not. You may call the get_width(), get_height(), etc.
    // methods after the constructor is called. You may then either destruct the object, or begin decoding the image by calling begin_decoding(), then decode() on each scanline.
    jpeg_decoder(jpeg_decoder_stream *pStream);

    ~jpeg_decoder();

    // Call this method after constructing the object to begin decompression.
    // If JPGD_SUCCESS is returned you may then call decode() on each scanline.
    int begin_decoding();

    // Returns the next scan line.
    // For grayscale images, pScan_line will point to a buffer containing 8-bit pixels (get_bytes_per_pixel() will return 1).
    // Otherwise, it will always point to a buffer containing 32-bit RGBA pixels (A will always be 255, and get_bytes_per_pixel() will return 4).
    // Returns JPGD_SUCCESS if a scan line has been returned.
    // Returns JPGD_DONE if all scan lines have been returned.
    // Returns JPGD_FAILED if an error occurred. Call get_error_code() for more info.
    int decode(const void** pScan_line, uint* pScan_line_len);

    inline jpgd_status get_error_code() const { return m_error_code; }

    inline int get_width() const { return m_image_x_size; }
    inline int get_height() const { return m_image_y_size; }

    inline int get_num_components() const { return m_comps_in_frame; }

    inline int get_bytes_per_pixel() const { return m_dest_bytes_per_pixel; }
    inline int get_bytes_per_scan_line() const { return m_image_x_size * get_bytes_per_pixel(); }

    // Returns the total number of bytes actually consumed by the decoder (which should equal the actual size of the JPEG file).
    inline int get_total_bytes_read() const { return m_total_bytes_read; }

  private:
    jpeg_decoder(const jpeg_decoder &);
    jpeg_decoder &operator =(const jpeg_decoder &);

    typedef void (*pDecode_block_func)(jpeg_decoder *, int, int, int);

    struct huff_tables
    {
      bool ac_table;
      uint  look_up[256];
      uint  look_up2[256];
      uint8 code_size[256];
      uint  tree[512];
    };

    struct coeff_buf
    {
      uint8 *pData;
      int block_num_x, block_num_y;
      int block_len_x, block_len_y;
      int block_size;
    };

    struct mem_block
    {
      mem_block *m_pNext;
      size_t m_used_count;
      size_t m_size;
      char m_data[1];
    };

    jmp_buf m_jmp_state;
    mem_block *m_pMem_blocks;
    int m_image_x_size;
    int m_image_y_size;
    jpeg_decoder_stream *m_pStream;
    int m_progressive_flag;
    uint8 m_huff_ac[JPGD_MAX_HUFF_TABLES];
    uint8* m_huff_num[JPGD_MAX_HUFF_TABLES];      // pointer to number of Huffman codes per bit size
    uint8* m_huff_val[JPGD_MAX_HUFF_TABLES];      // pointer to Huffman codes per bit size
    jpgd_quant_t* m_quant[JPGD_MAX_QUANT_TABLES]; // pointer to quantization tables
    int m_scan_type;                              // Gray, Yh1v1, Yh1v2, Yh2v1, Yh2v2 (CMYK111, CMYK4114 no longer supported)
    int m_comps_in_frame;                         // # of components in frame
    int m_comp_h_samp[JPGD_MAX_COMPONENTS];       // component's horizontal sampling factor
    int m_comp_v_samp[JPGD_MAX_COMPONENTS];       // component's vertical sampling factor
    int m_comp_quant[JPGD_MAX_COMPONENTS];        // component's quantization table selector
    int m_comp_ident[JPGD_MAX_COMPONENTS];        // component's ID
    int m_comp_h_blocks[JPGD_MAX_COMPONENTS];
    int m_comp_v_blocks[JPGD_MAX_COMPONENTS];
    int m_comps_in_scan;                          // # of components in scan
    int m_comp_list[JPGD_MAX_COMPS_IN_SCAN];      // components in this scan
    int m_comp_dc_tab[JPGD_MAX_COMPONENTS];       // component's DC Huffman coding table selector
    int m_comp_ac_tab[JPGD_MAX_COMPONENTS];       // component's AC Huffman coding table selector
    int m_spectral_start;                         // spectral selection start
    int m_spectral_end;                           // spectral selection end
    int m_successive_low;                         // successive approximation low
    int m_successive_high;                        // successive approximation high
    int m_max_mcu_x_size;                         // MCU's max. X size in pixels
    int m_max_mcu_y_size;                         // MCU's max. Y size in pixels
    int m_blocks_per_mcu;
    int m_max_blocks_per_row;
    int m_mcus_per_row, m_mcus_per_col;
    int m_mcu_org[JPGD_MAX_BLOCKS_PER_MCU];
    int m_total_lines_left;                       // total # lines left in image
    int m_mcu_lines_left;                         // total # lines left in this MCU
    int m_real_dest_bytes_per_scan_line;
    int m_dest_bytes_per_scan_line;               // rounded up
    int m_dest_bytes_per_pixel;                   // 4 (RGB) or 1 (Y)
    huff_tables* m_pHuff_tabs[JPGD_MAX_HUFF_TABLES];
    coeff_buf* m_dc_coeffs[JPGD_MAX_COMPONENTS];
    coeff_buf* m_ac_coeffs[JPGD_MAX_COMPONENTS];
    int m_eob_run;
    int m_block_y_mcu[JPGD_MAX_COMPONENTS];
    uint8* m_pIn_buf_ofs;
    int m_in_buf_left;
    int m_tem_flag;
    bool m_eof_flag;
    uint8 m_in_buf_pad_start[128];
    uint8 m_in_buf[JPGD_IN_BUF_SIZE + 128];
    uint8 m_in_buf_pad_end[128];
    int m_bits_left;
    uint m_bit_buf;
    int m_restart_interval;
    int m_restarts_left;
    int m_next_restart_num;
    int m_max_mcus_per_row;
    int m_max_blocks_per_mcu;
    int m_expanded_blocks_per_mcu;
    int m_expanded_blocks_per_row;
    int m_expanded_blocks_per_component;
    bool m_freq_domain_chroma_upsample;
    int m_max_mcus_per_col;
    uint m_last_dc_val[JPGD_MAX_COMPONENTS];
    jpgd_block_t* m_pMCU_coefficients;
    int m_mcu_block_max_zag[JPGD_MAX_BLOCKS_PER_MCU];
    uint8* m_pSample_buf;
    int m_crr[256];
    int m_cbb[256];
    int m_crg[256];
    int m_cbg[256];
    uint8* m_pScan_line_0;
    uint8* m_pScan_line_1;
    jpgd_status m_error_code;
    bool m_ready_flag;
    int m_total_bytes_read;

    void free_all_blocks();
    // BEGIN EPIC MOD
    UE_NORETURN void stop_decoding(jpgd_status status);
    // END EPIC MOD
    void *alloc(size_t n, bool zero = false);
    void word_clear(void *p, uint16 c, uint n);
    void prep_in_buffer();
    void read_dht_marker();
    void read_dqt_marker();
    void read_sof_marker();
    void skip_variable_marker();
    void read_dri_marker();
    void read_sos_marker();
    int next_marker();
    int process_markers();
    void locate_soi_marker();
    void locate_sof_marker();
    int locate_sos_marker();
    void init(jpeg_decoder_stream * pStream);
    void create_look_ups();
    void fix_in_buffer();
    void transform_mcu(int mcu_row);
    void transform_mcu_expand(int mcu_row);
    coeff_buf* coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y);
    inline jpgd_block_t *coeff_buf_getp(coeff_buf *cb, int block_x, int block_y);
    void load_next_row();
    void decode_next_row();
    void make_huff_table(int index, huff_tables *pH);
    void check_quant_tables();
    void check_huff_tables();
    void calc_mcu_block_order();
    int init_scan();
    void init_frame();
    void process_restart();
    void decode_scan(pDecode_block_func decode_block_func);
    void init_progressive();
    void init_sequential();
    void decode_start();
    void decode_init(jpeg_decoder_stream * pStream);
    void H2V2Convert();
    void H2V1Convert();
    void H1V2Convert();
    void H1V1Convert();
    void gray_convert();
    void expanded_convert();
    void find_eoi();
    inline uint get_char();
    inline uint get_char(bool *pPadding_flag);
    inline void stuff_char(uint8 q);
    inline uint8 get_octet();
    inline uint get_bits(int num_bits);
    inline uint get_bits_no_markers(int numbits);
    inline int huff_decode(huff_tables *pH);
    inline int huff_decode(huff_tables *pH, int& extrabits);
    static inline uint8 clamp(int i);
    static void decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y);
    static void decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y);
    static void decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y);
    static void decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y);
  };

} // namespace jpgd

#endif // JPEG_DECODER_H
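A minimal usage sketch of the header above (the file name is a placeholder; note that in this Unreal-modified build the returned buffer comes from jpgd_malloc, so how it should be released depends on the build's allocator):

// Hypothetical caller of the jpgd API declared above.
#include "jpgd.h"
#include <cstdio>
#include <cstdlib>

int main()
{
    int width = 0, height = 0, actual_comps = 0;
    // Request 3 components (RGB); returns a width*height*3 buffer, or NULL on failure.
    unsigned char *pixels = jpgd::decompress_jpeg_image_from_file(
        "input.jpg", &width, &height, &actual_comps, 3);
    if (!pixels)
        return 1;
    printf("%dx%d, %d source component(s)\n", width, height, actual_comps);
    free(pixels);  // stock jpgd allocates with malloc; this UE build may differ
    return 0;
}
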
crazy_functions/test_project/Cpp/libJPG/jpge.cpp
ADDED
@@ -0,0 +1,1049 @@
// jpge.cpp - C++ class for JPEG compression.
// Public domain, Rich Geldreich <richgel99@gmail.com>
// v1.01, Dec. 18, 2010 - Initial release
// v1.02, Apr. 6, 2011 - Removed 2x2 ordered dither in H2V1 chroma subsampling method load_block_16_8_8(). (The rounding factor was 2, when it should have been 1. Either way, it wasn't helping.)
// v1.03, Apr. 16, 2011 - Added support for optimized Huffman code tables, optimized dynamic memory allocation down to only 1 alloc.
// Also from Alex Evans: Added RGBA support, linear memory allocator (no longer needed in v1.03).
// v1.04, May. 19, 2012: Forgot to set m_pFile ptr to NULL in cfile_stream::close(). Thanks to Owen Kaluza for reporting this bug.
// Code tweaks to fix VS2008 static code analysis warnings (all looked harmless).
// Code review revealed method load_block_16_8_8() (used for the non-default H2V1 sampling mode to downsample chroma) somehow didn't get the rounding factor fix from v1.02.

#include "jpge.h"

#include <stdlib.h>
#include <string.h>
#if PLATFORM_WINDOWS
#include <malloc.h>
#endif

#define JPGE_MAX(a,b) (((a)>(b))?(a):(b))
#define JPGE_MIN(a,b) (((a)<(b))?(a):(b))

namespace jpge {

static inline void *jpge_malloc(size_t nSize) { return FMemory::Malloc(nSize); }
static inline void jpge_free(void *p) { FMemory::Free(p); }

// Various JPEG enums and tables.
enum { M_SOF0 = 0xC0, M_DHT = 0xC4, M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_APP0 = 0xE0 };
enum { DC_LUM_CODES = 12, AC_LUM_CODES = 256, DC_CHROMA_CODES = 12, AC_CHROMA_CODES = 256, MAX_HUFF_SYMBOLS = 257, MAX_HUFF_CODESIZE = 32 };

static uint8 s_zag[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 };
static int16 s_std_lum_quant[64] = { 16,11,12,14,12,10,16,14,13,14,18,17,16,19,24,40,26,24,22,22,24,49,35,37,29,40,58,51,61,60,57,51,56,55,64,72,92,78,64,68,87,69,55,56,80,109,81,87,95,98,103,104,103,62,77,113,121,112,100,120,92,101,103,99 };
static int16 s_std_croma_quant[64] = { 17,18,18,24,21,24,47,26,26,47,99,66,56,66,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99 };
static uint8 s_dc_lum_bits[17] = { 0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0 };
static uint8 s_dc_lum_val[DC_LUM_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 };
static uint8 s_ac_lum_bits[17] = { 0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d };
static uint8 s_ac_lum_val[AC_LUM_CODES] =
{
  0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08,0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0,
  0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28,0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,
  0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89,
  0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,
  0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,
  0xf9,0xfa
};
static uint8 s_dc_chroma_bits[17] = { 0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0 };
static uint8 s_dc_chroma_val[DC_CHROMA_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 };
static uint8 s_ac_chroma_bits[17] = { 0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77 };
static uint8 s_ac_chroma_val[AC_CHROMA_CODES] =
{
  0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91,0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0,
  0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26,0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,
  0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87,
  0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,
  0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,
  0xf9,0xfa
};

// Low-level helper functions.
template <class T> inline void clear_obj(T &obj) { memset(&obj, 0, sizeof(obj)); }

const int YR = 19595, YG = 38470, YB = 7471, CB_R = -11059, CB_G = -21709, CB_B = 32768, CR_R = 32768, CR_G = -27439, CR_B = -5329;
static inline uint8 clamp(int i) { if (static_cast<uint>(i) > 255U) { if (i < 0) i = 0; else if (i > 255) i = 255; } return static_cast<uint8>(i); }

static void RGB_to_YCC(uint8* pDst, const uint8 *pSrc, int num_pixels)
{
  for ( ; num_pixels; pDst += 3, pSrc += 3, num_pixels--)
  {
    const int r = pSrc[0], g = pSrc[1], b = pSrc[2];
    pDst[0] = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16);
    pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16));
    pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16));
  }
}

static void RGB_to_Y(uint8* pDst, const uint8 *pSrc, int num_pixels)
{
  for ( ; num_pixels; pDst++, pSrc += 3, num_pixels--)
    pDst[0] = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16);
}

static void RGBA_to_YCC(uint8* pDst, const uint8 *pSrc, int num_pixels)
{
  for ( ; num_pixels; pDst += 3, pSrc += 4, num_pixels--)
  {
    const int r = pSrc[0], g = pSrc[1], b = pSrc[2];
    pDst[0] = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16);
    pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16));
    pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16));
  }
}

static void RGBA_to_Y(uint8* pDst, const uint8 *pSrc, int num_pixels)
{
  for ( ; num_pixels; pDst++, pSrc += 4, num_pixels--)
    pDst[0] = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16);
}

static void Y_to_YCC(uint8* pDst, const uint8* pSrc, int num_pixels)
{
  for( ; num_pixels; pDst += 3, pSrc++, num_pixels--) { pDst[0] = pSrc[0]; pDst[1] = 128; pDst[2] = 128; }
}

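The conversion constants above are the Rec. 601 luma/chroma weights scaled by 2^16 (e.g. 19595/65536 ≈ 0.299), so YR + YG + YB == 65536 exactly and the >> 16 produces a rounded weighted average. An illustrative standalone check (not part of jpge):

// Illustrative check of the fixed-point luma math used above.
#include <cassert>
#include <cstdio>

int main()
{
    const int YR = 19595, YG = 38470, YB = 7471;  // ~0.299, 0.587, 0.114 * 65536
    assert(YR + YG + YB == 65536);                // weights sum to exactly 1.0
    int r = 255, g = 255, b = 255;
    int y = (r * YR + g * YG + b * YB + 32768) >> 16;
    printf("Y(white) = %d\n", y);                 // prints 255
    return 0;
}
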
// Forward DCT - DCT derived from jfdctint.
#define CONST_BITS 13
#define ROW_BITS 2
#define DCT_DESCALE(x, n) (((x) + (((int32)1) << ((n) - 1))) >> (n))
#define DCT_MUL(var, c) (static_cast<int16>(var) * static_cast<int32>(c))
#define DCT1D(s0, s1, s2, s3, s4, s5, s6, s7) \
  int32 t0 = s0 + s7, t7 = s0 - s7, t1 = s1 + s6, t6 = s1 - s6, t2 = s2 + s5, t5 = s2 - s5, t3 = s3 + s4, t4 = s3 - s4; \
  int32 t10 = t0 + t3, t13 = t0 - t3, t11 = t1 + t2, t12 = t1 - t2; \
  int32 u1 = DCT_MUL(t12 + t13, 4433); \
  s2 = u1 + DCT_MUL(t13, 6270); \
  s6 = u1 + DCT_MUL(t12, -15137); \
  u1 = t4 + t7; \
  int32 u2 = t5 + t6, u3 = t4 + t6, u4 = t5 + t7; \
  int32 z5 = DCT_MUL(u3 + u4, 9633); \
  t4 = DCT_MUL(t4, 2446); t5 = DCT_MUL(t5, 16819); \
  t6 = DCT_MUL(t6, 25172); t7 = DCT_MUL(t7, 12299); \
  u1 = DCT_MUL(u1, -7373); u2 = DCT_MUL(u2, -20995); \
  u3 = DCT_MUL(u3, -16069); u4 = DCT_MUL(u4, -3196); \
  u3 += z5; u4 += z5; \
  s0 = t10 + t11; s1 = t7 + u1 + u4; s3 = t6 + u2 + u3; s4 = t10 - t11; s5 = t5 + u2 + u4; s7 = t4 + u1 + u3;

static void DCT2D(int32 *p)
{
  int32 c, *q = p;
  for (c = 7; c >= 0; c--, q += 8)
  {
    int32 s0 = q[0], s1 = q[1], s2 = q[2], s3 = q[3], s4 = q[4], s5 = q[5], s6 = q[6], s7 = q[7];
    DCT1D(s0, s1, s2, s3, s4, s5, s6, s7);
    q[0] = s0 << ROW_BITS; q[1] = DCT_DESCALE(s1, CONST_BITS-ROW_BITS); q[2] = DCT_DESCALE(s2, CONST_BITS-ROW_BITS); q[3] = DCT_DESCALE(s3, CONST_BITS-ROW_BITS);
    q[4] = s4 << ROW_BITS; q[5] = DCT_DESCALE(s5, CONST_BITS-ROW_BITS); q[6] = DCT_DESCALE(s6, CONST_BITS-ROW_BITS); q[7] = DCT_DESCALE(s7, CONST_BITS-ROW_BITS);
  }
  for (q = p, c = 7; c >= 0; c--, q++)
  {
    int32 s0 = q[0*8], s1 = q[1*8], s2 = q[2*8], s3 = q[3*8], s4 = q[4*8], s5 = q[5*8], s6 = q[6*8], s7 = q[7*8];
    DCT1D(s0, s1, s2, s3, s4, s5, s6, s7);
    q[0*8] = DCT_DESCALE(s0, ROW_BITS+3); q[1*8] = DCT_DESCALE(s1, CONST_BITS+ROW_BITS+3); q[2*8] = DCT_DESCALE(s2, CONST_BITS+ROW_BITS+3); q[3*8] = DCT_DESCALE(s3, CONST_BITS+ROW_BITS+3);
    q[4*8] = DCT_DESCALE(s4, ROW_BITS+3); q[5*8] = DCT_DESCALE(s5, CONST_BITS+ROW_BITS+3); q[6*8] = DCT_DESCALE(s6, CONST_BITS+ROW_BITS+3); q[7*8] = DCT_DESCALE(s7, CONST_BITS+ROW_BITS+3);
  }
}

struct sym_freq { uint m_key, m_sym_index; };

// Radix sorts sym_freq[] array by 32-bit key m_key. Returns ptr to sorted values.
static inline sym_freq* radix_sort_syms(uint num_syms, sym_freq* pSyms0, sym_freq* pSyms1)
{
  const uint cMaxPasses = 4;
  uint32 hist[256 * cMaxPasses]; clear_obj(hist);
  for (uint i = 0; i < num_syms; i++) { uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; hist[256*2 + ((freq >> 16) & 0xFF)]++; hist[256*3 + ((freq >> 24) & 0xFF)]++; }
  sym_freq* pCur_syms = pSyms0, *pNew_syms = pSyms1;
  uint total_passes = cMaxPasses; while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--;
  for (uint pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8)
  {
    const uint32* pHist = &hist[pass << 8];
    uint offsets[256], cur_ofs = 0;
    for (uint i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; }
    for (uint i = 0; i < num_syms; i++)
      pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i];
    sym_freq* t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t;
  }
  return pCur_syms;
}

// calculate_minimum_redundancy() originally written by: Alistair Moffat, alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
static void calculate_minimum_redundancy(sym_freq *A, int n)
{
  int root, leaf, next, avbl, used, dpth;
  if (n==0) return; else if (n==1) { A[0].m_key = 1; return; }
  A[0].m_key += A[1].m_key; root = 0; leaf = 2;
  for (next=1; next < n-1; next++)
  {
    if (leaf>=n || A[root].m_key<A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = next; } else A[next].m_key = A[leaf++].m_key;
    if (leaf>=n || (root<next && A[root].m_key<A[leaf].m_key)) { A[next].m_key += A[root].m_key; A[root++].m_key = next; } else A[next].m_key += A[leaf++].m_key;
  }
  A[n-2].m_key = 0;
  for (next=n-3; next>=0; next--) A[next].m_key = A[A[next].m_key].m_key+1;
  avbl = 1; used = dpth = 0; root = n-2; next = n-1;
  while (avbl>0)
  {
    while (root>=0 && (int)A[root].m_key==dpth) { used++; root--; }
    while (avbl>used) { A[next--].m_key = dpth; avbl--; }
    avbl = 2*used; dpth++; used = 0;
  }
}

// Limits canonical Huffman code table's max code size to max_code_size.
static void huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size)
{
  if (code_list_len <= 1) return;

  for (int i = max_code_size + 1; i <= MAX_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i];

  uint32 total = 0;
  for (int i = max_code_size; i > 0; i--)
    total += (((uint32)pNum_codes[i]) << (max_code_size - i));

  while (total != (1UL << max_code_size))
  {
    pNum_codes[max_code_size]--;
    for (int i = max_code_size - 1; i > 0; i--)
    {
      if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; }
    }
    total--;
  }
}

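The while loop above drives the scaled Kraft sum, total = Σ num_codes[i] * 2^(max_code_size - i), down to exactly 2^max_code_size, i.e. Σ 2^(-len) = 1, which is the condition for a complete prefix code. A small illustrative check of that invariant (not part of jpge; the counts are hypothetical):

// Illustrative sketch: verify code lengths form a complete prefix code,
// using the same scaled-integer Kraft sum as huffman_enforce_max_code_size.
#include <cstdint>
#include <cstdio>

int main()
{
    const int max_code_size = 16;
    int num_codes[17] = {0};                 // num_codes[i] = symbols with i-bit codes
    num_codes[1] = 1; num_codes[2] = 1; num_codes[3] = 2;  // lengths 1, 2, 3, 3

    uint32_t total = 0;
    for (int i = max_code_size; i > 0; i--)
        total += ((uint32_t)num_codes[i]) << (max_code_size - i);

    // 2^-1 + 2^-2 + 2*2^-3 == 1, so total == 1 << max_code_size.
    printf("%s\n", total == (1u << max_code_size) ? "complete" : "incomplete");
    return 0;
}
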
// Generates an optimized Huffman table.
void jpeg_encoder::optimize_huffman_table(int table_num, int table_len)
{
  sym_freq syms0[MAX_HUFF_SYMBOLS], syms1[MAX_HUFF_SYMBOLS];
  syms0[0].m_key = 1; syms0[0].m_sym_index = 0; // dummy symbol, assures that no valid code contains all 1's
  int num_used_syms = 1;
  const uint32 *pSym_count = &m_huff_count[table_num][0];
  for (int i = 0; i < table_len; i++)
    if (pSym_count[i]) { syms0[num_used_syms].m_key = pSym_count[i]; syms0[num_used_syms++].m_sym_index = i + 1; }
  sym_freq* pSyms = radix_sort_syms(num_used_syms, syms0, syms1);
  calculate_minimum_redundancy(pSyms, num_used_syms);

  // Count the # of symbols of each code size.
  int num_codes[1 + MAX_HUFF_CODESIZE]; clear_obj(num_codes);
  for (int i = 0; i < num_used_syms; i++)
    num_codes[pSyms[i].m_key]++;

  const uint JPGE_CODE_SIZE_LIMIT = 16; // the maximum possible size of a JPEG Huffman code (valid range is [9,16] - 9 vs. 8 because of the dummy symbol)
  huffman_enforce_max_code_size(num_codes, num_used_syms, JPGE_CODE_SIZE_LIMIT);

  // Compute m_huff_bits array, which contains the # of symbols per code size.
  clear_obj(m_huff_bits[table_num]);
  for (int i = 1; i <= (int)JPGE_CODE_SIZE_LIMIT; i++)
    m_huff_bits[table_num][i] = static_cast<uint8>(num_codes[i]);

  // Remove the dummy symbol added above, which must be in the largest bucket.
  for (int i = JPGE_CODE_SIZE_LIMIT; i >= 1; i--)
  {
    if (m_huff_bits[table_num][i]) { m_huff_bits[table_num][i]--; break; }
  }

  // Compute the m_huff_val array, which contains the symbol indices sorted by code size (smallest to largest).
  for (int i = num_used_syms - 1; i >= 1; i--)
    m_huff_val[table_num][num_used_syms - 1 - i] = static_cast<uint8>(pSyms[i].m_sym_index - 1);
}

// JPEG marker generation.
void jpeg_encoder::emit_byte(uint8 i)
{
  m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_obj(i);
}

void jpeg_encoder::emit_word(uint i)
{
  emit_byte(uint8(i >> 8)); emit_byte(uint8(i & 0xFF));
}

void jpeg_encoder::emit_marker(int marker)
{
  emit_byte(uint8(0xFF)); emit_byte(uint8(marker));
}

// Emit JFIF marker
void jpeg_encoder::emit_jfif_app0()
{
  emit_marker(M_APP0);
  emit_word(2 + 4 + 1 + 2 + 1 + 2 + 2 + 1 + 1);
  emit_byte(0x4A); emit_byte(0x46); emit_byte(0x49); emit_byte(0x46); /* Identifier: ASCII "JFIF" */
  emit_byte(0);
  emit_byte(1);    /* Major version */
  emit_byte(1);    /* Minor version */
  emit_byte(0);    /* Density unit */
  emit_word(1);
  emit_word(1);
  emit_byte(0);    /* No thumbnail image */
  emit_byte(0);
}

// Emit quantization tables
void jpeg_encoder::emit_dqt()
{
  for (int i = 0; i < ((m_num_components == 3) ? 2 : 1); i++)
  {
    emit_marker(M_DQT);
    emit_word(64 + 1 + 2);
    emit_byte(static_cast<uint8>(i));
    for (int j = 0; j < 64; j++)
      emit_byte(static_cast<uint8>(m_quantization_tables[i][j]));
  }
}

// Emit start of frame marker
void jpeg_encoder::emit_sof()
{
  emit_marker(M_SOF0);                          /* baseline */
  emit_word(3 * m_num_components + 2 + 5 + 1);
  emit_byte(8);                                 /* precision */
  emit_word(m_image_y);
  emit_word(m_image_x);
  emit_byte(m_num_components);
  for (int i = 0; i < m_num_components; i++)
  {
    emit_byte(static_cast<uint8>(i + 1));                  /* component ID */
    emit_byte((m_comp_h_samp[i] << 4) + m_comp_v_samp[i]); /* h and v sampling */
    emit_byte(i > 0);                                      /* quant. table num */
  }
}

// Emit Huffman table.
void jpeg_encoder::emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag)
{
  emit_marker(M_DHT);

  int length = 0;
  for (int i = 1; i <= 16; i++)
    length += bits[i];

  emit_word(length + 2 + 1 + 16);
  emit_byte(static_cast<uint8>(index + (ac_flag << 4)));

  for (int i = 1; i <= 16; i++)
    emit_byte(bits[i]);

  for (int i = 0; i < length; i++)
    emit_byte(val[i]);
}

// Emit all Huffman tables.
void jpeg_encoder::emit_dhts()
{
  emit_dht(m_huff_bits[0+0], m_huff_val[0+0], 0, false);
  emit_dht(m_huff_bits[2+0], m_huff_val[2+0], 0, true);
  if (m_num_components == 3)
  {
    emit_dht(m_huff_bits[0+1], m_huff_val[0+1], 1, false);
    emit_dht(m_huff_bits[2+1], m_huff_val[2+1], 1, true);
  }
}

// emit start of scan
void jpeg_encoder::emit_sos()
{
  emit_marker(M_SOS);
  emit_word(2 * m_num_components + 2 + 1 + 3);
  emit_byte(m_num_components);
  for (int i = 0; i < m_num_components; i++)
  {
    emit_byte(static_cast<uint8>(i + 1));
    if (i == 0)
      emit_byte((0 << 4) + 0);
    else
      emit_byte((1 << 4) + 1);
  }
  emit_byte(0); /* spectral selection */
  emit_byte(63);
  emit_byte(0);
}

// Emit all markers at beginning of image file.
void jpeg_encoder::emit_markers()
{
  emit_marker(M_SOI);
  emit_jfif_app0();
  emit_dqt();
  emit_sof();
  emit_dhts();
  emit_sos();
}

// Compute the actual canonical Huffman codes/code sizes given the JPEG huff bits and val arrays.
|
370 |
+
void jpeg_encoder::compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val)
|
371 |
+
{
|
372 |
+
int i, l, last_p, si;
|
373 |
+
uint8 huff_size[257];
|
374 |
+
uint huff_code[257];
|
375 |
+
uint code;
|
376 |
+
|
377 |
+
int p = 0;
|
378 |
+
for (l = 1; l <= 16; l++)
|
379 |
+
for (i = 1; i <= bits[l]; i++)
|
380 |
+
huff_size[p++] = (char)l;
|
381 |
+
|
382 |
+
huff_size[p] = 0; last_p = p; // write sentinel
|
383 |
+
|
384 |
+
code = 0; si = huff_size[0]; p = 0;
|
385 |
+
|
386 |
+
while (huff_size[p])
|
387 |
+
{
|
388 |
+
while (huff_size[p] == si)
|
389 |
+
huff_code[p++] = code++;
|
390 |
+
code <<= 1;
|
391 |
+
si++;
|
392 |
+
}
|
393 |
+
|
394 |
+
memset(codes, 0, sizeof(codes[0])*256);
|
395 |
+
memset(code_sizes, 0, sizeof(code_sizes[0])*256);
|
396 |
+
for (p = 0; p < last_p; p++)
|
397 |
+
{
|
398 |
+
codes[val[p]] = huff_code[p];
|
399 |
+
code_sizes[val[p]] = huff_size[p];
|
400 |
+
}
|
401 |
+
}
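
// --- Illustrative aside (editor's sketch, not part of jpge) -----------------
// The canonical-code rule above is easier to see on a toy alphabet. With a
// hypothetical bits[] saying "two codes of length 2, two of length 3", codes
// of equal length are consecutive integers, and the counter is left-shifted
// when moving to the next length. Guarded out of the build.
#if 0
#include <cstdio>

int main()
{
	unsigned char bits[17] = { 0, 0, 2, 2 }; // bits[l] = #codes of length l
	unsigned code = 0;
	for (int l = 1; l <= 16; l++)
	{
		for (int i = 0; i < bits[l]; i++)
			printf("length %d -> code %u\n", l, code++); // 0, 1 then 4, 5
		code <<= 1; // next length: append a zero bit to the running counter
	}
	return 0; // i.e. 2-bit codes 00, 01 and 3-bit codes 100, 101
}
#endif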

// Quantization table generation.
void jpeg_encoder::compute_quant_table(int32 *pDst, int16 *pSrc)
{
	int32 q;
	if (m_params.m_quality < 50)
		q = 5000 / m_params.m_quality;
	else
		q = 200 - m_params.m_quality * 2;
	for (int i = 0; i < 64; i++)
	{
		int32 j = *pSrc++; j = (j * q + 50L) / 100L;
		*pDst++ = JPGE_MIN(JPGE_MAX(j, 1), 255);
	}
}
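
// --- Illustrative aside (editor's sketch, not part of jpge) -----------------
// This is the IJG-style quality mapping: quality 50 leaves the reference
// table unchanged (q == 100), lower qualities inflate the divisors, higher
// qualities shrink them toward 1. A quick standalone check using 16, the
// top-left entry of the standard luminance table:
#if 0
#include <algorithm>
#include <cstdio>

static int scale_entry(int entry, int quality)
{
	int q = (quality < 50) ? 5000 / quality : 200 - quality * 2;
	return std::min(std::max((entry * q + 50) / 100, 1), 255);
}

int main()
{
	for (int quality : { 10, 50, 75, 95 })
		printf("quality %3d -> divisor %d\n", quality, scale_entry(16, quality));
	return 0; // 10 -> 80, 50 -> 16, 75 -> 8, 95 -> 2
}
#endif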

// Higher-level methods.
void jpeg_encoder::first_pass_init()
{
	m_bit_buffer = 0; m_bits_in = 0;
	memset(m_last_dc_val, 0, 3 * sizeof(m_last_dc_val[0]));
	m_mcu_y_ofs = 0;
	m_pass_num = 1;
}

bool jpeg_encoder::second_pass_init()
{
	compute_huffman_table(&m_huff_codes[0+0][0], &m_huff_code_sizes[0+0][0], m_huff_bits[0+0], m_huff_val[0+0]);
	compute_huffman_table(&m_huff_codes[2+0][0], &m_huff_code_sizes[2+0][0], m_huff_bits[2+0], m_huff_val[2+0]);
	if (m_num_components > 1)
	{
		compute_huffman_table(&m_huff_codes[0+1][0], &m_huff_code_sizes[0+1][0], m_huff_bits[0+1], m_huff_val[0+1]);
		compute_huffman_table(&m_huff_codes[2+1][0], &m_huff_code_sizes[2+1][0], m_huff_bits[2+1], m_huff_val[2+1]);
	}
	first_pass_init();
	emit_markers();
	m_pass_num = 2;
	return true;
}

bool jpeg_encoder::jpg_open(int p_x_res, int p_y_res, int src_channels)
{
	m_num_components = 3;
	switch (m_params.m_subsampling)
	{
		case Y_ONLY:
		{
			m_num_components = 1;
			m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1;
			m_mcu_x = 8; m_mcu_y = 8;
			break;
		}
		case H1V1:
		{
			m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1;
			m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
			m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
			m_mcu_x = 8; m_mcu_y = 8;
			break;
		}
		case H2V1:
		{
			m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 1;
			m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
			m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
			m_mcu_x = 16; m_mcu_y = 8;
			break;
		}
		case H2V2:
		{
			m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 2;
			m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
			m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
			m_mcu_x = 16; m_mcu_y = 16;
		}
	}

	m_image_x = p_x_res; m_image_y = p_y_res;
	m_image_bpp = src_channels;
	m_image_bpl = m_image_x * src_channels;
	m_image_x_mcu = (m_image_x + m_mcu_x - 1) & (~(m_mcu_x - 1));
	m_image_y_mcu = (m_image_y + m_mcu_y - 1) & (~(m_mcu_y - 1));
	m_image_bpl_xlt = m_image_x * m_num_components;
	m_image_bpl_mcu = m_image_x_mcu * m_num_components;
	m_mcus_per_row = m_image_x_mcu / m_mcu_x;

	if ((m_mcu_lines[0] = static_cast<uint8*>(jpge_malloc(m_image_bpl_mcu * m_mcu_y))) == NULL) return false;
	for (int i = 1; i < m_mcu_y; i++)
		m_mcu_lines[i] = m_mcu_lines[i-1] + m_image_bpl_mcu;

	compute_quant_table(m_quantization_tables[0], s_std_lum_quant);
	compute_quant_table(m_quantization_tables[1], m_params.m_no_chroma_discrim_flag ? s_std_lum_quant : s_std_croma_quant);

	m_out_buf_left = JPGE_OUT_BUF_SIZE;
	m_pOut_buf = m_out_buf;

	if (m_params.m_two_pass_flag)
	{
		clear_obj(m_huff_count);
		first_pass_init();
	}
	else
	{
		memcpy(m_huff_bits[0+0], s_dc_lum_bits, 17);    memcpy(m_huff_val[0+0], s_dc_lum_val, DC_LUM_CODES);
		memcpy(m_huff_bits[2+0], s_ac_lum_bits, 17);    memcpy(m_huff_val[2+0], s_ac_lum_val, AC_LUM_CODES);
		memcpy(m_huff_bits[0+1], s_dc_chroma_bits, 17); memcpy(m_huff_val[0+1], s_dc_chroma_val, DC_CHROMA_CODES);
		memcpy(m_huff_bits[2+1], s_ac_chroma_bits, 17); memcpy(m_huff_val[2+1], s_ac_chroma_val, AC_CHROMA_CODES);
		if (!second_pass_init()) return false; // in effect, skip over the first pass
	}
	return m_all_stream_writes_succeeded;
}
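
// --- Illustrative aside (editor's sketch, not part of jpge) -----------------
// The MCU-aligned dimensions above use the usual power-of-two round-up idiom,
// valid here because m_mcu_x/m_mcu_y are always 8 or 16:
#if 0
#include <cstdio>

static int round_up_pow2(int x, int m) { return (x + m - 1) & ~(m - 1); }

int main()
{
	// A 317-pixel-wide image padded out for 16-wide MCUs (H2V1/H2V2 modes).
	printf("%d\n", round_up_pow2(317, 16)); // prints 320
	return 0;
}
#endif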

void jpeg_encoder::load_block_8_8_grey(int x)
{
	uint8 *pSrc;
	sample_array_t *pDst = m_sample_array;
	x <<= 3;
	for (int i = 0; i < 8; i++, pDst += 8)
	{
		pSrc = m_mcu_lines[i] + x;
		pDst[0] = pSrc[0] - 128; pDst[1] = pSrc[1] - 128; pDst[2] = pSrc[2] - 128; pDst[3] = pSrc[3] - 128;
		pDst[4] = pSrc[4] - 128; pDst[5] = pSrc[5] - 128; pDst[6] = pSrc[6] - 128; pDst[7] = pSrc[7] - 128;
	}
}

void jpeg_encoder::load_block_8_8(int x, int y, int c)
{
	uint8 *pSrc;
	sample_array_t *pDst = m_sample_array;
	x = (x * (8 * 3)) + c;
	y <<= 3;
	for (int i = 0; i < 8; i++, pDst += 8)
	{
		pSrc = m_mcu_lines[y + i] + x;
		pDst[0] = pSrc[0 * 3] - 128; pDst[1] = pSrc[1 * 3] - 128; pDst[2] = pSrc[2 * 3] - 128; pDst[3] = pSrc[3 * 3] - 128;
		pDst[4] = pSrc[4 * 3] - 128; pDst[5] = pSrc[5 * 3] - 128; pDst[6] = pSrc[6 * 3] - 128; pDst[7] = pSrc[7 * 3] - 128;
	}
}

void jpeg_encoder::load_block_16_8(int x, int c)
{
	uint8 *pSrc1, *pSrc2;
	sample_array_t *pDst = m_sample_array;
	x = (x * (16 * 3)) + c;
	int a = 0, b = 2;
	for (int i = 0; i < 16; i += 2, pDst += 8)
	{
		pSrc1 = m_mcu_lines[i + 0] + x;
		pSrc2 = m_mcu_lines[i + 1] + x;
		pDst[0] = ((pSrc1[ 0 * 3] + pSrc1[ 1 * 3] + pSrc2[ 0 * 3] + pSrc2[ 1 * 3] + a) >> 2) - 128; pDst[1] = ((pSrc1[ 2 * 3] + pSrc1[ 3 * 3] + pSrc2[ 2 * 3] + pSrc2[ 3 * 3] + b) >> 2) - 128;
		pDst[2] = ((pSrc1[ 4 * 3] + pSrc1[ 5 * 3] + pSrc2[ 4 * 3] + pSrc2[ 5 * 3] + a) >> 2) - 128; pDst[3] = ((pSrc1[ 6 * 3] + pSrc1[ 7 * 3] + pSrc2[ 6 * 3] + pSrc2[ 7 * 3] + b) >> 2) - 128;
		pDst[4] = ((pSrc1[ 8 * 3] + pSrc1[ 9 * 3] + pSrc2[ 8 * 3] + pSrc2[ 9 * 3] + a) >> 2) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3] + pSrc2[10 * 3] + pSrc2[11 * 3] + b) >> 2) - 128;
		pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3] + pSrc2[12 * 3] + pSrc2[13 * 3] + a) >> 2) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3] + pSrc2[14 * 3] + pSrc2[15 * 3] + b) >> 2) - 128;
		int temp = a; a = b; b = temp;
	}
}
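
// --- Illustrative aside (editor's sketch, not part of jpge) -----------------
// load_block_16_8 box-filters a 16x16 chroma region down to 8x8. The
// alternating offsets a = 0 and b = 2 appear to dither the >> 2 truncation
// rather than always rounding down. One averaged sample, with toy values:
#if 0
#include <cstdio>

int main()
{
	int s00 = 130, s01 = 131, s10 = 130, s11 = 131; // true average is 130.5
	printf("%d\n", ((s00 + s01 + s10 + s11 + 2) >> 2) - 128); // rounds up: 3
	printf("%d\n", ((s00 + s01 + s10 + s11 + 0) >> 2) - 128); // truncates: 2
	return 0;
}
#endif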

void jpeg_encoder::load_block_16_8_8(int x, int c)
{
	uint8 *pSrc1;
	sample_array_t *pDst = m_sample_array;
	x = (x * (16 * 3)) + c;
	for (int i = 0; i < 8; i++, pDst += 8)
	{
		pSrc1 = m_mcu_lines[i + 0] + x;
		pDst[0] = ((pSrc1[ 0 * 3] + pSrc1[ 1 * 3]) >> 1) - 128; pDst[1] = ((pSrc1[ 2 * 3] + pSrc1[ 3 * 3]) >> 1) - 128;
		pDst[2] = ((pSrc1[ 4 * 3] + pSrc1[ 5 * 3]) >> 1) - 128; pDst[3] = ((pSrc1[ 6 * 3] + pSrc1[ 7 * 3]) >> 1) - 128;
		pDst[4] = ((pSrc1[ 8 * 3] + pSrc1[ 9 * 3]) >> 1) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3]) >> 1) - 128;
		pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3]) >> 1) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3]) >> 1) - 128;
	}
}

void jpeg_encoder::load_quantized_coefficients(int component_num)
{
	int32 *q = m_quantization_tables[component_num > 0];
	int16 *pDst = m_coefficient_array;
	for (int i = 0; i < 64; i++)
	{
		sample_array_t j = m_sample_array[s_zag[i]];
		if (j < 0)
		{
			if ((j = -j + (*q >> 1)) < *q)
				*pDst++ = 0;
			else
				*pDst++ = static_cast<int16>(-(j / *q));
		}
		else
		{
			if ((j = j + (*q >> 1)) < *q)
				*pDst++ = 0;
			else
				*pDst++ = static_cast<int16>((j / *q));
		}
		q++;
	}
}
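
// --- Illustrative aside (editor's sketch, not part of jpge) -----------------
// load_quantized_coefficients reads the DCT output in zig-zag order (s_zag)
// and quantizes with round-half-away-from-zero magnitudes; a coefficient is
// zeroed whenever |coeff| + q/2 < q, i.e. it is below half a quantizer step.
// The same rounding traced for a single coefficient:
#if 0
#include <cstdio>

static int quantize(int coeff, int q)
{
	int j = (coeff < 0) ? -coeff : coeff;
	j += q >> 1;
	int out = (j < q) ? 0 : j / q;
	return (coeff < 0) ? -out : out;
}

int main()
{
	printf("%d %d %d\n", quantize(7, 16), quantize(-37, 16), quantize(40, 16));
	return 0; // 0 -2 3: 7 is below 16/2, 37 -> (37+8)/16, 40 -> (40+8)/16
}
#endif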

void jpeg_encoder::flush_output_buffer()
{
	if (m_out_buf_left != JPGE_OUT_BUF_SIZE)
		m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_buf(m_out_buf, JPGE_OUT_BUF_SIZE - m_out_buf_left);
	m_pOut_buf = m_out_buf;
	m_out_buf_left = JPGE_OUT_BUF_SIZE;
}

void jpeg_encoder::put_bits(uint bits, uint len)
{
	m_bit_buffer |= ((uint32)bits << (24 - (m_bits_in += len)));
	while (m_bits_in >= 8)
	{
		uint8 c;
#define JPGE_PUT_BYTE(c) { *m_pOut_buf++ = (c); if (--m_out_buf_left == 0) flush_output_buffer(); }
		JPGE_PUT_BYTE(c = (uint8)((m_bit_buffer >> 16) & 0xFF));
		if (c == 0xFF) JPGE_PUT_BYTE(0);
		m_bit_buffer <<= 8;
		m_bits_in -= 8;
	}
}
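
// --- Illustrative aside (editor's sketch, not part of jpge) -----------------
// Besides buffering bits, put_bits performs JPEG byte stuffing: any 0xFF
// emitted inside the entropy-coded segment must be followed by 0x00 so a
// decoder cannot mistake it for a marker prefix. The rule in isolation:
#if 0
#include <cstdio>

int main()
{
	unsigned char raw[] = { 0x12, 0xFF, 0x34 };
	for (unsigned char c : raw)
	{
		printf("%02X ", c);
		if (c == 0xFF) printf("00 "); // stuffed zero, as put_bits arranges
	}
	printf("\n"); // prints: 12 FF 00 34
	return 0;
}
#endif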

void jpeg_encoder::code_coefficients_pass_one(int component_num)
{
	if (component_num >= 3) return; // just to shut up static analysis
	int i, run_len, nbits, temp1;
	int16 *src = m_coefficient_array;
	uint32 *dc_count = component_num ? m_huff_count[0 + 1] : m_huff_count[0 + 0], *ac_count = component_num ? m_huff_count[2 + 1] : m_huff_count[2 + 0];

	temp1 = src[0] - m_last_dc_val[component_num];
	m_last_dc_val[component_num] = src[0];
	if (temp1 < 0) temp1 = -temp1;

	nbits = 0;
	while (temp1)
	{
		nbits++; temp1 >>= 1;
	}

	dc_count[nbits]++;
	for (run_len = 0, i = 1; i < 64; i++)
	{
		if ((temp1 = m_coefficient_array[i]) == 0)
			run_len++;
		else
		{
			while (run_len >= 16)
			{
				ac_count[0xF0]++;
				run_len -= 16;
			}
			if (temp1 < 0) temp1 = -temp1;
			nbits = 1;
			while (temp1 >>= 1) nbits++;
			ac_count[(run_len << 4) + nbits]++;
			run_len = 0;
		}
	}
	if (run_len) ac_count[0]++;
}
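
// --- Illustrative aside (editor's sketch, not part of jpge) -----------------
// Pass one only tallies symbol frequencies. Each AC symbol packs a zero-run
// length (high nibble) with the magnitude bit count (low nibble); 0xF0 is
// the run-of-16 escape (ZRL) and 0x00 is end-of-block. For example:
#if 0
#include <cstdio>

int main()
{
	int run_len = 5, nbits = 4; // 5 zeros, then a coefficient needing 4 bits
	printf("symbol 0x%02X\n", (run_len << 4) + nbits); // prints: symbol 0x54
	return 0;
}
#endif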

void jpeg_encoder::code_coefficients_pass_two(int component_num)
{
	int i, j, run_len, nbits, temp1, temp2;
	int16 *pSrc = m_coefficient_array;
	uint *codes[2];
	uint8 *code_sizes[2];

	if (component_num == 0)
	{
		codes[0] = m_huff_codes[0 + 0]; codes[1] = m_huff_codes[2 + 0];
		code_sizes[0] = m_huff_code_sizes[0 + 0]; code_sizes[1] = m_huff_code_sizes[2 + 0];
	}
	else
	{
		codes[0] = m_huff_codes[0 + 1]; codes[1] = m_huff_codes[2 + 1];
		code_sizes[0] = m_huff_code_sizes[0 + 1]; code_sizes[1] = m_huff_code_sizes[2 + 1];
	}

	temp1 = temp2 = pSrc[0] - m_last_dc_val[component_num];
	m_last_dc_val[component_num] = pSrc[0];

	if (temp1 < 0)
	{
		temp1 = -temp1; temp2--;
	}

	nbits = 0;
	while (temp1)
	{
		nbits++; temp1 >>= 1;
	}

	put_bits(codes[0][nbits], code_sizes[0][nbits]);
	if (nbits) put_bits(temp2 & ((1 << nbits) - 1), nbits);

	for (run_len = 0, i = 1; i < 64; i++)
	{
		if ((temp1 = m_coefficient_array[i]) == 0)
			run_len++;
		else
		{
			while (run_len >= 16)
			{
				put_bits(codes[1][0xF0], code_sizes[1][0xF0]);
				run_len -= 16;
			}
			if ((temp2 = temp1) < 0)
			{
				temp1 = -temp1;
				temp2--;
			}
			nbits = 1;
			while (temp1 >>= 1)
				nbits++;
			j = (run_len << 4) + nbits;
			put_bits(codes[1][j], code_sizes[1][j]);
			put_bits(temp2 & ((1 << nbits) - 1), nbits);
			run_len = 0;
		}
	}
	if (run_len)
		put_bits(codes[1][0], code_sizes[1][0]);
}
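
// --- Illustrative aside (editor's sketch, not part of jpge) -----------------
// The temp2-- above implements JPEG's magnitude coding for negatives: a
// value v < 0 is transmitted as (v - 1) masked to nbits bits, which equals
// the one's complement of |v|. Tracing v = -5:
#if 0
#include <cstdio>

int main()
{
	int v = -5, temp1 = v, temp2 = v;
	if (temp1 < 0) { temp1 = -temp1; temp2--; }
	int nbits = 0;
	while (temp1) { nbits++; temp1 >>= 1; }
	printf("nbits=%d bits=0x%X\n", nbits, temp2 & ((1 << nbits) - 1));
	return 0; // nbits=3 bits=0x2: binary 010, one's complement of 101 (= 5)
}
#endif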

void jpeg_encoder::code_block(int component_num)
{
	DCT2D(m_sample_array);
	load_quantized_coefficients(component_num);
	if (m_pass_num == 1)
		code_coefficients_pass_one(component_num);
	else
		code_coefficients_pass_two(component_num);
}

void jpeg_encoder::process_mcu_row()
{
	if (m_num_components == 1)
	{
		for (int i = 0; i < m_mcus_per_row; i++)
		{
			load_block_8_8_grey(i); code_block(0);
		}
	}
	else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1))
	{
		for (int i = 0; i < m_mcus_per_row; i++)
		{
			load_block_8_8(i, 0, 0); code_block(0); load_block_8_8(i, 0, 1); code_block(1); load_block_8_8(i, 0, 2); code_block(2);
		}
	}
	else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1))
	{
		for (int i = 0; i < m_mcus_per_row; i++)
		{
			load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0);
			load_block_16_8_8(i, 1); code_block(1); load_block_16_8_8(i, 2); code_block(2);
		}
	}
	else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2))
	{
		for (int i = 0; i < m_mcus_per_row; i++)
		{
			load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0);
			load_block_8_8(i * 2 + 0, 1, 0); code_block(0); load_block_8_8(i * 2 + 1, 1, 0); code_block(0);
			load_block_16_8(i, 1); code_block(1); load_block_16_8(i, 2); code_block(2);
		}
	}
}

bool jpeg_encoder::terminate_pass_one()
{
	optimize_huffman_table(0+0, DC_LUM_CODES); optimize_huffman_table(2+0, AC_LUM_CODES);
	if (m_num_components > 1)
	{
		optimize_huffman_table(0+1, DC_CHROMA_CODES); optimize_huffman_table(2+1, AC_CHROMA_CODES);
	}
	return second_pass_init();
}

bool jpeg_encoder::terminate_pass_two()
{
	put_bits(0x7F, 7);
	flush_output_buffer();
	emit_marker(M_EOI);
	m_pass_num++; // purposely bump up m_pass_num, for debugging
	return true;
}

bool jpeg_encoder::process_end_of_image()
{
	if (m_mcu_y_ofs)
	{
		if (m_mcu_y_ofs < 16) // check here just to shut up static analysis
		{
			for (int i = m_mcu_y_ofs; i < m_mcu_y; i++)
				memcpy(m_mcu_lines[i], m_mcu_lines[m_mcu_y_ofs - 1], m_image_bpl_mcu);
		}

		process_mcu_row();
	}

	if (m_pass_num == 1)
		return terminate_pass_one();
	else
		return terminate_pass_two();
}

void jpeg_encoder::load_mcu(const void *pSrc)
{
	const uint8* Psrc = reinterpret_cast<const uint8*>(pSrc);

	uint8* pDst = m_mcu_lines[m_mcu_y_ofs]; // OK to write up to m_image_bpl_xlt bytes to pDst

	if (m_num_components == 1)
	{
		if (m_image_bpp == 4)
			RGBA_to_Y(pDst, Psrc, m_image_x);
		else if (m_image_bpp == 3)
			RGB_to_Y(pDst, Psrc, m_image_x);
		else
			memcpy(pDst, Psrc, m_image_x);
	}
	else
	{
		if (m_image_bpp == 4)
			RGBA_to_YCC(pDst, Psrc, m_image_x);
		else if (m_image_bpp == 3)
			RGB_to_YCC(pDst, Psrc, m_image_x);
		else
			Y_to_YCC(pDst, Psrc, m_image_x);
	}

	// Possibly duplicate pixels at end of scanline if not a multiple of 8 or 16
	if (m_num_components == 1)
		memset(m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt, pDst[m_image_bpl_xlt - 1], m_image_x_mcu - m_image_x);
	else
	{
		const uint8 y = pDst[m_image_bpl_xlt - 3 + 0], cb = pDst[m_image_bpl_xlt - 3 + 1], cr = pDst[m_image_bpl_xlt - 3 + 2];
		uint8 *q = m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt;
		for (int i = m_image_x; i < m_image_x_mcu; i++)
		{
			*q++ = y; *q++ = cb; *q++ = cr;
		}
	}

	if (++m_mcu_y_ofs == m_mcu_y)
	{
		process_mcu_row();
		m_mcu_y_ofs = 0;
	}
}

void jpeg_encoder::clear()
{
	m_mcu_lines[0] = NULL;
	m_pass_num = 0;
	m_all_stream_writes_succeeded = true;
}

jpeg_encoder::jpeg_encoder()
{
	clear();
}

jpeg_encoder::~jpeg_encoder()
{
	deinit();
}

bool jpeg_encoder::init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params &comp_params)
{
	deinit();
	if (((!pStream) || (width < 1) || (height < 1)) || ((src_channels != 1) && (src_channels != 3) && (src_channels != 4)) || (!comp_params.check_valid())) return false;
	m_pStream = pStream;
	m_params = comp_params;
	return jpg_open(width, height, src_channels);
}

void jpeg_encoder::deinit()
{
	jpge_free(m_mcu_lines[0]);
	clear();
}

bool jpeg_encoder::process_scanline(const void* pScanline)
{
	if ((m_pass_num < 1) || (m_pass_num > 2)) return false;
	if (m_all_stream_writes_succeeded)
	{
		if (!pScanline)
		{
			if (!process_end_of_image()) return false;
		}
		else
		{
			load_mcu(pScanline);
		}
	}
	return m_all_stream_writes_succeeded;
}

// Higher level wrappers/examples (optional).
#include <stdio.h>

class cfile_stream : public output_stream
{
	cfile_stream(const cfile_stream &);
	cfile_stream &operator= (const cfile_stream &);

	FILE* m_pFile;
	bool m_bStatus;

public:
	cfile_stream() : m_pFile(NULL), m_bStatus(false) { }

	virtual ~cfile_stream()
	{
		close();
	}

	bool open(const char *pFilename)
	{
		close();
#if defined(_MSC_VER)
		if (fopen_s(&m_pFile, pFilename, "wb") != 0)
		{
			return false;
		}
#else
		m_pFile = fopen(pFilename, "wb");
#endif
		m_bStatus = (m_pFile != NULL);
		return m_bStatus;
	}

	bool close()
	{
		if (m_pFile)
		{
			if (fclose(m_pFile) == EOF)
			{
				m_bStatus = false;
			}
			m_pFile = NULL;
		}
		return m_bStatus;
	}

	virtual bool put_buf(const void* pBuf, int64_t len)
	{
		m_bStatus = m_bStatus && (fwrite(pBuf, len, 1, m_pFile) == 1);
		return m_bStatus;
	}

	uint get_size() const
	{
		return m_pFile ? ftell(m_pFile) : 0;
	}
};

// Writes JPEG image to file.
bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params)
{
	cfile_stream dst_stream;
	if (!dst_stream.open(pFilename))
		return false;

	jpge::jpeg_encoder dst_image;
	if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params))
		return false;

	for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++)
	{
		for (int64_t i = 0; i < height; i++)
		{
			// i, width, and num_channels are all 64bit
			const uint8* pBuf = pImage_data + i * width * num_channels;
			if (!dst_image.process_scanline(pBuf))
				return false;
		}
		if (!dst_image.process_scanline(NULL))
			return false;
	}

	dst_image.deinit();

	return dst_stream.close();
}
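
// --- Illustrative aside (editor's sketch, not part of jpge) -----------------
// A minimal end-to-end use of the helper above; the gradient pattern and the
// output filename are made up for the example. Guarded out of the build.
#if 0
#include <vector>
#include "jpge.h"

int main()
{
	const int w = 64, h = 64, comps = 3;
	std::vector<jpge::uint8> rgb(w * h * comps);
	for (int y = 0; y < h; y++)            // simple RGB gradient test image
		for (int x = 0; x < w; x++)
		{
			jpge::uint8 *p = &rgb[(y * w + x) * comps];
			p[0] = (jpge::uint8)(x * 4); p[1] = (jpge::uint8)(y * 4); p[2] = 128;
		}

	jpge::params comp;
	comp.m_quality = 90;                   // defaults: H2V2 subsampling, one pass
	bool ok = jpge::compress_image_to_jpeg_file("test.jpg", w, h, comps, rgb.data(), comp);
	return ok ? 0 : 1;
}
#endif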

class memory_stream : public output_stream
{
	memory_stream(const memory_stream &);
	memory_stream &operator= (const memory_stream &);

	uint8 *m_pBuf;
	uint64_t m_buf_size, m_buf_ofs;

public:
	memory_stream(void *pBuf, uint64_t buf_size) : m_pBuf(static_cast<uint8*>(pBuf)), m_buf_size(buf_size), m_buf_ofs(0) { }

	virtual ~memory_stream() { }

	virtual bool put_buf(const void* pBuf, int64_t len)
	{
		uint64_t buf_remaining = m_buf_size - m_buf_ofs;
		if ((uint64_t)len > buf_remaining)
			return false;
		memcpy(m_pBuf + m_buf_ofs, pBuf, len);
		m_buf_ofs += len;
		return true;
	}

	uint64_t get_size() const
	{
		return m_buf_ofs;
	}
};

bool compress_image_to_jpeg_file_in_memory(void *pDstBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params)
{
	if ((!pDstBuf) || (!buf_size))
		return false;

	memory_stream dst_stream(pDstBuf, buf_size);

	buf_size = 0;

	jpge::jpeg_encoder dst_image;
	if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params))
		return false;

	for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++)
	{
		for (int64_t i = 0; i < height; i++)
		{
			const uint8* pScanline = pImage_data + i * width * num_channels;
			if (!dst_image.process_scanline(pScanline))
				return false;
		}
		if (!dst_image.process_scanline(NULL))
			return false;
	}

	dst_image.deinit();

	buf_size = dst_stream.get_size();
	return true;
}

} // namespace jpge
crazy_functions/test_project/Cpp/libJPG/jpge.h
ADDED
@@ -0,0 +1,172 @@
// jpge.h - C++ class for JPEG compression.
// Public domain, Rich Geldreich <richgel99@gmail.com>
// Alex Evans: Added RGBA support, linear memory allocator.
#ifndef JPEG_ENCODER_H
#define JPEG_ENCODER_H

#include <stdint.h>

namespace jpge
{
	typedef unsigned char  uint8;
	typedef signed short   int16;
	typedef signed int     int32;
	typedef unsigned short uint16;
	typedef unsigned int   uint32;
	typedef unsigned int   uint;

	// JPEG chroma subsampling factors. Y_ONLY (grayscale images) and H2V2 (color images) are the most common.
	enum subsampling_t { Y_ONLY = 0, H1V1 = 1, H2V1 = 2, H2V2 = 3 };

	// JPEG compression parameters structure.
	struct params
	{
		inline params() : m_quality(85), m_subsampling(H2V2), m_no_chroma_discrim_flag(false), m_two_pass_flag(false) { }

		inline bool check_valid() const
		{
			if ((m_quality < 1) || (m_quality > 100)) return false;
			if ((uint)m_subsampling > (uint)H2V2) return false;
			return true;
		}

		// Quality: 1-100, higher is better. Typical values are around 50-95.
		int m_quality;

		// m_subsampling:
		// 0 = Y (grayscale) only
		// 1 = YCbCr, no subsampling (H1V1, YCbCr 1x1x1, 3 blocks per MCU)
		// 2 = YCbCr, H2V1 subsampling (YCbCr 2x1x1, 4 blocks per MCU)
		// 3 = YCbCr, H2V2 subsampling (YCbCr 4x1x1, 6 blocks per MCU -- very common)
		subsampling_t m_subsampling;

		// Disables CbCr discrimination - only intended for testing.
		// If true, the Y quantization table is also used for the CbCr channels.
		bool m_no_chroma_discrim_flag;

		bool m_two_pass_flag;
	};

	// Writes JPEG image to a file.
	// num_channels must be 1 (Y), 3 (RGB), or 4 (RGBA); image pitch must be width*num_channels.
	bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params());

	// Writes JPEG image to memory buffer.
	// On entry, buf_size is the size of the output buffer pointed at by pBuf, which should be at least ~1024 bytes.
	// If return value is true, buf_size will be set to the size of the compressed data.
	bool compress_image_to_jpeg_file_in_memory(void *pBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params());

	// Output stream abstract class - used by the jpeg_encoder class to write to the output stream.
	// put_buf() is generally called with len==JPGE_OUT_BUF_SIZE bytes, but for headers it'll be called with smaller amounts.
	class output_stream
	{
	public:
		virtual ~output_stream() { }
		virtual bool put_buf(const void* Pbuf, int64_t len) = 0;
		template<class T> inline bool put_obj(const T& obj) { return put_buf(&obj, sizeof(T)); }
	};

	// Lower level jpeg_encoder class - useful if more control is needed than the above helper functions.
	class jpeg_encoder
	{
	public:
		jpeg_encoder();
		~jpeg_encoder();

		// Initializes the compressor.
		// pStream: The stream object to use for writing compressed data.
		// comp_params - Compression parameters structure, defined above.
		// width, height - Image dimensions.
		// src_channels - May be 1, 3, or 4: 1 indicates grayscale, 3 RGB, and 4 RGBA source data.
		// Returns false on out of memory or if a stream write fails.
		bool init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params &comp_params = params());

		const params &get_params() const { return m_params; }

		// Deinitializes the compressor, freeing any allocated memory. May be called at any time.
		void deinit();

		uint get_total_passes() const { return m_params.m_two_pass_flag ? 2 : 1; }
		inline uint get_cur_pass() { return m_pass_num; }

		// Call this method with each source scanline.
		// width * src_channels bytes per scanline is expected (RGB or Y format).
		// You must call with NULL after all scanlines are processed to finish compression.
		// Returns false on out of memory or if a stream write fails.
		bool process_scanline(const void* pScanline);

	private:
		jpeg_encoder(const jpeg_encoder &);
		jpeg_encoder &operator =(const jpeg_encoder &);

		typedef int32 sample_array_t;

		output_stream *m_pStream;
		params m_params;
		uint8 m_num_components;
		uint8 m_comp_h_samp[3], m_comp_v_samp[3];
		int m_image_x, m_image_y, m_image_bpp, m_image_bpl;
		int m_image_x_mcu, m_image_y_mcu;
		int m_image_bpl_xlt, m_image_bpl_mcu;
		int m_mcus_per_row;
		int m_mcu_x, m_mcu_y;
		uint8 *m_mcu_lines[16];
		uint8 m_mcu_y_ofs;
		sample_array_t m_sample_array[64];
		int16 m_coefficient_array[64];
		int32 m_quantization_tables[2][64];
		uint m_huff_codes[4][256];
		uint8 m_huff_code_sizes[4][256];
		uint8 m_huff_bits[4][17];
		uint8 m_huff_val[4][256];
		uint32 m_huff_count[4][256];
		int m_last_dc_val[3];
		enum { JPGE_OUT_BUF_SIZE = 2048 };
		uint8 m_out_buf[JPGE_OUT_BUF_SIZE];
		uint8 *m_pOut_buf;
		uint m_out_buf_left;
		uint32 m_bit_buffer;
		uint m_bits_in;
		uint8 m_pass_num;
		bool m_all_stream_writes_succeeded;

		void optimize_huffman_table(int table_num, int table_len);
		void emit_byte(uint8 i);
		void emit_word(uint i);
		void emit_marker(int marker);
		void emit_jfif_app0();
		void emit_dqt();
		void emit_sof();
		void emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag);
		void emit_dhts();
		void emit_sos();
		void emit_markers();
		void compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val);
		void compute_quant_table(int32 *dst, int16 *src);
		void adjust_quant_table(int32 *dst, int32 *src);
		void first_pass_init();
		bool second_pass_init();
		bool jpg_open(int p_x_res, int p_y_res, int src_channels);
		void load_block_8_8_grey(int x);
		void load_block_8_8(int x, int y, int c);
		void load_block_16_8(int x, int c);
		void load_block_16_8_8(int x, int c);
		void load_quantized_coefficients(int component_num);
		void flush_output_buffer();
		void put_bits(uint bits, uint len);
		void code_coefficients_pass_one(int component_num);
		void code_coefficients_pass_two(int component_num);
		void code_block(int component_num);
		void process_mcu_row();
		bool terminate_pass_one();
		bool terminate_pass_two();
		bool process_end_of_image();
		void load_mcu(const void* src);
		void clear();
		void init();
	};

} // namespace jpge

#endif // JPEG_ENCODER_H
crazy_functions/test_project/Cpp/libJPG/来源
ADDED
@@ -0,0 +1,3 @@
jpge.h - C++ class for JPEG compression.
Public domain, Rich Geldreich <richgel99@gmail.com>
Alex Evans: Added RGBA support, linear memory allocator.
crazy_functions/test_project/latex/attention/background.tex
ADDED
@@ -0,0 +1,58 @@
The goal of reducing sequential computation also forms the foundation of the Extended Neural GPU \citep{extendedngpu}, ByteNet \citep{NalBytenet2017} and ConvS2S \citep{JonasFaceNet2017}, all of which use convolutional neural networks as a basic building block, computing hidden representations in parallel for all input and output positions. In these models, the number of operations required to relate signals from two arbitrary input or output positions grows with the distance between positions, linearly for ConvS2S and logarithmically for ByteNet. This makes it more difficult to learn dependencies between distant positions \citep{hochreiter2001gradient}. In the Transformer this is reduced to a constant number of operations, albeit at the cost of reduced effective resolution due to averaging attention-weighted positions, an effect we counteract with Multi-Head Attention as described in section~\ref{sec:attention}.

Self-attention, sometimes called intra-attention, is an attention mechanism relating different positions of a single sequence in order to compute a representation of the sequence. Self-attention has been used successfully in a variety of tasks including reading comprehension, abstractive summarization, textual entailment and learning task-independent sentence representations \citep{cheng2016long, decomposableAttnModel, paulus2017deep, lin2017structured}.

End-to-end memory networks are based on a recurrent attention mechanism instead of sequence-aligned recurrence and have been shown to perform well on simple-language question answering and language modeling tasks \citep{sukhbaatar2015}.

To the best of our knowledge, however, the Transformer is the first transduction model relying entirely on self-attention to compute representations of its input and output without using sequence-aligned RNNs or convolution.
In the following sections, we will describe the Transformer, motivate self-attention and discuss its advantages over models such as \citep{neural_gpu, NalBytenet2017} and \citep{JonasFaceNet2017}.


%\citep{JonasFaceNet2017} report new SOTA on machine translation for English-to-German (EnDe), English-to-French (EnFr) and English-to-Romanian language pairs.

%For example, in MT, we must draw information from both input and previous output words to translate an output word accurately. An attention layer \citep{bahdanau2014neural} can connect a very large number of positions at low computation cost, making it an essential ingredient in competitive recurrent models for machine translation.

%A natural question to ask then is, "Could we replace recurrence with attention?". \marginpar{Don't know if it's the most natural question to ask given the previous statements. Also, need to say that the complexity table summarizes these statements} Such a model would be blessed with the computational efficiency of attention and the power of cross-positional communication. In this work, we show that pure attention models work remarkably well for MT, achieving new SOTA results on EnDe and EnFr, and can be trained in under $2$ days on xyz architecture.

%After the seminal models introduced in \citep{sutskever14, bahdanau2014neural, cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation (MT) and language modeling with recurrent encoder-decoder and recurrent language models. Recent effort \citep{shazeer2017outrageously} has successfully combined the power of conditional computation with sequence models to train very large models for MT, pushing SOTA at lower computational cost.

%Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_{t-1}$. This dependence on the previous hidden state precludes processing all timesteps at once, instead requiring long sequences of sequential operations. In practice, this results in greatly reduced computational efficiency, as on modern computing hardware, a single operation on a large batch is much faster than a large number of operations on small batches. The problem gets worse at longer sequence lengths. Although sequential computation is not a severe bottleneck at inference time, as autoregressively generating each output requires all previous outputs, the inability to compute scores at all output positions at once hinders us from rapidly training our models over large datasets. Although impressive work such as \citep{Kuchaiev2017Factorization} is able to significantly accelerate the training of LSTMs with factorization tricks, we are still bound by the linear dependence on sequence length.

%If the model could compute hidden states at each time step using only the inputs and outputs, it would be liberated from the dependence on results from previous time steps during training. This line of thought is the foundation of recent efforts such as the Markovian neural GPU \citep{neural_gpu}, ByteNet \citep{NalBytenet2017} and ConvS2S \citep{JonasFaceNet2017}, all of which use convolutional neural networks as a building block to compute hidden representations simultaneously for all timesteps, resulting in $O(1)$ sequential time complexity. \citep{JonasFaceNet2017} report new SOTA on machine translation for English-to-German (EnDe), English-to-French (EnFr) and English-to-Romanian language pairs.

%A crucial component for accurate sequence prediction is modeling cross-positional communication. For example, in MT, we must draw information from both input and previous output words to translate an output word accurately. An attention layer \citep{bahdanau2014neural} can connect a very large number of positions at a low computation cost, also $O(1)$ sequential time complexity, making it an essential ingredient in recurrent encoder-decoder architectures for MT. A natural question to ask then is, "Could we replace recurrence with attention?". \marginpar{Don't know if it's the most natural question to ask given the previous statements. Also, need to say that the complexity table summarizes these statements} Such a model would be blessed with the computational efficiency of attention and the power of cross-positional communication. In this work, we show that pure attention models work remarkably well for MT, achieving new SOTA results on EnDe and EnFr, and can be trained in under $2$ days on xyz architecture.



%Note: Facebook model is no better than RNNs in this regard, since it requires a number of layers proportional to the distance you want to communicate. Bytenet is more promising, since it requires a logarithmic number of layers (does bytenet have SOTA results)?

%Note: An attention layer can connect a very large number of positions at a low computation cost in O(1) sequential operations. This is why encoder-decoder attention has been so successful in seq-to-seq models so far. It is only natural, then, to also use attention to connect the timesteps of the same sequence.

%Note: I wouldn't say that long sequences are not a problem during inference. It would be great if we could infer with no long sequences. We could just say later on that, while our training graph is constant-depth, our model still requires sequential operations in the decoder part during inference due to the autoregressive nature of the model.

%\begin{table}[h!]
%\caption{Attention models are quite efficient for cross-positional communications when sequence length is smaller than channel depth. $n$ represents the sequence length and $d$ represents the channel depth.}
%\label{tab:op_complexities}
%\begin{center}
%\vspace{-5pt}
%\scalebox{0.75}{

%\begin{tabular}{l|c|c|c}
%\hline \hline
%Layer Type & Receptive & Complexity & Sequential \\
% & Field & & Operations \\
%\hline
%Pointwise Feed-Forward & $1$ & $O(n \cdot d^2)$ & $O(1)$ \\
%\hline
%Recurrent & $n$ & $O(n \cdot d^2)$ & $O(n)$ \\
%\hline
%Convolutional & $r$ & $O(r \cdot n \cdot d^2)$ & $O(1)$ \\
%\hline
%Convolutional (separable) & $r$ & $O(r \cdot n \cdot d + n %\cdot d^2)$ & $O(1)$ \\
%\hline
%Attention & $r$ & $O(r \cdot n \cdot d)$ & $O(1)$ \\
%\hline \hline
%\end{tabular}
%}
%\end{center}
%\end{table}
crazy_functions/test_project/latex/attention/introduction.tex
ADDED
@@ -0,0 +1,18 @@
Recurrent neural networks, long short-term memory \citep{hochreiter1997} and gated recurrent \citep{gruEval14} neural networks in particular, have been firmly established as state of the art approaches in sequence modeling and transduction problems such as language modeling and machine translation \citep{sutskever14, bahdanau2014neural, cho2014learning}. Numerous efforts have since continued to push the boundaries of recurrent language models and encoder-decoder architectures \citep{wu2016google,luong2015effective,jozefowicz2016exploring}.

Recurrent models typically factor computation along the symbol positions of the input and output sequences. Aligning the positions to steps in computation time, they generate a sequence of hidden states $h_t$, as a function of the previous hidden state $h_{t-1}$ and the input for position $t$. This inherently sequential nature precludes parallelization within training examples, which becomes critical at longer sequence lengths, as memory constraints limit batching across examples.
%\marginpar{not sure if the memory constraints are understandable here}
Recent work has achieved significant improvements in computational efficiency through factorization tricks \citep{Kuchaiev2017Factorization} and conditional computation \citep{shazeer2017outrageously}, while also improving model performance in the case of the latter. The fundamental constraint of sequential computation, however, remains.

%\marginpar{@all: there is work on analyzing what attention really does in seq2seq models, couldn't find it right away}

Attention mechanisms have become an integral part of compelling sequence modeling and transduction models in various tasks, allowing modeling of dependencies without regard to their distance in the input or output sequences \citep{bahdanau2014neural, structuredAttentionNetworks}. In all but a few cases \citep{decomposableAttnModel}, however, such attention mechanisms are used in conjunction with a recurrent network.

%\marginpar{not sure if "cross-positional communication" is understandable without explanation}
%\marginpar{insert exact training times and stats for the model that reaches sota earliest, maybe even a single GPU model?}

In this work we propose the Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. The Transformer allows for significantly more parallelization and can reach a new state of the art in translation quality after being trained for as little as twelve hours on eight P100 GPUs.
%\marginpar{you removed the constant number of repetitions part. I wrote it because I wanted to make it clear that the model does not only perform attention once, while it's also not recurrent. I thought that might be important to get across early.}

% Just a standard paragraph with citations, rewrite.
%After the seminal papers of \citep{sutskever14}, \citep{bahdanau2014neural}, and \citep{cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation and language modeling with recurrent sequence models. Recent effort \citep{shazeer2017outrageously} has combined the power of conditional computation with sequence models to train very large models for machine translation, pushing SOTA at lower computational cost. Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_{t-1}$. This dependence on the previous hidden state prevents recurrent models from processing multiple inputs at once, and their time complexity is a linear function of the length of the input and output, both during training and inference. [What I want to say here is that although this is fine during decoding, at training time, we are given both input and output and this linear nature does not allow the RNN to process all inputs and outputs simultaneously and haven't been used on datasets that are of the scale of the web. What's the largest dataset we have? Talk about Nvidia and possibly others' efforts to speed up things, and possibly other efforts that alleviate this, but are still limited by its computational nature]. Rest of the intro: What if you could construct the state based on the actual inputs and outputs, then you could construct them all at once. This has been the foundation of many promising recent efforts, bytenet, facenet (Also talk about quasi rnn here). Now we talk about attention!! Along with cell architectures such as long short-term memory (LSTM) \citep{hochreiter1997}, and gated recurrent units (GRUs) \citep{cho2014learning}, attention has emerged as an essential ingredient in successful sequence models, in particular for machine translation. In recent years, many, if not all, state-of-the-art (SOTA) results in machine translation have been achieved with attention-based sequence models \citep{wu2016google,luong2015effective,jozefowicz2016exploring}. Talk about the neon work on how it played with attention to do self attention! Then talk about what we do.
crazy_functions/test_project/latex/attention/model_architecture.tex
ADDED
@@ -0,0 +1,155 @@

\begin{figure}
  \centering
  \includegraphics[scale=0.6]{Figures/ModalNet-21}
  \caption{The Transformer - model architecture.}
  \label{fig:model-arch}
\end{figure}

% Although the primary workhorse of our model is attention,
%Our model maintains the encoder-decoder structure that is common to many so-called sequence-to-sequence models \citep{bahdanau2014neural,sutskever14}. As in all such architectures, the encoder computes a representation of the input sequence, and the decoder consumes these representations along with the output tokens to autoregressively produce the output sequence. Where, traditionally, the encoder and decoder contain stacks of recurrent or convolutional layers, our encoder and decoder stacks are composed of attention layers and position-wise feed-forward layers (Figure~\ref{fig:model-arch}). The following sections describe the gross architecture and these particular components in detail.

Most competitive neural sequence transduction models have an encoder-decoder structure \citep{cho2014learning,bahdanau2014neural,sutskever14}. Here, the encoder maps an input sequence of symbol representations $(x_1, ..., x_n)$ to a sequence of continuous representations $\mathbf{z} = (z_1, ..., z_n)$. Given $\mathbf{z}$, the decoder then generates an output sequence $(y_1,...,y_m)$ of symbols one element at a time. At each step the model is auto-regressive \citep{graves2013generating}, consuming the previously generated symbols as additional input when generating the next.

The Transformer follows this overall architecture using stacked self-attention and point-wise, fully connected layers for both the encoder and decoder, shown in the left and right halves of Figure~\ref{fig:model-arch}, respectively.

\subsection{Encoder and Decoder Stacks}

\paragraph{Encoder:}The encoder is composed of a stack of $N=6$ identical layers. Each layer has two sub-layers. The first is a multi-head self-attention mechanism, and the second is a simple, position-wise fully connected feed-forward network. We employ a residual connection \citep{he2016deep} around each of the two sub-layers, followed by layer normalization \cite{layernorm2016}. That is, the output of each sub-layer is $\mathrm{LayerNorm}(x + \mathrm{Sublayer}(x))$, where $\mathrm{Sublayer}(x)$ is the function implemented by the sub-layer itself. To facilitate these residual connections, all sub-layers in the model, as well as the embedding layers, produce outputs of dimension $\dmodel=512$.

\paragraph{Decoder:}The decoder is also composed of a stack of $N=6$ identical layers. In addition to the two sub-layers in each encoder layer, the decoder inserts a third sub-layer, which performs multi-head attention over the output of the encoder stack. Similar to the encoder, we employ residual connections around each of the sub-layers, followed by layer normalization. We also modify the self-attention sub-layer in the decoder stack to prevent positions from attending to subsequent positions. This masking, combined with the fact that the output embeddings are offset by one position, ensures that the predictions for position $i$ can depend only on the known outputs at positions less than $i$.

% In our model (Figure~\ref{fig:model-arch}), the encoder and decoder are composed of stacks of alternating self-attention layers (for cross-positional communication) and position-wise feed-forward layers (for in-place computation). In addition, the decoder stack contains encoder-decoder attention layers. Since attention is agnostic to the distances between words, our model requires a "positional encoding" to be added to the encoder and decoder input. The following sections describe all of these components in detail.

\subsection{Attention} \label{sec:attention}
An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors. The output is computed as a weighted sum of the values, where the weight assigned to each value is computed by a compatibility function of the query with the corresponding key.

\subsubsection{Scaled Dot-Product Attention} \label{sec:scaled-dot-prod}

% \begin{figure}
% \centering
% \includegraphics[scale=0.6]{Figures/ModalNet-19}
% \caption{Scaled Dot-Product Attention.}
% \label{fig:multi-head-att}
% \end{figure}

We call our particular attention "Scaled Dot-Product Attention" (Figure~\ref{fig:multi-head-att}). The input consists of queries and keys of dimension $d_k$, and values of dimension $d_v$. We compute the dot products of the query with all keys, divide each by $\sqrt{d_k}$, and apply a softmax function to obtain the weights on the values.

In practice, we compute the attention function on a set of queries simultaneously, packed together into a matrix $Q$. The keys and values are also packed together into matrices $K$ and $V$. We compute the matrix of outputs as:

\begin{equation}
\mathrm{Attention}(Q, K, V) = \mathrm{softmax}(\frac{QK^T}{\sqrt{d_k}})V
\end{equation}

The two most commonly used attention functions are additive attention \citep{bahdanau2014neural}, and dot-product (multiplicative) attention. Dot-product attention is identical to our algorithm, except for the scaling factor of $\frac{1}{\sqrt{d_k}}$. Additive attention computes the compatibility function using a feed-forward network with a single hidden layer. While the two are similar in theoretical complexity, dot-product attention is much faster and more space-efficient in practice, since it can be implemented using highly optimized matrix multiplication code.

%We scale the dot products by $1/\sqrt{d_k}$ to limit the magnitude of the dot products, which works well in practice. Otherwise, we found applying the softmax to often result in weights very close to 0 or 1, and hence minuscule gradients.

% Already described in the subsequent section
%When used as part of decoder self-attention, an optional mask function is applied just before the softmax to prevent positions from attending to subsequent positions. This mask simply sets the logits corresponding to all illegal connections (those outside of the lower triangle) to $-\infty$.

%\paragraph{Comparison to Additive Attention: } We choose dot product attention over additive attention \citep{bahdanau2014neural} since it can be computed using highly optimized matrix multiplication code. This optimization is particularly important to us, as we employ many attention layers in our model.

While for small values of $d_k$ the two mechanisms perform similarly, additive attention outperforms dot product attention without scaling for larger values of $d_k$ \citep{DBLP:journals/corr/BritzGLL17}. We suspect that for large values of $d_k$, the dot products grow large in magnitude, pushing the softmax function into regions where it has extremely small gradients\footnote{To illustrate why the dot products get large, assume that the components of $q$ and $k$ are independent random variables with mean $0$ and variance $1$. Then their dot product, $q \cdot k = \sum_{i=1}^{d_k} q_ik_i$, has mean $0$ and variance $d_k$.}. To counteract this effect, we scale the dot products by $\frac{1}{\sqrt{d_k}}$.
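
% Editor's aside (kept commented, like the other margin notes in this file):
% expanding the footnote's variance claim, with $q_i, k_i$ i.i.d., zero mean,
% unit variance,
%\begin{equation*}
%\mathrm{Var}\left[q \cdot k\right] = \sum_{i=1}^{d_k} \mathrm{Var}\left[q_i k_i\right]
% = \sum_{i=1}^{d_k} \mathrm{E}\left[q_i^2\right]\mathrm{E}\left[k_i^2\right] = d_k,
%\end{equation*}
% so dividing the logits by $\sqrt{d_k}$ restores unit variance before the softmax.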
|
54 |
+
|
55 |
+
|
56 |
+
%We suspect this to be caused by the dot products growing too large in magnitude to result in useful gradients after applying the softmax function. To counteract this, we scale the dot product by $1/\sqrt{d_k}$.
|
57 |
+
|
58 |
+
|
59 |
+
\subsubsection{Multi-Head Attention} \label{sec:multihead}
|
60 |
+
|
61 |
+
\begin{figure}
|
62 |
+
\begin{minipage}[t]{0.5\textwidth}
|
63 |
+
\centering
|
64 |
+
Scaled Dot-Product Attention \\
|
65 |
+
\vspace{0.5cm}
|
66 |
+
\includegraphics[scale=0.6]{Figures/ModalNet-19}
|
67 |
+
\end{minipage}
|
68 |
+
\begin{minipage}[t]{0.5\textwidth}
|
69 |
+
\centering
|
70 |
+
Multi-Head Attention \\
|
71 |
+
\vspace{0.1cm}
|
72 |
+
\includegraphics[scale=0.6]{Figures/ModalNet-20}
|
73 |
+
\end{minipage}
|
74 |
+
|
75 |
+
|
76 |
+
% \centering
|
77 |
+
|
78 |
+
\caption{(left) Scaled Dot-Product Attention. (right) Multi-Head Attention consists of several attention layers running in parallel.}
|
79 |
+
\label{fig:multi-head-att}
|
80 |
+
\end{figure}

Instead of performing a single attention function with $\dmodel$-dimensional keys, values and queries, we found it beneficial to linearly project the queries, keys and values $h$ times with different, learned linear projections to $d_k$, $d_k$ and $d_v$ dimensions, respectively.
On each of these projected versions of queries, keys and values we then perform the attention function in parallel, yielding $d_v$-dimensional output values. These are concatenated and once again projected, resulting in the final values, as depicted in Figure~\ref{fig:multi-head-att}.

Multi-head attention allows the model to jointly attend to information from different representation subspaces at different positions. With a single attention head, averaging inhibits this.

\begin{align*}
\mathrm{MultiHead}(Q, K, V) &= \mathrm{Concat}(\mathrm{head_1}, ..., \mathrm{head_h})W^O\\
% \mathrm{where} \mathrm{head_i} &= \mathrm{Attention}(QW_Q_i^{\dmodel \times d_q}, KW_K_i^{\dmodel \times d_k}, VW^V_i^{\dmodel \times d_v})\\
\text{where}~\mathrm{head_i} &= \mathrm{Attention}(QW^Q_i, KW^K_i, VW^V_i)\\
\end{align*}

Where the projections are parameter matrices $W^Q_i \in \mathbb{R}^{\dmodel \times d_k}$, $W^K_i \in \mathbb{R}^{\dmodel \times d_k}$, $W^V_i \in \mathbb{R}^{\dmodel \times d_v}$ and $W^O \in \mathbb{R}^{hd_v \times \dmodel}$.

%find it better (and no more expensive) to have multiple parallel attention layers (each over the full set of positions) with proportionally lower-dimensional keys, values and queries. We call this "Multi-Head Attention" (Figure~\ref{fig:multi-head-att}). The keys, values, and queries for each of these parallel attention layers are computed by learned linear transformations of the inputs to the multi-head attention. We use different linear transformations across different parallel attention layers. The output of the parallel attention layers are concatenated, and then passed through a final learned linear transformation.

In this work we employ $h=8$ parallel attention layers, or heads. For each of these we use $d_k=d_v=\dmodel/h=64$.
Due to the reduced dimension of each head, the total computational cost is similar to that of single-head attention with full dimensionality.
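
A shape-level NumPy sketch of the multi-head computation above (our illustration; random matrices stand in for the learned projections):
\begin{verbatim}
import numpy as np

def softmax(z):
    e = np.exp(z - z.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

n, d_model, h = 10, 512, 8
d_k = d_v = d_model // h  # 64; the reduced per-head dimension keeps total cost constant
X = np.random.randn(n, d_model)

heads = []
for _ in range(h):
    W_Q, W_K, W_V = (np.random.randn(d_model, d_k) for _ in range(3))  # d_k == d_v here
    Q, K, V = X @ W_Q, X @ W_K, X @ W_V
    heads.append(softmax(Q @ K.T / np.sqrt(d_k)) @ V)   # (n, d_v)

W_O = np.random.randn(h * d_v, d_model)
out = np.concatenate(heads, axis=-1) @ W_O              # (n, d_model)
\end{verbatim}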

\subsubsection{Applications of Attention in our Model}

The Transformer uses multi-head attention in three different ways:
\begin{itemize}
\item In "encoder-decoder attention" layers, the queries come from the previous decoder layer, and the memory keys and values come from the output of the encoder. This allows every position in the decoder to attend over all positions in the input sequence. This mimics the typical encoder-decoder attention mechanisms in sequence-to-sequence models such as \citep{wu2016google, bahdanau2014neural,JonasFaceNet2017}.

\item The encoder contains self-attention layers. In a self-attention layer all of the keys, values and queries come from the same place, in this case, the output of the previous layer in the encoder. Each position in the encoder can attend to all positions in the previous layer of the encoder.

\item Similarly, self-attention layers in the decoder allow each position in the decoder to attend to all positions in the decoder up to and including that position. We need to prevent leftward information flow in the decoder to preserve the auto-regressive property. We implement this inside of scaled dot-product attention by masking out (setting to $-\infty$) all values in the input of the softmax which correspond to illegal connections (see the sketch after this list). See Figure~\ref{fig:multi-head-att}.

\end{itemize}
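
The sketch referenced in the last item above (a hypothetical illustration, not the paper's code): a lower-triangular boolean mask marks the legal connections, and masked logits are set to $-\infty$ before the softmax so their attention weights become zero.
\begin{verbatim}
import numpy as np

def causal_mask(n):
    # True on and below the diagonal: position i may attend to j <= i
    return np.tril(np.ones((n, n), dtype=bool))

logits = np.random.randn(5, 5)
logits[~causal_mask(5)] = -np.inf  # exp(-inf) = 0, so softmax assigns zero weight
\end{verbatim}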

\subsection{Position-wise Feed-Forward Networks}\label{sec:ffn}

In addition to attention sub-layers, each of the layers in our encoder and decoder contains a fully connected feed-forward network, which is applied to each position separately and identically. This consists of two linear transformations with a ReLU activation in between.

\begin{equation}
\mathrm{FFN}(x)=\max(0, xW_1 + b_1) W_2 + b_2
\end{equation}

While the linear transformations are the same across different positions, they use different parameters from layer to layer. Another way of describing this is as two convolutions with kernel size 1. The dimensionality of input and output is $\dmodel=512$, and the inner-layer has dimensionality $d_{ff}=2048$.
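
The same network in a few lines of NumPy (an illustrative sketch, not the authors' code; random weights stand in for trained parameters):
\begin{verbatim}
import numpy as np

d_model, d_ff = 512, 2048
W1, b1 = np.random.randn(d_model, d_ff), np.zeros(d_ff)
W2, b2 = np.random.randn(d_ff, d_model), np.zeros(d_model)

def ffn(x):
    # Two linear transformations with a ReLU in between, applied per position
    return np.maximum(0, x @ W1 + b1) @ W2 + b2
\end{verbatim}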

%In the appendix, we describe how the position-wise feed-forward network can also be seen as a form of attention.

%from Jakob: The number of operations required for the model to relate signals from two arbitrary input or output positions grows in the distance between positions in input or output, linearly for ConvS2S and logarithmically for ByteNet, making it harder to learn dependencies between these positions \citep{hochreiter2001gradient}. In the transformer this is reduced to a constant number of operations, albeit at the cost of effective resolution caused by averaging attention-weighted positions, an effect we aim to counteract with multi-headed attention.

%Figure~\ref{fig:simple-att} presents a simple attention function, $A$, with a single head, that forms the basis of our multi-head attention. $A$ takes a query key vector $\kq$, matrices of memory keys $\km$ and memory values $\vm$, and produces a query value vector $\vq$ as
%\begin{equation*} \label{eq:attention}
% A(\kq, \km, \vm) = {\vm}^T (Softmax(\km \kq).
%\end{equation*}
%We linearly transform $\kq,\,\km$, and $\vm$ with learned matrices ${\Wkq \text{,} \, \Wkm}$, and ${\Wvm}$ before calling the attention function, and transform the output query with $\Wvq$ before handing it to the feed forward layer. Each attention layer has its own set of transformation matrices, which are shared across all query positions. $A$ is applied in parallel for each query position, and is implemented very efficiently as a batch of matrix multiplies. The self-attention and encoder-decoder attention layers use $A$, but with different arguments. For example, in encoder self-attention, queries in encoder layer $i$ attend to memories in encoder layer $i-1$. To ensure that decoder self-attention layers do not look at future words, we add $-\inf$ to the softmax logits in positions $j+1$ to query length for query position $l$.

%In simple attention, the query value is a weighted combination of the memory values where the attention weights sum to one. Although this function performs well in practice, the constraint on attention weights can restrict the amount of information that flows from memories to queries because the query cannot focus on multiple memory positions at once, which might be desirable when translating long sequences. \marginpar{@usz, could you think of an example of this ?} We remedy this by maintaining multiple attention heads at each query position that attend to all memory positions in parallel, with a different set of parameters per attention head $h$.
%\marginpar{}

\subsection{Embeddings and Softmax}
Similarly to other sequence transduction models, we use learned embeddings to convert the input tokens and output tokens to vectors of dimension $\dmodel$. We also use the usual learned linear transformation and softmax function to convert the decoder output to predicted next-token probabilities. In our model, we share the same weight matrix between the two embedding layers and the pre-softmax linear transformation, similar to \citep{press2016using}. In the embedding layers, we multiply those weights by $\sqrt{\dmodel}$.


\subsection{Positional Encoding}
Since our model contains no recurrence and no convolution, in order for the model to make use of the order of the sequence, we must inject some information about the relative or absolute position of the tokens in the sequence. To this end, we add "positional encodings" to the input embeddings at the bottoms of the encoder and decoder stacks. The positional encodings have the same dimension $\dmodel$ as the embeddings, so that the two can be summed. There are many choices of positional encodings, learned and fixed \citep{JonasFaceNet2017}.

In this work, we use sine and cosine functions of different frequencies:

\begin{align*}
PE_{(pos,2i)} = \sin(pos / 10000^{2i/\dmodel}) \\
PE_{(pos,2i+1)} = \cos(pos / 10000^{2i/\dmodel})
\end{align*}

where $pos$ is the position and $i$ is the dimension. That is, each dimension of the positional encoding corresponds to a sinusoid. The wavelengths form a geometric progression from $2\pi$ to $10000 \cdot 2\pi$. We chose this function because we hypothesized it would allow the model to easily learn to attend by relative positions, since for any fixed offset $k$, $PE_{pos+k}$ can be represented as a linear function of $PE_{pos}$.
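
A NumPy sketch of these encodings (our illustration; the even/odd column layout follows the formulas above):
\begin{verbatim}
import numpy as np

def positional_encoding(max_len, d_model=512):
    pos = np.arange(max_len)[:, None]              # (max_len, 1)
    i = np.arange(d_model // 2)[None, :]           # (1, d_model/2)
    angles = pos / np.power(10000.0, 2 * i / d_model)
    pe = np.zeros((max_len, d_model))
    pe[:, 0::2] = np.sin(angles)                   # dimensions 2i
    pe[:, 1::2] = np.cos(angles)                   # dimensions 2i+1
    return pe
\end{verbatim}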

We also experimented with using learned positional embeddings \citep{JonasFaceNet2017} instead, and found that the two versions produced nearly identical results (see Table~\ref{tab:variations} row (E)). We chose the sinusoidal version because it may allow the model to extrapolate to sequence lengths longer than the ones encountered during training.
crazy_functions/test_project/latex/attention/parameter_attention.tex
ADDED
@@ -0,0 +1,45 @@
\pagebreak
\section*{Two Feed-Forward Layers = Attention over Parameters}\label{sec:parameter_attention}

In addition to attention layers, our model contains position-wise feed-forward networks (Section \ref{sec:ffn}), which consist of two linear transformations with a ReLU activation in between. In fact, these networks too can be seen as a form of attention. Compare the formula for such a network with the formula for a simple dot-product attention layer (biases and scaling factors omitted):

\begin{align*}
FFN(x, W_1, W_2) = ReLU(xW_1)W_2 \\
A(q, K, V) = Softmax(qK^T)V
\end{align*}

Based on the similarity of these formulae, the two-layer feed-forward network can be seen as a kind of attention, where the keys and values are the rows of the trainable parameter matrices $W_1$ and $W_2$, and where we use ReLU instead of Softmax in the compatibility function.
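
A small numerical illustration of this correspondence (hypothetical, with random parameters): $xW_1$ plays the role of the compatibilities $qK^T$, $W_2$ holds the values, and the two outputs differ only in the compatibility function applied.
\begin{verbatim}
import numpy as np

def softmax(z):
    e = np.exp(z - z.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

d_model, n_p = 512, 1536
x = np.random.randn(d_model)
W1 = np.random.randn(d_model, n_p) * d_model ** -0.5
W2 = np.random.randn(n_p, d_model) * n_p ** -0.5

ffn_out = np.maximum(0, x @ W1) @ W2   # ReLU compatibility
attn_out = softmax(x @ W1) @ W2        # Softmax compatibility
\end{verbatim}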

%the compatibility function is $compat(q, k_i) = ReLU(q \cdot k_i)$ instead of $Softmax(qK_T)_i$.

Given this similarity, we experimented with replacing the position-wise feed-forward networks with attention layers similar to the ones we use everywhere else in our model. The multi-head-attention-over-parameters sublayer is identical to the multi-head attention described in \ref{sec:multihead}, except that the "keys" and "values" inputs to each attention head are trainable model parameters, as opposed to being linear projections of a previous layer. These parameters are scaled up by a factor of $\sqrt{d_{model}}$ in order to be more similar to activations.

In our first experiment, we replaced each position-wise feed-forward network with a multi-head-attention-over-parameters sublayer with $h_p=8$ heads, key-dimensionality $d_{pk}=64$, and value-dimensionality $d_{pv}=64$, using $n_p=1536$ key-value pairs for each attention head. The sublayer has a total of $2097152$ parameters, including the parameters in the query projection and the output projection. This matches the number of parameters in the position-wise feed-forward network that we replaced. While the theoretical amount of computation is also the same, in practice, the attention version caused the step times to be about 30\% longer.

In our second experiment, we used $h_p=16$ heads, and $n_p=512$ key-value pairs for each attention head, again matching the total number of parameters in the base model.

Results for the first experiment were slightly worse than for the base model, and results for the second experiment were slightly better, see Table~\ref{tab:parameter_attention}.

\begin{table}[h]
\caption{Replacing the position-wise feed-forward networks with multihead-attention-over-parameters produces similar results to the base model. All metrics are on the English-to-German translation development set, newstest2013.}
\label{tab:parameter_attention}
\begin{center}
\vspace{-2mm}
%\scalebox{1.0}{
\begin{tabular}{c|cccccc|cccc}
\hline\rule{0pt}{2.0ex}
 & \multirow{2}{*}{$\dmodel$} & \multirow{2}{*}{$\dff$} &
\multirow{2}{*}{$h_p$} & \multirow{2}{*}{$d_{pk}$} & \multirow{2}{*}{$d_{pv}$} &
\multirow{2}{*}{$n_p$} &
PPL & BLEU & params & training\\
 & & & & & & & (dev) & (dev) & $\times10^6$ & time \\
\hline\rule{0pt}{2.0ex}
base & 512 & 2048 & & & & & 4.92 & 25.8 & 65 & 12 hours\\
\hline\rule{0pt}{2.0ex}
AOP$_1$ & 512 & & 8 & 64 & 64 & 1536 & 4.92 & 25.5 & 65 & 16 hours\\
AOP$_2$ & 512 & & 16 & 64 & 64 & 512 & \textbf{4.86} & \textbf{25.9} & 65 & 16 hours \\
\hline
\end{tabular}
%}
\end{center}
\end{table}
crazy_functions/test_project/latex/attention/results.tex
ADDED
@@ -0,0 +1,166 @@
\subsection{Machine Translation}
\begin{table}[t]
\begin{center}
\caption{The Transformer achieves better BLEU scores than previous state-of-the-art models on the English-to-German and English-to-French newstest2014 tests at a fraction of the training cost.}
\label{tab:wmt-results}
\vspace{-2mm}
%\scalebox{1.0}{
\begin{tabular}{lccccc}
\toprule
\multirow{2}{*}{\vspace{-2mm}Model} & \multicolumn{2}{c}{BLEU} & & \multicolumn{2}{c}{Training Cost (FLOPs)} \\
\cmidrule{2-3} \cmidrule{5-6}
 & EN-DE & EN-FR & & EN-DE & EN-FR \\
\hline
ByteNet \citep{NalBytenet2017} & 23.75 & & & &\\
Deep-Att + PosUnk \citep{DBLP:journals/corr/ZhouCWLX16} & & 39.2 & & & $1.0\cdot10^{20}$ \\
GNMT + RL \citep{wu2016google} & 24.6 & 39.92 & & $2.3\cdot10^{19}$ & $1.4\cdot10^{20}$\\
ConvS2S \citep{JonasFaceNet2017} & 25.16 & 40.46 & & $9.6\cdot10^{18}$ & $1.5\cdot10^{20}$\\
MoE \citep{shazeer2017outrageously} & 26.03 & 40.56 & & $2.0\cdot10^{19}$ & $1.2\cdot10^{20}$ \\
\hline
\rule{0pt}{2.0ex}Deep-Att + PosUnk Ensemble \citep{DBLP:journals/corr/ZhouCWLX16} & & 40.4 & & & $8.0\cdot10^{20}$ \\
GNMT + RL Ensemble \citep{wu2016google} & 26.30 & 41.16 & & $1.8\cdot10^{20}$ & $1.1\cdot10^{21}$\\
ConvS2S Ensemble \citep{JonasFaceNet2017} & 26.36 & \textbf{41.29} & & $7.7\cdot10^{19}$ & $1.2\cdot10^{21}$\\
\specialrule{1pt}{-1pt}{0pt}
\rule{0pt}{2.2ex}Transformer (base model) & 27.3 & 38.1 & & \multicolumn{2}{c}{\boldmath$3.3\cdot10^{18}$}\\
Transformer (big) & \textbf{28.4} & \textbf{41.8} & & \multicolumn{2}{c}{$2.3\cdot10^{19}$} \\
%\hline
%\specialrule{1pt}{-1pt}{0pt}
%\rule{0pt}{2.0ex}
\bottomrule
\end{tabular}
%}
\end{center}
\end{table}


On the WMT 2014 English-to-German translation task, the big Transformer model (Transformer (big) in Table~\ref{tab:wmt-results}) outperforms the best previously reported models (including ensembles) by more than $2.0$ BLEU, establishing a new state-of-the-art BLEU score of $28.4$. The configuration of this model is listed in the bottom line of Table~\ref{tab:variations}. Training took $3.5$ days on $8$ P100 GPUs. Even our base model surpasses all previously published models and ensembles, at a fraction of the training cost of any of the competitive models.

On the WMT 2014 English-to-French translation task, our big model achieves a BLEU score of $41.8$, outperforming all of the previously published single models, at less than $1/4$ the training cost of the previous state-of-the-art model. The Transformer (big) model trained for English-to-French used dropout rate $P_{drop}=0.1$, instead of $0.3$.

For the base models, we used a single model obtained by averaging the last 5 checkpoints, which were written at 10-minute intervals. For the big models, we averaged the last 20 checkpoints. We used beam search with a beam size of $4$ and length penalty $\alpha=0.6$ \citep{wu2016google}. These hyperparameters were chosen after experimentation on the development set. We set the maximum output length during inference to input length + $50$, but terminate early when possible \citep{wu2016google}.

Table \ref{tab:wmt-results} summarizes our results and compares our translation quality and training costs to other model architectures from the literature. We estimate the number of floating point operations used to train a model by multiplying the training time, the number of GPUs used, and an estimate of the sustained single-precision floating-point capacity of each GPU\footnote{We used values of 2.8, 3.7, 6.0 and 9.5 TFLOPS for K80, K40, M40 and P100, respectively.}.
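
For example, this back-of-the-envelope check (our own, not part of the paper) reproduces the base-model entry in Table~\ref{tab:wmt-results}:
\begin{verbatim}
# 12 hours on 8 P100 GPUs at a sustained 9.5 TFLOPS each
hours, gpus, tflops = 12, 8, 9.5
print(f"{hours * 3600 * gpus * tflops * 1e12:.1e}")  # ~3.3e+18 FLOPs
\end{verbatim}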
%where we compare against the leading machine translation results in the literature. Even our smaller model, with number of parameters comparable to ConvS2S, outperforms all existing single models, and achieves results close to the best ensemble model.

\subsection{Model Variations}

\begin{table}[t]
\caption{Variations on the Transformer architecture. Unlisted values are identical to those of the base model. All metrics are on the English-to-German translation development set, newstest2013. Listed perplexities are per-wordpiece, according to our byte-pair encoding, and should not be compared to per-word perplexities.}
\label{tab:variations}
\begin{center}
\vspace{-2mm}
%\scalebox{1.0}{
\begin{tabular}{c|ccccccccc|ccc}
\hline\rule{0pt}{2.0ex}
 & \multirow{2}{*}{$N$} & \multirow{2}{*}{$\dmodel$} &
\multirow{2}{*}{$\dff$} & \multirow{2}{*}{$h$} &
\multirow{2}{*}{$d_k$} & \multirow{2}{*}{$d_v$} &
\multirow{2}{*}{$P_{drop}$} & \multirow{2}{*}{$\epsilon_{ls}$} &
train & PPL & BLEU & params \\
 & & & & & & & & & steps & (dev) & (dev) & $\times10^6$ \\
% & & & & & & & & & & & & \\
\hline\rule{0pt}{2.0ex}
base & 6 & 512 & 2048 & 8 & 64 & 64 & 0.1 & 0.1 & 100K & 4.92 & 25.8 & 65 \\
\hline\rule{0pt}{2.0ex}
\multirow{4}{*}{(A)}
 & & & & 1 & 512 & 512 & & & & 5.29 & 24.9 & \\
 & & & & 4 & 128 & 128 & & & & 5.00 & 25.5 & \\
 & & & & 16 & 32 & 32 & & & & 4.91 & 25.8 & \\
 & & & & 32 & 16 & 16 & & & & 5.01 & 25.4 & \\
\hline\rule{0pt}{2.0ex}
\multirow{2}{*}{(B)}
 & & & & & 16 & & & & & 5.16 & 25.1 & 58 \\
 & & & & & 32 & & & & & 5.01 & 25.4 & 60 \\
\hline\rule{0pt}{2.0ex}
\multirow{7}{*}{(C)}
 & 2 & & & & & & & & & 6.11 & 23.7 & 36 \\
 & 4 & & & & & & & & & 5.19 & 25.3 & 50 \\
 & 8 & & & & & & & & & 4.88 & 25.5 & 80 \\
 & & 256 & & & 32 & 32 & & & & 5.75 & 24.5 & 28 \\
 & & 1024 & & & 128 & 128 & & & & 4.66 & 26.0 & 168 \\
 & & & 1024 & & & & & & & 5.12 & 25.4 & 53 \\
 & & & 4096 & & & & & & & 4.75 & 26.2 & 90 \\
\hline\rule{0pt}{2.0ex}
\multirow{4}{*}{(D)}
 & & & & & & & 0.0 & & & 5.77 & 24.6 & \\
 & & & & & & & 0.2 & & & 4.95 & 25.5 & \\
 & & & & & & & & 0.0 & & 4.67 & 25.3 & \\
 & & & & & & & & 0.2 & & 5.47 & 25.7 & \\
\hline\rule{0pt}{2.0ex}
(E) & & \multicolumn{7}{c}{positional embedding instead of sinusoids} & & 4.92 & 25.7 & \\
\hline\rule{0pt}{2.0ex}
big & 6 & 1024 & 4096 & 16 & & & 0.3 & & 300K & \textbf{4.33} & \textbf{26.4} & 213 \\
\hline
\end{tabular}
%}
\end{center}
\end{table}


%Table \ref{tab:ende-results}. Our base model for this task uses 6 attention layers, 512 hidden dim, 2048 filter dim, 8 attention heads with both attention and symbol dropout of 0.2 and 0.1 respectively. Increasing the filter size of our feed forward component to 8192 increases the BLEU score on En $\to$ De by $?$. For both the models, we use beam search decoding of size $?$ and length penalty with an alpha of $?$ \cite? \todo{Update results}

To evaluate the importance of different components of the Transformer, we varied our base model in different ways, measuring the change in performance on English-to-German translation on the development set, newstest2013. We used beam search as described in the previous section, but no checkpoint averaging. We present these results in Table~\ref{tab:variations}.

In Table~\ref{tab:variations} rows (A), we vary the number of attention heads and the attention key and value dimensions, keeping the amount of computation constant, as described in Section \ref{sec:multihead}. While single-head attention is 0.9 BLEU worse than the best setting, quality also drops off with too many heads.

In Table~\ref{tab:variations} rows (B), we observe that reducing the attention key size $d_k$ hurts model quality. This suggests that determining compatibility is not easy and that a more sophisticated compatibility function than dot product may be beneficial. We further observe in rows (C) and (D) that, as expected, bigger models are better, and dropout is very helpful in avoiding over-fitting. In row (E) we replace our sinusoidal positional encoding with learned positional embeddings \citep{JonasFaceNet2017}, and observe nearly identical results to the base model.

%To evaluate the importance of different components of the Transformer, we use our base model to ablate on a single hyperparameter at a time and measure the change in performance on English$\to$German translation. Our variations in Table~\ref{tab:variations} show that the number of attention layers and attention heads is the most important architecture hyperparameter. However, we do not see performance gains beyond 6 layers, suggesting that we either don't have enough data to train a large model or we need to turn up regularization. We leave this exploration for future work. Among our regularizers, attention dropout has the most significant impact on performance.


%Increasing the width of our feed forward component helps both on log ppl and Accuracy \marginpar{Intuition?}
%Using dropout to regularize our models helps to prevent overfitting

\subsection{English Constituency Parsing}

\begin{table}[t]
\begin{center}
\caption{The Transformer generalizes well to English constituency parsing (Results are on Section 23 of WSJ)}
\label{tab:parsing-results}
\vspace{-2mm}
%\scalebox{1.0}{
\begin{tabular}{c|c|c}
\hline
{\bf Parser}  & {\bf Training} & {\bf WSJ 23 F1} \\ \hline
Vinyals \& Kaiser et al. (2014) \cite{KVparse15} & WSJ only, discriminative & 88.3 \\
Petrov et al. (2006) \cite{petrov-EtAl:2006:ACL} & WSJ only, discriminative & 90.4 \\
Zhu et al. (2013) \cite{zhu-EtAl:2013:ACL} & WSJ only, discriminative & 90.4 \\
Dyer et al. (2016) \cite{dyer-rnng:16} & WSJ only, discriminative & 91.7 \\
\specialrule{1pt}{-1pt}{0pt}
Transformer (4 layers) & WSJ only, discriminative & 91.3 \\
\specialrule{1pt}{-1pt}{0pt}
Zhu et al. (2013) \cite{zhu-EtAl:2013:ACL} & semi-supervised & 91.3 \\
Huang \& Harper (2009) \cite{huang-harper:2009:EMNLP} & semi-supervised & 91.3 \\
McClosky et al. (2006) \cite{mcclosky-etAl:2006:NAACL} & semi-supervised & 92.1 \\
Vinyals \& Kaiser et al. (2014) \cite{KVparse15} & semi-supervised & 92.1 \\
\specialrule{1pt}{-1pt}{0pt}
Transformer (4 layers) & semi-supervised & 92.7 \\
\specialrule{1pt}{-1pt}{0pt}
Luong et al. (2015) \cite{multiseq2seq} & multi-task & 93.0 \\
Dyer et al. (2016) \cite{dyer-rnng:16} & generative & 93.3 \\
\hline
\end{tabular}
\end{center}
\end{table}

To evaluate if the Transformer can generalize to other tasks we performed experiments on English constituency parsing. This task presents specific challenges: the output is subject to strong structural constraints and is significantly longer than the input.
Furthermore, RNN sequence-to-sequence models have not been able to attain state-of-the-art results in small-data regimes \cite{KVparse15}.

We trained a 4-layer transformer with $d_{model} = 1024$ on the Wall Street Journal (WSJ) portion of the Penn Treebank \citep{marcus1993building}, about 40K training sentences. We also trained it in a semi-supervised setting, using the larger high-confidence and BerkeleyParser corpora with approximately 17M sentences \citep{KVparse15}. We used a vocabulary of 16K tokens for the WSJ only setting and a vocabulary of 32K tokens for the semi-supervised setting.

We performed only a small number of experiments to select the dropout, both attention and residual (Section~\ref{sec:reg}), learning rates and beam size on the Section 22 development set; all other parameters remained unchanged from the English-to-German base translation model. During inference, we increased the maximum output length to input length + $300$. We used a beam size of $21$ and $\alpha=0.3$ for both WSJ only and the semi-supervised setting.

Our results in Table~\ref{tab:parsing-results} show that despite the lack of task-specific tuning our model performs surprisingly well, yielding better results than all previously reported models with the exception of the Recurrent Neural Network Grammar \cite{dyer-rnng:16}.

In contrast to RNN sequence-to-sequence models \citep{KVparse15}, the Transformer outperforms the BerkeleyParser \cite{petrov-EtAl:2006:ACL} even when training only on the WSJ training set of 40K sentences.
crazy_functions/test_project/latex/attention/sqrt_d_trick.tex
ADDED
@@ -0,0 +1,28 @@
\section*{Justification of the Scaling Factor in Dot-Product Attention}

In Section~\ref{sec:scaled-dot-prod}, we introduced scaled dot-product attention, where we scale down the dot products by $\sqrt{d_k}$. In this section, we give a rough justification of this scaling factor. If we assume that $q$ and $k$ are $d_k$-dimensional vectors whose components are independent random variables with mean $0$ and variance $1$, then their dot product, $q \cdot k = \sum_{i=1}^{d_k} q_ik_i$, has mean $0$ and variance $d_k$. Since we would prefer these values to have variance $1$, we divide by $\sqrt{d_k}$.
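
A quick Monte Carlo check of this claim (our own sketch, not part of the paper):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
d_k = 64
q = rng.standard_normal((100_000, d_k))
k = rng.standard_normal((100_000, d_k))
dots = (q * k).sum(axis=1)
print(dots.mean(), dots.var())        # approximately 0 and d_k
print((dots / np.sqrt(d_k)).var())    # approximately 1 after scaling
\end{verbatim}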


%For any two $d_k$-dimension vectors $\vec{u}$ and $\vec{v}$, whose dimensions are independent, the mean and variance of the dot product will be the summation of the product of means and variances over the dimensions, that is, $E[<\vec{u},\vec{v}>] = \sum_{i=1}^{d_k} E[u_i]E[v_i]$, and $E[(<\vec{u},\vec{v}>-E[<\vec{u},\vec{v}>])^2] = \sum_{i=1}^{d_k} E[({u_i}-E[u_i])^2] E[({v_i}-E[v_i])^2]$. Layer norm encourages the mean and variance of each dimension to be $0$ and $1$ respectively, resulting in the dot product having mean $0$ and variance $d_k$ respectively. Therefore, scaling by $\sqrt{d_k}$ encourages the logits to be normalized as well.

\iffalse

In this section, we will give a rough justification of this scaling factor, that is, we will show that for any two vectors, $\vec{u}$ and $\vec{v}$, whose variance and mean are $1$ and $0$ respectively, the variance and the mean of the dot product are $d_k$ and $0$ respectively. Therefore, dividing by $\sqrt{d_k}$ ensures that each component of the attention logits is normalized. The repeated layer norms at each transformer layer encourage $\vec{u}$ and $\vec{v}$ to be normalized.


\begin{align*}
E[<\vec{u},\vec{v}>] & = \sum_{i=1}^{d_k} E[u_i v_i] &\text{By linearity of expectation} \\
& = \sum_{i=1}^{d_k} E[u_i]E[v_i] & \text{Assuming independence} \\
& = 0
\end{align*}

\begin{align*}
E[(<\vec{u},\vec{v}>-E[<\vec{u},\vec{v}>])^2] & = E[(<\vec{u},\vec{v}>)^2] - E[<\vec{u},\vec{v}>]^2 \\
& = E[(<\vec{u},\vec{v}>)^2] \\
& = \sum_{i=1}^{d_k} E[{u_i}^2] E[{v_i}^2] &\text{By linearity of expectation and independence} \\
& = d_k
\end{align*}


\fi
crazy_functions/test_project/latex/attention/training.tex
ADDED
@@ -0,0 +1,42 @@
This section describes the training regime for our models.

%In order to speed up experimentation, our ablations are performed relative to a smaller base model described in detail in Section \ref{sec:results}.

\subsection{Training Data and Batching}
We trained on the standard WMT 2014 English-German dataset consisting of about 4.5 million sentence pairs. Sentences were encoded using byte-pair encoding \citep{DBLP:journals/corr/BritzGLL17}, which has a shared source-target vocabulary of about 37000 tokens. For English-French, we used the significantly larger WMT 2014 English-French dataset consisting of 36M sentences and split tokens into a 32000 word-piece vocabulary \citep{wu2016google}. Sentence pairs were batched together by approximate sequence length. Each training batch contained a set of sentence pairs containing approximately 25000 source tokens and 25000 target tokens.

\subsection{Hardware and Schedule}

We trained our models on one machine with 8 NVIDIA P100 GPUs. For our base models using the hyperparameters described throughout the paper, each training step took about 0.4 seconds. We trained the base models for a total of 100,000 steps or 12 hours. For our big models (described on the bottom line of Table~\ref{tab:variations}), step time was 1.0 seconds. The big models were trained for 300,000 steps (3.5 days).

\subsection{Optimizer} We used the Adam optimizer~\citep{kingma2014adam} with $\beta_1=0.9$, $\beta_2=0.98$ and $\epsilon=10^{-9}$. We varied the learning rate over the course of training, according to the formula:

\begin{equation}
lrate = \dmodel^{-0.5} \cdot \min({step\_num}^{-0.5}, {step\_num} \cdot {warmup\_steps}^{-1.5})
\end{equation}

This corresponds to increasing the learning rate linearly for the first $warmup\_steps$ training steps, and decreasing it thereafter proportionally to the inverse square root of the step number. We used $warmup\_steps=4000$.
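
The schedule in a few lines of Python (an illustrative sketch matching the formula above; $step$ must be at least 1):
\begin{verbatim}
def lrate(step, d_model=512, warmup_steps=4000):
    # Linear warmup for warmup_steps steps, then inverse-square-root decay
    return d_model ** -0.5 * min(step ** -0.5, step * warmup_steps ** -1.5)
\end{verbatim}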

\subsection{Regularization} \label{sec:reg}

We employ three types of regularization during training:
\paragraph{Residual Dropout} We apply dropout \citep{srivastava2014dropout} to the output of each sub-layer, before it is added to the sub-layer input and normalized. In addition, we apply dropout to the sums of the embeddings and the positional encodings in both the encoder and decoder stacks. For the base model, we use a rate of $P_{drop}=0.1$.

% \paragraph{Attention Dropout} Query to key attentions are structurally similar to hidden-to-hidden weights in a feed-forward network, albeit across positions. The softmax activations yielding attention weights can then be seen as the analogue of hidden layer activations. A natural possibility is to extend dropout \citep{srivastava2014dropout} to attention. We implement attention dropout by dropping out attention weights as,
% \begin{equation*}
%     \mathrm{Attention}(Q, K, V) = \mathrm{dropout}(\mathrm{softmax}(\frac{QK^T}{\sqrt{d}}))V
% \end{equation*}
% In addition to residual dropout, we found attention dropout to be beneficial for our parsing experiments.

%\paragraph{Symbol Dropout} In the source and target embedding layers, we replace a random subset of the token ids with a sentinel id. For the base model, we use a rate of $symbol\_dropout\_rate=0.1$. Note that this applies only to the auto-regressive use of the target ids - not their use in the cross-entropy loss.

%\paragraph{Attention Dropout} Query to memory attentions are structurally similar to hidden-to-hidden weights in a feed-forward network, albeit across positions. The softmax activations yielding attention weights can then be seen as the analogue of hidden layer activations. A natural possibility is to extend dropout \citep{srivastava2014dropout} to attentions. We implement attention dropout by dropping out attention weights as,
%\begin{equation*}
% A(Q, K, V) = \mathrm{dropout}(\mathrm{softmax}(\frac{QK^T}{\sqrt{d}}))V
%\end{equation*}
%As a result, the query will not be able to access the memory values at the dropped out position. In our experiments, we tried attention dropout rates of 0.2, and 0.3, and found it to work favorably for English-to-German translation.
%$attention\_dropout\_rate=0.2$.

\paragraph{Label Smoothing} During training, we employed label smoothing of value $\epsilon_{ls}=0.1$ \citep{DBLP:journals/corr/SzegedyVISW15}. This hurts perplexity, as the model learns to be more unsure, but improves accuracy and BLEU score.
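
One common formulation of this smoothing, as a hypothetical NumPy sketch (splitting the mass $\epsilon/(V-1)$ over non-target tokens is our assumption, not stated in the paper):
\begin{verbatim}
import numpy as np

def smoothed_targets(labels, vocab_size, eps=0.1):
    # Each target distribution puts 1 - eps on the true token
    # and spreads eps uniformly over the remaining tokens
    t = np.full((len(labels), vocab_size), eps / (vocab_size - 1))
    t[np.arange(len(labels)), labels] = 1.0 - eps
    return t
\end{verbatim}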
crazy_functions/test_project/latex/attention/visualizations.tex
ADDED
@@ -0,0 +1,18 @@
\pagebreak
\section*{Attention Visualizations}\label{sec:viz-att}
\begin{figure*}[h]
{\includegraphics[width=\textwidth, trim=0 0 0 36, clip]{./vis/making_more_difficult5_new.pdf}}
\caption{An example of the attention mechanism following long-distance dependencies in the encoder self-attention in layer 5 of 6. Many of the attention heads attend to a distant dependency of the verb `making', completing the phrase `making...more difficult'. Attentions here are shown only for the word `making'. Different colors represent different heads. Best viewed in color.}
\end{figure*}

\begin{figure*}
{\includegraphics[width=\textwidth, trim=0 0 0 45, clip]{./vis/anaphora_resolution_new.pdf}}
{\includegraphics[width=\textwidth, trim=0 0 0 37, clip]{./vis/anaphora_resolution2_new.pdf}}
\caption{Two attention heads, also in layer 5 of 6, apparently involved in anaphora resolution. Top: Full attentions for head 5. Bottom: Isolated attentions from just the word `its' for attention heads 5 and 6. Note that the attentions are very sharp for this word.}
\end{figure*}

\begin{figure*}
{\includegraphics[width=\textwidth, trim=0 0 0 36, clip]{./vis/attending_to_head_new.pdf}}
{\includegraphics[width=\textwidth, trim=0 0 0 36, clip]{./vis/attending_to_head2_new.pdf}}
\caption{Many of the attention heads exhibit behaviour that seems related to the structure of the sentence. We give two such examples above, from two different heads from the encoder self-attention at layer 5 of 6. The heads clearly learned to perform different tasks.}
\end{figure*}
crazy_functions/test_project/latex/attention/why_self_attention.tex
ADDED
@@ -0,0 +1,98 @@
%We focus on the general task of mapping one variable-length sequence of symbol representations ${x_1, ..., x_n} \in \mathbb{R}^d$ to another sequence of the same length ${y_1, ..., y_n} \in \mathbb{R}^d$. \marginpar{should we use this notation? alternatively we can just say "d-dimensional vectors"}

In this section we compare various aspects of self-attention layers to the recurrent and convolutional layers commonly used for mapping one variable-length sequence of symbol representations $(x_1, ..., x_n)$ to another sequence of equal length $(z_1, ..., z_n)$, with $x_i, z_i \in \mathbb{R}^d$, such as a hidden layer in a typical sequence transduction encoder or decoder. Motivating our use of self-attention, we consider three desiderata.

One is the total computational complexity per layer.
Another is the amount of computation that can be parallelized, as measured by the minimum number of sequential operations required.

The third is the path length between long-range dependencies in the network. Learning long-range dependencies is a key challenge in many sequence transduction tasks. One key factor affecting the ability to learn such dependencies is the length of the paths forward and backward signals have to traverse in the network. The shorter these paths between any combination of positions in the input and output sequences, the easier it is to learn long-range dependencies \citep{hochreiter2001gradient}. Hence we also compare the maximum path length between any two input and output positions in networks composed of the different layer types.

%\subsection{Computational Performance and Path Lengths}

\begin{table}[t]
\caption{
Maximum path lengths, per-layer complexity and minimum number of sequential operations for different layer types. $n$ is the sequence length, $d$ is the representation dimension, $k$ is the kernel size of convolutions and $r$ the size of the neighborhood in restricted self-attention.}
%Attention models are quite efficient for cross-positional communications when sequence length is smaller than channel depth.
\label{tab:op_complexities}
\begin{center}
\vspace{-1mm}
%\scalebox{0.75}{

\begin{tabular}{lccc}
\toprule
Layer Type & Complexity per Layer & Sequential & Maximum Path Length \\
 & & Operations & \\
\hline
\rule{0pt}{2.0ex}Self-Attention & $O(n^2 \cdot d)$ & $O(1)$ & $O(1)$ \\
Recurrent & $O(n \cdot d^2)$ & $O(n)$ & $O(n)$ \\
Convolutional & $O(k \cdot n \cdot d^2)$ & $O(1)$ & $O(\log_k(n))$ \\
%\cmidrule
Self-Attention (restricted) & $O(r \cdot n \cdot d)$ & $O(1)$ & $O(n/r)$ \\
%Convolutional (separable) & $O(k \cdot n \cdot d + n \cdot d^2)$ & $O(1)$ & $O(\log_k(n))$ \\
%Position-wise Feed-Forward & $O(n \cdot d^2)$ & $O(1)$ & $\infty$ \\
%Fully Connected & $O(n^2 \cdot d^2)$ & $O(1)$ & $O(1)$ \\
\bottomrule
\end{tabular}
%}
\end{center}
\end{table}


%\begin{table}[b]
%\caption{
% Maximum path lengths, per-layer complexity and minimum number of sequential operations for different layer types. $n$ is the sequence length, $d$ is the representation dimensionality, $k$ is the kernel size of convolutions and $r$ the size of the neighborhood in localized self-attention.}
%Attention models are quite efficient for cross-positional communications when sequence length is smaller than channel depth.
%\label{tab:op_complexities}
%\begin{center}
%\vspace{-1mm}
%%\scalebox{0.75}{
%
%\begin{tabular}{lccc}
%\hline
%Layer Type & Receptive & Complexity per Layer & Sequential \\
% & Field Size & & Operations \\
%\hline
%Self-Attention & $n$ & $O(n^2 \cdot d)$ & $O(1)$ \\
%Recurrent & $n$ & $O(n \cdot d^2)$ & $O(n)$ \\
%Convolutional & $k$ & $O(k \cdot n \cdot d^2)$ & $O(\log_k(n))$ \\
%\hline
%Self-Attention (localized) & $r$ & $O(r \cdot n \cdot d)$ & $O(1)$ \\
%Convolutional (separable) & $k$ & $O(k \cdot n \cdot d + n \cdot d^2)$ & $O(\log_k(n))$ \\
%Position-wise Feed-Forward & $1$ & $O(n \cdot d^2)$ & $O(1)$ \\
%Fully Connected & $n$ & $O(n^2 \cdot d^2)$ & $O(1)$ \\
%\end{tabular}
%%}
%\end{center}
%\end{table}

%The receptive field size of a layer is the number of different input representations that can influence any particular output representation. Recurrent layers and self-attention layers have a full receptive field equal to the sequence length $n$. Convolutional layers have a limited receptive field equal to their kernel width $k$, which is generally chosen to be small in order to limit computational cost.

As noted in Table \ref{tab:op_complexities}, a self-attention layer connects all positions with a constant number of sequentially executed operations, whereas a recurrent layer requires $O(n)$ sequential operations.
In terms of computational complexity, self-attention layers are faster than recurrent layers when the sequence length $n$ is smaller than the representation dimensionality $d$, which is most often the case with sentence representations used by state-of-the-art models in machine translation, such as word-piece \citep{wu2016google} and byte-pair \citep{sennrich2015neural} representations.
To improve computational performance for tasks involving very long sequences, self-attention could be restricted to considering only a neighborhood of size $r$ in the input sequence centered around the respective output position. This would increase the maximum path length to $O(n/r)$. We plan to investigate this approach further in future work.
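
A back-of-the-envelope comparison of the per-layer terms in Table~\ref{tab:op_complexities}, with illustrative sizes of our own choosing:
\begin{verbatim}
n, d, k, r = 50, 512, 3, 10   # a typical n is well below d_model
print(n * n * d)       # self-attention: O(n^2 * d)          -> 1,280,000
print(n * d * d)       # recurrent: O(n * d^2)               -> 13,107,200
print(k * n * d * d)   # convolutional: O(k * n * d^2)       -> 39,321,600
print(r * n * d)       # restricted self-attention: O(r*n*d) -> 256,000
\end{verbatim}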

A single convolutional layer with kernel width $k < n$ does not connect all pairs of input and output positions. Doing so requires a stack of $O(n/k)$ convolutional layers in the case of contiguous kernels, or $O(\log_k(n))$ in the case of dilated convolutions \citep{NalBytenet2017}, increasing the length of the longest paths between any two positions in the network.
Convolutional layers are generally more expensive than recurrent layers, by a factor of $k$. Separable convolutions \citep{xception2016}, however, decrease the complexity considerably, to $O(k \cdot n \cdot d + n \cdot d^2)$. Even with $k=n$, however, the complexity of a separable convolution is equal to the combination of a self-attention layer and a point-wise feed-forward layer, the approach we take in our model.

%\subsection{Unfiltered Bottleneck Argument}

%An orthogonal argument can be made for self-attention layers based on when the layer imposes the bottleneck of mapping all of the information used to compute a given output position into a single, fixed-length vector. ...

%There is a second argument for self-attention layers which we call the unfiltered bottleneck argument. In both recurrent and convolutional layers, the information that position $i$ receives from the other positions is compressed to a vector of dimension $d$ before it ever can be filtered by the content $x_i$. More precisely, we can express $y_i = F(i, x_i, G(i, \{x_{j \neq i}\}))$, where $G(i, \{x_{j \neq i}\})$ is a vector of dimension $d$. Intuitively, we would expect that this would cause a large amount of irrelevant information to crowd out the relevant information. Self-attention does not suffer from the unfiltered bottleneck problem, since the aggregation happens after filtering, and so, intuitively, we have the chance of transmitting lots of relevant information.

As a side benefit, self-attention could yield more interpretable models. We inspect attention distributions from our models and present and discuss examples in the appendix. Not only do individual attention heads clearly learn to perform different tasks, many appear to exhibit behavior related to the syntactic and semantic structure of the sentences.
crazy_functions/test_project/latex/attention/来源
ADDED
@@ -0,0 +1,8 @@
The ancestor of ChatGPT: "Attention Is All You Need"

Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin

The actual abstract is as follows:
The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.

https://arxiv.org/abs/1706.03762
crazy_functions/test_project/python/dqn/__init__.py
ADDED
@@ -0,0 +1,2 @@
from stable_baselines3.dqn.dqn import DQN
from stable_baselines3.dqn.policies import CnnPolicy, MlpPolicy
crazy_functions/test_project/python/dqn/dqn.py
ADDED
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Any, Dict, List, Optional, Tuple, Type, Union
|
2 |
+
|
3 |
+
import gym
|
4 |
+
import numpy as np
|
5 |
+
import torch as th
|
6 |
+
from torch.nn import functional as F
|
7 |
+
|
8 |
+
from stable_baselines3.common import logger
|
9 |
+
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
|
10 |
+
from stable_baselines3.common.preprocessing import maybe_transpose
|
11 |
+
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
|
12 |
+
from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update
|
13 |
+
from stable_baselines3.dqn.policies import DQNPolicy
|
14 |
+
|
15 |
+
|
16 |
+
class DQN(OffPolicyAlgorithm):
|
17 |
+
"""
|
18 |
+
Deep Q-Network (DQN)
|
19 |
+
|
20 |
+
Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236
|
21 |
+
Default hyperparameters are taken from the nature paper,
|
22 |
+
except for the optimizer and learning rate that were taken from Stable Baselines defaults.
|
23 |
+
|
24 |
+
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
|
25 |
+
:param env: The environment to learn from (if registered in Gym, can be str)
|
26 |
+
:param learning_rate: The learning rate, it can be a function
|
27 |
+
of the current progress remaining (from 1 to 0)
|
28 |
+
:param buffer_size: size of the replay buffer
|
29 |
+
:param learning_starts: how many steps of the model to collect transitions for before learning starts
|
30 |
+
:param batch_size: Minibatch size for each gradient update
|
31 |
+
:param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update
|
32 |
+
:param gamma: the discount factor
|
33 |
+
:param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
|
34 |
+
like ``(5, "step")`` or ``(2, "episode")``.
|
35 |
+
:param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
|
36 |
+
Set to ``-1`` means to do as many gradient steps as steps done in the environment
|
37 |
+
during the rollout.
|
38 |
+
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
|
39 |
+
at a cost of more complexity.
|
40 |
+
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
|
41 |
+
:param target_update_interval: update the target network every ``target_update_interval``
|
42 |
+
environment steps.
|
43 |
+
:param exploration_fraction: fraction of entire training period over which the exploration rate is reduced
|
44 |
+
:param exploration_initial_eps: initial value of random action probability
|
45 |
+
:param exploration_final_eps: final value of random action probability
|
46 |
+
:param max_grad_norm: The maximum value for the gradient clipping
|
47 |
+
:param tensorboard_log: the log location for tensorboard (if None, no logging)
|
48 |
+
:param create_eval_env: Whether to create a second environment that will be
|
49 |
+
used for evaluating the agent periodically. (Only available when passing string for the environment)
|
50 |
+
:param policy_kwargs: additional arguments to be passed to the policy on creation
|
51 |
+
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
|
52 |
+
:param seed: Seed for the pseudo random generators
|
53 |
+
:param device: Device (cpu, cuda, ...) on which the code should be run.
|
54 |
+
Setting it to auto, the code will be run on the GPU if possible.
|
55 |
+
:param _init_setup_model: Whether or not to build the network at the creation of the instance
|
56 |
+
"""
|
57 |
+
|
58 |
+
def __init__(
|
59 |
+
self,
|
60 |
+
policy: Union[str, Type[DQNPolicy]],
|
61 |
+
env: Union[GymEnv, str],
|
62 |
+
learning_rate: Union[float, Schedule] = 1e-4,
|
63 |
+
buffer_size: int = 1000000,
|
64 |
+
learning_starts: int = 50000,
|
65 |
+
batch_size: Optional[int] = 32,
|
66 |
+
tau: float = 1.0,
|
67 |
+
gamma: float = 0.99,
|
68 |
+
train_freq: Union[int, Tuple[int, str]] = 4,
|
69 |
+
gradient_steps: int = 1,
|
70 |
+
optimize_memory_usage: bool = False,
|
71 |
+
target_update_interval: int = 10000,
|
72 |
+
exploration_fraction: float = 0.1,
|
73 |
+
exploration_initial_eps: float = 1.0,
|
74 |
+
exploration_final_eps: float = 0.05,
|
75 |
+
max_grad_norm: float = 10,
|
76 |
+
tensorboard_log: Optional[str] = None,
|
77 |
+
create_eval_env: bool = False,
|
78 |
+
policy_kwargs: Optional[Dict[str, Any]] = None,
|
79 |
+
verbose: int = 0,
|
80 |
+
seed: Optional[int] = None,
|
81 |
+
device: Union[th.device, str] = "auto",
|
82 |
+
_init_setup_model: bool = True,
|
83 |
+
):
|
84 |
+
|
85 |
+
super(DQN, self).__init__(
|
86 |
+
policy,
|
87 |
+
env,
|
88 |
+
DQNPolicy,
|
89 |
+
learning_rate,
|
90 |
+
buffer_size,
|
91 |
+
learning_starts,
|
92 |
+
batch_size,
|
93 |
+
tau,
|
94 |
+
gamma,
|
95 |
+
train_freq,
|
96 |
+
gradient_steps,
|
97 |
+
action_noise=None, # No action noise
|
98 |
+
policy_kwargs=policy_kwargs,
|
99 |
+
tensorboard_log=tensorboard_log,
|
100 |
+
verbose=verbose,
|
101 |
+
device=device,
|
102 |
+
create_eval_env=create_eval_env,
|
103 |
+
seed=seed,
|
104 |
+
sde_support=False,
|
105 |
+
optimize_memory_usage=optimize_memory_usage,
|
106 |
+
supported_action_spaces=(gym.spaces.Discrete,),
|
107 |
+
)
|
108 |
+
|
109 |
+
self.exploration_initial_eps = exploration_initial_eps
|
110 |
+
self.exploration_final_eps = exploration_final_eps
|
111 |
+
self.exploration_fraction = exploration_fraction
|
112 |
+
self.target_update_interval = target_update_interval
|
113 |
+
self.max_grad_norm = max_grad_norm
|
114 |
+
# "epsilon" for the epsilon-greedy exploration
|
115 |
+
self.exploration_rate = 0.0
|
116 |
+
# Linear schedule will be defined in `_setup_model()`
|
117 |
+
self.exploration_schedule = None
|
118 |
+
self.q_net, self.q_net_target = None, None
|
119 |
+
|
120 |
+
if _init_setup_model:
|
121 |
+
self._setup_model()
|
122 |
+
|
123 |
+
def _setup_model(self) -> None:
|
124 |
+
super(DQN, self)._setup_model()
|
125 |
+
self._create_aliases()
|
126 |
+
self.exploration_schedule = get_linear_fn(
|
127 |
+
self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction
|
128 |
+
)
|
129 |
+
|
130 |
+
def _create_aliases(self) -> None:
|
131 |
+
self.q_net = self.policy.q_net
|
132 |
+
self.q_net_target = self.policy.q_net_target
|
133 |
+
|
134 |
+
def _on_step(self) -> None:
|
135 |
+
"""
|
136 |
+
Update the exploration rate and target network if needed.
|
137 |
+
This method is called in ``collect_rollouts()`` after each step in the environment.
|
138 |
+
"""
|
139 |
+
if self.num_timesteps % self.target_update_interval == 0:
|
140 |
+
polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)
|
141 |
+
|
142 |
+
self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
|
143 |
+
logger.record("rollout/exploration rate", self.exploration_rate)
|
144 |
+
|
+    def train(self, gradient_steps: int, batch_size: int = 100) -> None:
+        # Update learning rate according to schedule
+        self._update_learning_rate(self.policy.optimizer)
+
+        losses = []
+        for _ in range(gradient_steps):
+            # Sample replay buffer
+            replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
+
+            with th.no_grad():
+                # Compute the next Q-values using the target network
+                next_q_values = self.q_net_target(replay_data.next_observations)
+                # Follow greedy policy: use the one with the highest value
+                next_q_values, _ = next_q_values.max(dim=1)
+                # Avoid potential broadcast issue
+                next_q_values = next_q_values.reshape(-1, 1)
+                # 1-step TD target
+                target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
+
+            # Get current Q-values estimates
+            current_q_values = self.q_net(replay_data.observations)
+
+            # Retrieve the q-values for the actions from the replay buffer
+            current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long())
+
+            # Compute Huber loss (less sensitive to outliers)
+            loss = F.smooth_l1_loss(current_q_values, target_q_values)
+            losses.append(loss.item())
+
+            # Optimize the policy
+            self.policy.optimizer.zero_grad()
+            loss.backward()
+            # Clip gradient norm
+            th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
+            self.policy.optimizer.step()
+
+        # Increase update counter
+        self._n_updates += gradient_steps
+
+        logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
+        logger.record("train/loss", np.mean(losses))
+
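The `with th.no_grad()` block above computes the 1-step TD target y = r + γ · (1 − done) · max_a′ Q_target(s′, a′), which the Huber (smooth-L1) loss then compares against Q(s, a). A toy computation with made-up numbers:

import torch as th

# Hypothetical transition: reward 1.0, episode not done, gamma = 0.99,
# target-network Q-values for the next state = [0.5, 2.0]
reward, done, gamma = th.tensor([[1.0]]), th.tensor([[0.0]]), 0.99
next_q, _ = th.tensor([[0.5, 2.0]]).max(dim=1)          # greedy next value: 2.0
target = reward + (1 - done) * gamma * next_q.reshape(-1, 1)
print(target)  # tensor([[2.9800]]) = 1.0 + 0.99 * 2.0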
+    def predict(
+        self,
+        observation: np.ndarray,
+        state: Optional[np.ndarray] = None,
+        mask: Optional[np.ndarray] = None,
+        deterministic: bool = False,
+    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
+        """
+        Overrides the base_class predict function to include epsilon-greedy exploration.
+
+        :param observation: the input observation
+        :param state: The last states (can be None, used in recurrent policies)
+        :param mask: The last masks (can be None, used in recurrent policies)
+        :param deterministic: Whether or not to return deterministic actions.
+        :return: the model's action and the next state
+            (used in recurrent policies)
+        """
+        if not deterministic and np.random.rand() < self.exploration_rate:
+            if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space):
+                n_batch = observation.shape[0]
+                action = np.array([self.action_space.sample() for _ in range(n_batch)])
+            else:
+                action = np.array(self.action_space.sample())
+        else:
+            action, state = self.policy.predict(observation, state, mask, deterministic)
+        return action, state
+
+    def learn(
+        self,
+        total_timesteps: int,
+        callback: MaybeCallback = None,
+        log_interval: int = 4,
+        eval_env: Optional[GymEnv] = None,
+        eval_freq: int = -1,
+        n_eval_episodes: int = 5,
+        tb_log_name: str = "DQN",
+        eval_log_path: Optional[str] = None,
+        reset_num_timesteps: bool = True,
+    ) -> OffPolicyAlgorithm:
+
+        return super(DQN, self).learn(
+            total_timesteps=total_timesteps,
+            callback=callback,
+            log_interval=log_interval,
+            eval_env=eval_env,
+            eval_freq=eval_freq,
+            n_eval_episodes=n_eval_episodes,
+            tb_log_name=tb_log_name,
+            eval_log_path=eval_log_path,
+            reset_num_timesteps=reset_num_timesteps,
+        )
+
+    def _excluded_save_params(self) -> List[str]:
+        return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"]
+
+    def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
+        state_dicts = ["policy", "policy.optimizer"]
+
+        return state_dicts, []
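Together with `policies.py` below, this completes the algorithm; a minimal usage sketch (assuming `stable-baselines3` and `gym` are installed):

from stable_baselines3 import DQN

model = DQN("MlpPolicy", "CartPole-v1", verbose=1)
model.learn(total_timesteps=20_000)                      # collect_rollouts() + train()
obs = model.get_env().reset()
action, _state = model.predict(obs, deterministic=True)  # greedy: no epsilon noise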
crazy_functions/test_project/python/dqn/policies.py
ADDED
@@ -0,0 +1,237 @@
+from typing import Any, Dict, List, Optional, Type
+
+import gym
+import torch as th
+from torch import nn
+
+from stable_baselines3.common.policies import BasePolicy, register_policy
+from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp
+from stable_baselines3.common.type_aliases import Schedule
+
+
+class QNetwork(BasePolicy):
+    """
+    Action-Value (Q-Value) network for DQN
+
+    :param observation_space: Observation space
+    :param action_space: Action space
+    :param net_arch: The specification of the policy and value networks.
+    :param activation_fn: Activation function
+    :param normalize_images: Whether to normalize images or not,
+        dividing by 255.0 (True by default)
+    """
+
+    def __init__(
+        self,
+        observation_space: gym.spaces.Space,
+        action_space: gym.spaces.Space,
+        features_extractor: nn.Module,
+        features_dim: int,
+        net_arch: Optional[List[int]] = None,
+        activation_fn: Type[nn.Module] = nn.ReLU,
+        normalize_images: bool = True,
+    ):
+        super(QNetwork, self).__init__(
+            observation_space,
+            action_space,
+            features_extractor=features_extractor,
+            normalize_images=normalize_images,
+        )
+
+        if net_arch is None:
+            net_arch = [64, 64]
+
+        self.net_arch = net_arch
+        self.activation_fn = activation_fn
+        self.features_extractor = features_extractor
+        self.features_dim = features_dim
+        self.normalize_images = normalize_images
+        action_dim = self.action_space.n  # number of actions
+        q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn)
+        self.q_net = nn.Sequential(*q_net)
+
+    def forward(self, obs: th.Tensor) -> th.Tensor:
+        """
+        Predict the q-values.
+
+        :param obs: Observation
+        :return: The estimated Q-Value for each action.
+        """
+        return self.q_net(self.extract_features(obs))
+
+    def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:
+        q_values = self.forward(observation)
+        # Greedy action
+        action = q_values.argmax(dim=1).reshape(-1)
+        return action
+
+    def _get_constructor_parameters(self) -> Dict[str, Any]:
+        data = super()._get_constructor_parameters()
+
+        data.update(
+            dict(
+                net_arch=self.net_arch,
+                features_dim=self.features_dim,
+                activation_fn=self.activation_fn,
+                features_extractor=self.features_extractor,
+            )
+        )
+        return data
+
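`_predict` above reduces to a per-row argmax over the network's Q-values; for example:

import torch as th

q_values = th.tensor([[0.1, 0.9, 0.3],
                      [1.2, 0.0, 0.4]])        # batch of 2 observations, 3 actions
action = q_values.argmax(dim=1).reshape(-1)    # tensor([1, 0]): greedy action per row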
+
+class DQNPolicy(BasePolicy):
+    """
+    Policy class with Q-Value Net and target net for DQN
+
+    :param observation_space: Observation space
+    :param action_space: Action space
+    :param lr_schedule: Learning rate schedule (could be constant)
+    :param net_arch: The specification of the policy and value networks.
+    :param activation_fn: Activation function
+    :param features_extractor_class: Features extractor to use.
+    :param features_extractor_kwargs: Keyword arguments
+        to pass to the features extractor.
+    :param normalize_images: Whether to normalize images or not,
+        dividing by 255.0 (True by default)
+    :param optimizer_class: The optimizer to use,
+        ``th.optim.Adam`` by default
+    :param optimizer_kwargs: Additional keyword arguments,
+        excluding the learning rate, to pass to the optimizer
+    """
+
+    def __init__(
+        self,
+        observation_space: gym.spaces.Space,
+        action_space: gym.spaces.Space,
+        lr_schedule: Schedule,
+        net_arch: Optional[List[int]] = None,
+        activation_fn: Type[nn.Module] = nn.ReLU,
+        features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
+        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
+        normalize_images: bool = True,
+        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
+        optimizer_kwargs: Optional[Dict[str, Any]] = None,
+    ):
+        super(DQNPolicy, self).__init__(
+            observation_space,
+            action_space,
+            features_extractor_class,
+            features_extractor_kwargs,
+            optimizer_class=optimizer_class,
+            optimizer_kwargs=optimizer_kwargs,
+        )
+
+        if net_arch is None:
+            if features_extractor_class == FlattenExtractor:
+                net_arch = [64, 64]
+            else:
+                net_arch = []
+
+        self.net_arch = net_arch
+        self.activation_fn = activation_fn
+        self.normalize_images = normalize_images
+
+        self.net_args = {
+            "observation_space": self.observation_space,
+            "action_space": self.action_space,
+            "net_arch": self.net_arch,
+            "activation_fn": self.activation_fn,
+            "normalize_images": normalize_images,
+        }
+
+        self.q_net, self.q_net_target = None, None
+        self._build(lr_schedule)
+
+    def _build(self, lr_schedule: Schedule) -> None:
+        """
+        Create the network and the optimizer.
+
+        :param lr_schedule: Learning rate schedule
+            lr_schedule(1) is the initial learning rate
+        """
+
+        self.q_net = self.make_q_net()
+        self.q_net_target = self.make_q_net()
+        self.q_net_target.load_state_dict(self.q_net.state_dict())
+
+        # Setup optimizer with initial learning rate
+        self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
+
+    def make_q_net(self) -> QNetwork:
+        # Make sure we always have separate networks for features extractors etc
+        net_args = self._update_features_extractor(self.net_args, features_extractor=None)
+        return QNetwork(**net_args).to(self.device)
+
+    def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
+        return self._predict(obs, deterministic=deterministic)
+
+    def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
+        return self.q_net._predict(obs, deterministic=deterministic)
+
+    def _get_constructor_parameters(self) -> Dict[str, Any]:
+        data = super()._get_constructor_parameters()
+
+        data.update(
+            dict(
+                net_arch=self.net_args["net_arch"],
+                activation_fn=self.net_args["activation_fn"],
+                lr_schedule=self._dummy_schedule,  # dummy lr schedule, not needed for loading policy alone
+                optimizer_class=self.optimizer_class,
+                optimizer_kwargs=self.optimizer_kwargs,
+                features_extractor_class=self.features_extractor_class,
+                features_extractor_kwargs=self.features_extractor_kwargs,
+            )
+        )
+        return data
+
+
+MlpPolicy = DQNPolicy
+
+
+class CnnPolicy(DQNPolicy):
+    """
+    Policy class for DQN when using images as input.
+
+    :param observation_space: Observation space
+    :param action_space: Action space
+    :param lr_schedule: Learning rate schedule (could be constant)
+    :param net_arch: The specification of the policy and value networks.
+    :param activation_fn: Activation function
+    :param features_extractor_class: Features extractor to use.
+    :param normalize_images: Whether to normalize images or not,
+        dividing by 255.0 (True by default)
+    :param optimizer_class: The optimizer to use,
+        ``th.optim.Adam`` by default
+    :param optimizer_kwargs: Additional keyword arguments,
+        excluding the learning rate, to pass to the optimizer
+    """
+
+    def __init__(
+        self,
+        observation_space: gym.spaces.Space,
+        action_space: gym.spaces.Space,
+        lr_schedule: Schedule,
+        net_arch: Optional[List[int]] = None,
+        activation_fn: Type[nn.Module] = nn.ReLU,
+        features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
+        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
+        normalize_images: bool = True,
+        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
+        optimizer_kwargs: Optional[Dict[str, Any]] = None,
+    ):
+        super(CnnPolicy, self).__init__(
+            observation_space,
+            action_space,
+            lr_schedule,
+            net_arch,
+            activation_fn,
+            features_extractor_class,
+            features_extractor_kwargs,
+            normalize_images,
+            optimizer_class,
+            optimizer_kwargs,
+        )
+
+
+register_policy("MlpPolicy", MlpPolicy)
+register_policy("CnnPolicy", CnnPolicy)
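`MlpPolicy` and `CnnPolicy` differ only in their default features extractor (`FlattenExtractor` vs. `NatureCNN`). Either can be reshaped at construction time; a sketch of standard stable-baselines3 usage, where `policy_kwargs` is forwarded into `DQNPolicy` above:

import torch as th
from stable_baselines3 import DQN

# "MlpPolicy" resolves to the DQNPolicy registered above; net_arch and
# activation_fn flow through to QNetwork.
model = DQN(
    "MlpPolicy",
    "CartPole-v1",
    policy_kwargs=dict(net_arch=[256, 256], activation_fn=th.nn.ReLU),
)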
crazy_functions/test_project/python/dqn/来源
ADDED
@@ -0,0 +1,2 @@
+github stablebaseline3
+https://github.com/DLR-RM/stable-baselines3
crazy_functions/读文章写摘要.py
ADDED
@@ -0,0 +1,98 @@
+from functools import wraps
+from predict import predict_no_ui
+fast_debug = False
+
+
+def report_execption(chatbot, history, a, b):
+    chatbot.append((a, b))
+    history.append(a); history.append(b)
+
+# 捕获不能预料的异常 (catch any unanticipated exception)
+def CatchException(f):
+    @wraps(f)
+    def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+        try:
+            yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)
+        except Exception as e:
+            import traceback
+            tb_str = traceback.format_exc()
+            chatbot[-1] = (chatbot[-1][0], f"[Local Message] something error occured: \n {tb_str}")
+            yield chatbot, history, f'异常 {e}'
+    return decorated
+
+def 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt):
+    import time, glob, os
+    print('begin analysis on:', file_manifest)
+    for index, fp in enumerate(file_manifest):
+        with open(fp, 'r', encoding='utf-8') as f:
+            file_content = f.read()
+
+        前言 = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
+        i_say = 前言 + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
+        i_say_show_user = 前言 + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
+        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
+        print('[1] yield chatbot, history')
+        yield chatbot, history, '正常'
+
+        if not fast_debug:
+            msg = '正常'
+            # ** gpt request **
+            while True:
+                try:
+                    gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature)
+                    break
+                except ConnectionAbortedError as e:
+                    i_say = i_say[:len(i_say)//2]
+                    msg = '文件太长,进行了拦腰截断'
+
+            print('[2] end gpt req')
+            chatbot[-1] = (i_say_show_user, gpt_say)
+            history.append(i_say_show_user); history.append(gpt_say)
+            print('[3] yield chatbot, history')
+            yield chatbot, history, msg
+            print('[4] next')
+            if not fast_debug: time.sleep(2)
+
+    all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
+    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
+    chatbot.append((i_say, "[Local Message] waiting gpt response."))
+    yield chatbot, history, '正常'
+
+    if not fast_debug:
+        msg = '正常'
+        # ** gpt request **
+        while True:
+            try:
+                gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history)
+                break
+            except ConnectionAbortedError as e:
+                history = [his[len(his)//2:] for his in history]
+                msg = '对话历史太长,每段历史拦腰截断'
+
+
+        chatbot[-1] = (i_say, gpt_say)
+        history.append(i_say); history.append(gpt_say)
+        yield chatbot, history, msg
+
+
+
+
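Both `while True` loops above implement a crude length backoff: when `predict_no_ui` raises `ConnectionAbortedError` (used here as the input-too-long signal; the status messages translate to "file too long, cut in half" and "history too long, every entry halved"), the prompt, or each history entry, is halved and the request retried. The same pattern as a standalone sketch (`ask_with_backoff` is a hypothetical helper, not part of the commit):

def ask_with_backoff(ask_fn, prompt, max_retries=5):
    # Halve the prompt each time the request is rejected for length.
    for _ in range(max_retries):
        try:
            return ask_fn(prompt)
        except ConnectionAbortedError:
            prompt = prompt[:len(prompt) // 2]
    raise RuntimeError('prompt could not be shortened enough')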
+@CatchException
+def 读文章写摘要(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+    history = []    # 清空历史,以免输入溢出 (clear history to avoid overflowing the input)
+    import glob, os
+    if os.path.exists(txt):
+        project_folder = txt
+    else:
+        if txt == "": txt = '空空如也的输入栏'
+        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        yield chatbot, history, '正常'
+        return
+    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \
+    #                [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
+    #                [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
+    if len(file_manifest) == 0:
+        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
+        yield chatbot, history, '正常'
+        return
+    yield from 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
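These functions follow the repo's generator protocol: each `yield` hands back `(chatbot, history, status)` so the front end can refresh mid-task. A hypothetical driver over the LaTeX test project added in this commit (assumes `predict_no_ui` is configured with a valid API key):

from crazy_functions.读文章写摘要 import 读文章写摘要

chatbot, history = [], []
for chatbot, history, status in 读文章写摘要(
        'crazy_functions/test_project/latex/attention',   # .tex test files from this commit
        top_p=1.0, temperature=1.0,
        chatbot=chatbot, history=history,
        systemPromptTxt='', WEB_PORT=None):
    print(status, '-', len(history) // 2, 'exchanges so far')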
functional_crazy.py
CHANGED
@@ -1,7 +1,23 @@
+from functools import wraps
 from predict import predict_no_ui
 fast_debug = False
 
+def CatchException(f):
+    @wraps(f)
+    def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+        try:
+            yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)
+        except Exception as e:
+            import traceback
+            tb_str = traceback.format_exc()
+            chatbot[-1] = (chatbot[-1][0], f"[Local Message] something error occured: \n {tb_str}")
+            yield chatbot, history, f'异常 {e}'
+    return decorated
+
+
+@CatchException
 def 高阶功能模板函数(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+    history = []    # 清空历史,以免输入溢出
     for i in range(5):
         i_say = f'我给出一个数字,你给出该数字的平方。我给出数字:{i}'
         chatbot.append((i_say, "[Local Message] waiting gpt response."))
@@ -14,10 +30,11 @@ def 高阶功能模板函数(txt, top_p, temperature, chatbot, history, systemPr
         yield chatbot, history, '正常' # 显示 (refresh the display)
 
 
+@CatchException
 def 解析项目本身(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+    history = []    # 清空历史,以免输入溢出
     import time, glob, os
     file_manifest = [f for f in glob.glob('*.py')]
-
     for index, fp in enumerate(file_manifest):
         with open(fp, 'r', encoding='utf-8') as f:
             file_content = f.read()
@@ -113,7 +130,9 @@ def 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot,
 
 
 
+@CatchException
 def 解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+    history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
         project_folder = txt
@@ -130,8 +149,9 @@ def 解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPr
     yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
 
 
-
+@CatchException
 def 解析一个C项目的头文件(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+    history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):
         project_folder = txt
@@ -150,21 +170,25 @@ def 解析一个C项目的头文件(txt, top_p, temperature, chatbot, history, s
     yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
 
 
-
 def get_crazy_functionals():
+    from crazy_functions.读文章写摘要 import 读文章写摘要
     return {
        "[实验功能] 请解析并解构此项目本身": {
            "Color": "stop",    # 按钮颜色 (button color)
            "Function": 解析项目本身
        },
-       "[实验功能]
+       "[实验功能] 解析整个Python项目(input输入项目根路径)": {
            "Color": "stop",    # 按钮颜色
            "Function": 解析一个Python项目
        },
-       "[实验功能]
+       "[实验功能] 解析整个C++项目的头文件(input输入项目根路径)": {
            "Color": "stop",    # 按钮颜色
            "Function": 解析一个C项目的头文件
        },
+       "[实验功能] 解读latex论文写摘要(input输入项目根路径)": {
+           "Color": "stop",    # 按钮颜色
+           "Function": 读文章写摘要
+       },
        "[实验功能] 高阶功能模板函数": {
            "Color": "stop",    # 按钮颜色
            "Function": 高阶功能模板函数
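`CatchException` converts an uncaught exception inside any of these generator functions into a final yielded chat update instead of killing the Gradio callback. A toy demonstration with a simplified signature (hypothetical, not part of the commit):

from functools import wraps

def CatchException(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        try:
            yield from f(*args, **kwargs)
        except Exception as e:
            yield f"[Local Message] {e}"   # surface the error as one last yielded update
    return decorated

@CatchException
def steps():
    yield "step 1"
    raise ValueError("boom")

print(list(steps()))   # ['step 1', '[Local Message] boom'] — the UI loop keeps running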
predict.py
CHANGED
@@ -86,6 +86,7 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''
         chunk = next(stream_response)
         # print(chunk.decode()[6:])
         if is_head_of_the_stream:
+            # 数据流的第一帧不携带content (the first frame of the stream carries no content)
             is_head_of_the_stream = False; continue
 
         if chunk:
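The new comment explains why the first frame is skipped: an OpenAI-style SSE stream opens with a delta that carries only role metadata, not text, so content accumulation starts at the second frame. A sketch with hypothetical frame contents:

frames = [
    {"choices": [{"delta": {"role": "assistant"}}]},   # head frame: no content
    {"choices": [{"delta": {"content": "Hel"}}]},
    {"choices": [{"delta": {"content": "lo"}}]},
]
is_head_of_the_stream = True
text = ""
for frame in frames:
    if is_head_of_the_stream:
        is_head_of_the_stream = False; continue        # mirror the check in predict()
    text += frame["choices"][0]["delta"].get("content", "")
print(text)  # "Hello"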