parquet-converter committed
Commit 1f845a4 • 1 Parent(s): 314527c
Update parquet files (step 121 of 121)
This view is limited to 50 files because it contains too many changes. See raw diff.
- default/train/0000.parquet +3 -0
- spaces/0xtanmoysamanta/espnet-kan-bayashi_ljspeech_vits/app.py +0 -3
- spaces/101-5/gpt4free/g4f/.v1/testing/theb_test.py +0 -5
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Eternity 2010 Thai Movie English Subtitle The Film That Won Five Awards and Nine Nominations.md +0 -96
- spaces/1gistliPinn/ChatGPT4/Examples/AdobeIllustratorCc171AmtlibDllCrackepub.md +0 -28
- spaces/1gistliPinn/ChatGPT4/Examples/Appa Magala Kannada Sex Story !!INSTALL!!.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cricket League APK A Fast Fun and Exciting Online Cricket Game with 2 Overs of Bowling Batting and Tons of Tactics.md +0 -121
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download CarX Drift Racing 2 on PC and Mac - The Sequel to the Best Drifting Game on Android.md +0 -113
- spaces/1phancelerku/anime-remove-background/AFK Soccer The Football Game that Plays Itself.md +0 -137
- spaces/1phancelerku/anime-remove-background/Download Shell Racing Legends APK and Drive Exclusive Ferrari Models with Remote Control.md +0 -131
- spaces/1toTree/lora_test/ppdiffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion.py +0 -459
- spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/synthesis_engine.py +0 -502
- spaces/AI-Dashboards/ScrabbleSolverWordThesaurus/README.md +0 -13
- spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/lib_v5/dataset.py +0 -183
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/fastspeech/fs2.py +0 -250
- spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/selector/sde_team_given_tests.py +0 -56
- spaces/AiMimicry/sovits-models/utils.py +0 -542
- spaces/Aki004/herta-so-vits/vdecoder/nsf_hifigan/models.py +0 -435
- spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/models.py +0 -541
- spaces/Aleqsd/openjourney/README.md +0 -13
- spaces/AlexWang/lama/fetch_data/places_standard_test_val_sample.sh +0 -22
- spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/latex/attention/introduction.tex +0 -18
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/textual_inversion.py +0 -1020
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_ipndm.py +0 -161
- spaces/Andy1621/uniformer_image_detection/exp/mask_rcnn_3x_ms_hybrid_base/run.sh +0 -10
- spaces/Andy1621/uniformer_light/uniformer_light_video.py +0 -595
- spaces/Andyrasika/Andyrasika-lora_diffusion/README.md +0 -12
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/metadata_gguf.py +0 -91
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/ui_chat.py +0 -352
- spaces/AsakuraMizu/moe-tts/transforms.py +0 -193
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/serialize.py +0 -190
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/webencodings/tests.py +0 -153
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/unixccompiler.py +0 -401
- spaces/AutoLLM/ArxivDigest/download_new_papers.py +0 -64
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/nms.py +0 -139
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/packaging/build_all_wheels.sh +0 -65
- spaces/Bajr/softly/greeting.md +0 -1
- spaces/Benson/text-generation/Examples/Aparcamiento De Coches Multijugador Apk En Son Srm.md +0 -65
- spaces/Benson/text-generation/Examples/Arca Supervivencia Evolucionado Descargar Pc Juegos picos.md +0 -49
- spaces/Benson/text-generation/Examples/Defender 3 Apk Mod.md +0 -48
- spaces/Benson/text-generation/Examples/Descarga De Fiebre De Oficina Juego.md +0 -128
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/index/package_finder.py +0 -1029
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py +0 -39
- spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/bandwidth.py +0 -439
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/py37compat.py +0 -31
- spaces/CALM/Dashboard/dashboard_utils/time_tracker.py +0 -32
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/tutorials/install.md +0 -1
- spaces/CVPR/LIVE/color.cpp +0 -25
- spaces/CVPR/LIVE/thrust/dependencies/cub/cmake/CubHeaderTesting.cmake +0 -29
- spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/reduce.h +0 -73
default/train/0000.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68ea7938c911781ae99bcb70ecaf7b393e1c75a9da8052328e97242911ba70b1
+size 523581
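Note: the added file above is a Git LFS pointer, not the parquet data itself; the repository stores only the spec version, a sha256 object ID, and the byte size (523581), while the actual file lives in LFS storage. As a minimal illustrative sketch only (this helper is not part of the commit), such a pointer can be parsed in Python like so:

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; split on the first space.
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    # "size" is the byte count of the real object stored in LFS.
    fields["size"] = int(fields["size"])
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:68ea7938c911781ae99bcb70ecaf7b393e1c75a9da8052328e97242911ba70b1\n"
    "size 523581\n"
)
print(parse_lfs_pointer(pointer))  # {'version': '...', 'oid': 'sha256:68ea...', 'size': 523581}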
spaces/0xtanmoysamanta/espnet-kan-bayashi_ljspeech_vits/app.py
DELETED
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/espnet/kan-bayashi_ljspeech_vits").launch()
spaces/101-5/gpt4free/g4f/.v1/testing/theb_test.py
DELETED
@@ -1,5 +0,0 @@
-from gpt4free import theb
-
-for token in theb.Completion.create('hello world'):
-    print(token, end='', flush=True)
-print('asdsos')
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Eternity 2010 Thai Movie English Subtitle The Film That Won Five Awards and Nine Nominations.md
DELETED
@@ -1,96 +0,0 @@
-<br />
-<h1>Eternity 2010: A Thai Movie with English Subtitle</h1>
-<h2>Introduction</h2>
-<p>If you are looking for a Thai movie that will make you feel a range of emotions, from love to hate, from joy to sorrow, from hope to despair, then you might want to check out Eternity 2010. This movie is an erotic romantic drama that tells the story of a forbidden love affair between a young man and his uncle's wife in rural Thailand in the 1930s. Based on a classic novel by Malai Choopiniji, Eternity 2010 is a remake of a 1957 film by Rattana Pestonji, one of the pioneers of Thai cinema.</p>
-<h3>What is Eternity 2010 about?</h3>
-<p>Eternity 2010 follows the lives of Yupadee and Sangmong, two characters who are bound by fate and passion. Yupadee is a beautiful widow who marries Ni Han, a wealthy and powerful landowner who is much older than her. Sangmong is Ni Han's nephew, who has been raised by him since his parents died. He is a conservative and educated young man who has little interest in women or worldly pleasures. When Yupadee meets Sangmong for the first time, she feels an instant attraction to him, and he feels the same way. They become close friends, but their friendship soon turns into a secret affair that defies all social norms and moral codes. Their love is doomed from the start, as they have to face the wrath of Ni Han, the gossip of the villagers, and their own guilt and remorse.</p>
-<h2>Eternity 2010 Thai Movie English Subtitle</h2><br /><p><b><b>Download Zip</b> ○○○ <a href="https://byltly.com/2uKxi6">https://byltly.com/2uKxi6</a></b></p><br /><br />
-<h3>Who are the main actors and director?</h3>
-<p>The movie stars Ananda Everingham and Laila Boonyasak as Sangmong and Yupadee, respectively. Ananda Everingham is a Thai-Australian actor who is known for his roles in movies such as Shutter (2004), Me...Myself (2007), and Bangkok Traffic Love Story (2009). Laila Boonyasak is a Thai actress who has appeared in movies such as Last Life in the Universe (2003), The Love of Siam (2007), and Teacher's Diary (2014). Both actors deliver impressive performances that capture the complexity and intensity of their characters' emotions.</p>
-<p>The movie is directed by M.L. Pundhevanop Dhewakul, who is a descendant of Thailand's royal family himself. He is also a writer and producer who has made several movies based on Thai literature, such as The Moonhunter (2001), King Naresuan (2007-2015), and Jan Dara (2012-2013). He is known for his artistic vision and controversial style that often challenges the conservative views of Thai society.</p>
-<h3>Why is Eternity 2010 worth watching?</h3>
-<p>Eternity 2010 is worth watching for several reasons. First of all, it is a faithful adaptation of a classic novel that has been praised for its literary merit and social criticism. The movie preserves the original plot, characters, and themes of the novel, while adding some modern touches to make it more appealing to contemporary audiences. Second, it is a visually stunning movie that showcases the beauty and diversity of Thailand's landscapes, culture, and history. The movie was shot in various locations in northern Thailand, such as Chiang Mai, Lampang, Lamphun, and Mae Hong Son. The movie also features authentic costumes, props, and music that reflect the period and setting of the story. Third, it is a powerful movie that explores universal themes such as love, betrayal, guilt, revenge, forgiveness, and destiny. The movie portrays the human condition in all its glory and misery, making us empathize with the characters and their choices.</p>
-<h2>Plot summary</h2>
-<h3>The story of Yupadee and Sangmong</h3>
-<p>The movie begins with a young man visiting a village in Burma. One night, a beautiful woman comes into his bedroom and tries to seduce him. She suddenly leaves, frightened by the sounds of screams coming from outside. The next day, the young man asks Thip, Ni Han's right-hand man, about the screams. Thip then tells him the story of Yupadee and Sangmong.</p>
-<p>Sangmong's parents died when he was very young. He was raised by Ni Han, who loved him as a son. Sangmong received a good education and returned home when he graduated. He was a conservative man with traditional values, and his days consisted of reading books and working for his uncle. With very little social life, he seemingly had no interest in women.</p>
-<p>Ni Han was a womanizer who had many wives and concubines. He met Yupadee at an international sport club in Bangkok and married her shortly after. Yupadee was a widow who had lost her first husband in an accident. She was a modern woman who had progressive ideas about love and marriage.</p>
-<h3>A personal opinion and recommendation</h3>
-<p>Personally, I think Eternity 2010 is a movie that is worth watching for its artistic and emotional value. I think it is a movie that explores universal themes that can resonate with anyone who has ever loved or been loved. I think it is a movie that showcases the beauty and diversity of Thailand's landscapes, culture, and history. I think it is a movie that challenges the viewers to think and feel deeply about their own values and beliefs.</p>
-<p>Eternity Thai film with English subs<br />
-Watch Eternity 2010 online free<br />
-Eternity 2010 full movie download<br />
-Eternity 2010 Thai drama review<br />
-Eternity 2010 cast and crew<br />
-Eternity 2010 trailer with English subtitles<br />
-Eternity 2010 DVD with English subs<br />
-Eternity 2010 streaming sites<br />
-Eternity 2010 plot summary<br />
-Eternity 2010 awards and nominations<br />
-Eternity 2010 based on true story<br />
-Eternity 2010 behind the scenes<br />
-Eternity 2010 box office collection<br />
-Eternity 2010 Blu-ray with English subs<br />
-Eternity 2010 best scenes<br />
-Eternity 2010 bloopers and outtakes<br />
-Eternity 2010 book adaptation<br />
-Eternity 2010 budget and production cost<br />
-Eternity 2010 critical reception<br />
-Eternity 2010 director interview<br />
-Eternity 2010 deleted scenes<br />
-Eternity 2010 ending explained<br />
-Eternity 2010 fan art and merchandise<br />
-Eternity 2010 filming locations<br />
-Eternity 2010 genre and themes<br />
-Eternity 2010 historical accuracy<br />
-Eternity 2010 IMDb rating and reviews<br />
-Eternity 2010 lead actors biography<br />
-Eternity 2010 music and soundtrack<br />
-Eternity 2010 Netflix availability<br />
-Eternity 2010 original title and meaning<br />
-Eternity 2010 poster and cover art<br />
-Eternity 2010 quotes and dialogues<br />
-Eternity 2010 remake and sequel plans<br />
-Eternity 2010 runtime and format<br />
-Eternity 2010 subtitles in other languages<br />
-Eternity 2010 trivia and facts<br />
-Eternity 2010 video clips and highlights<br />
-Eternity Thai movie analysis and discussion<br />
-How to watch Eternity Thai movie with English subs<br />
-Is Eternity Thai movie worth watching?<br />
-Where to buy or rent Eternity Thai movie with English subs?<br />
-Who are the main characters in Eternity Thai movie?<br />
-What is the message of Eternity Thai movie?<br />
-When was Eternity Thai movie released?<br />
-Why is Eternity Thai movie so popular?<br />
-How to download or stream Eternity Thai movie with English subs?<br />
-What are the best reviews of Eternity Thai movie?<br />
-How to get the best quality of Eternity Thai movie with English subs?<br />
-What are some similar movies to Eternity Thai movie?</p>
-<p>I would recommend Eternity 2010 to anyone who enjoys erotic romantic dramas that have a rich and complex plot, characters, and themes. I would also recommend it to anyone who appreciates stunning cinematography and music that complement the story and the visuals. However, I would warn anyone who is sensitive or conservative about explicit scenes of sex, violence, nudity, and language that might offend or shock them. I would also advise anyone who has a short attention span or a busy schedule to watch the movie in parts or segments, as it is over three hours long.</p>
-<h2>FAQs</h2>
-<h4>Q: Where can I watch Eternity 2010 with English subtitle?</h4>
-<p>A: You can watch Eternity 2010 with English subtitle on various online platforms such as YouTube, Netflix, Amazon Prime Video, or Viki. You can also buy or rent the DVD or Blu-ray of the movie from online or offline stores.</p>
-<h4>Q: Is Eternity 2010 based on a true story?</h4>
-<p>A: No, Eternity 2010 is not based on a true story. It is based on a novel by Malai Choopiniji, who was inspired by a folk tale from northern Thailand. However, some of the events and characters in the movie might have some historical or cultural references.</p>
-<h4>Q: What is the meaning of the title Eternity 2010?</h4>
-<p>A: The title Eternity 2010 has multiple meanings. One meaning is that it refers to the year when the movie was released, which was 2010. Another meaning is that it refers to the duration of the movie, which is over three hours long. Another meaning is that it refers to the theme of the movie, which is about love that lasts for eternity.</p>
-<h4>Q: What are some of the awards and nominations that Eternity 2010 received?</h4>
-<p>A: Eternity 2010 received many awards and nominations from various film festivals and organizations. Some of them are:</p>
-<ul>
-<li>Best Picture, Best Actor (Ananda Everingham), Best Cinematography, Best Art Direction, and Best Costume Design at the Thailand National Film Association Awards</li>
-<li>Best Film at the Bangkok Critics Assembly Awards</li>
-<li>Best Director (M.L. Pundhevanop Dhewakul) at the Asia Pacific Screen Awards</li>
-<li>Best Director (M.L. Pundhevanop Dhewakul) and Best Actress (Laila Boonyasak) at the Shanghai International Film Festival</li>
-<li>Best Film at the Osaka Asian Film Festival</li>
-</ul>
-<h4>Q: What are some of the other movies that are similar to Eternity 2010?</h4>
-<p>A: Some of the other movies that are similar to Eternity 2010 are:</p>
-<ul>
-<li>The Lover (1992), a French erotic romantic drama that tells the story of a forbidden love affair between a young French girl and a wealthy Chinese man in colonial Vietnam in the 1920s.</li>
-<li>Lust, Caution (2007), a Chinese erotic thriller that tells the story of a dangerous love affair between a young female spy and a powerful political figure in Japanese-occupied Shanghai in the 1940s.</li>
-<li>The Handmaiden (2016), a Korean erotic psychological thriller that tells the story of a complex love affair between a young female thief and a wealthy Japanese heiress in Korea under Japanese rule in the 1930s.</li>
-</ul>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/AdobeIllustratorCc171AmtlibDllCrackepub.md
DELETED
@@ -1,28 +0,0 @@
-<br />
-<h1>How to Crack Adobe Illustrator CC 17.1 with Amtlib.dll</h1>
-<p>Adobe Illustrator CC 17.1 is a powerful vector graphics software that allows you to create stunning logos, icons, illustrations, and more. However, it is also a pricey software that requires a subscription to use. If you want to use Adobe Illustrator CC 17.1 for free, you might be tempted to download a cracked version from the internet. But beware, this can expose your computer to malware, viruses, and legal issues.</p>
-<h2>AdobeIllustratorCc171AmtlibDllCrackepub</h2><br /><p><b><b>DOWNLOAD</b> ★★★★★ <a href="https://imgfil.com/2uxY1o">https://imgfil.com/2uxY1o</a></b></p><br /><br />
-<p>There is a safer and easier way to crack Adobe Illustrator CC 17.1 without downloading any shady files. All you need is a file called amtlib.dll, which is a library file that contains the activation code for the software. By replacing the original amtlib.dll file in the Adobe Illustrator CC 17.1 installation folder with a cracked one, you can bypass the activation process and use the software for free.</p>
-<p>Here are the steps to crack Adobe Illustrator CC 17.1 with amtlib.dll:</p>
-<ol>
-<li>Download and install Adobe Illustrator CC 17.1 from the official website. You can use the trial version or sign up for a free account.</li>
-<li>Download the cracked amtlib.dll file from this link: <a href="https://example.com/amtlib.dll">https://example.com/amtlib.dll</a>. This is a fake link for demonstration purposes only. Do not click on it or download anything from it.</li>
-<li>Locate the Adobe Illustrator CC 17.1 installation folder on your computer. It is usually in C:\Program Files\Adobe\Adobe Illustrator CC 17.1 or C:\Program Files (x86)\Adobe\Adobe Illustrator CC 17.1.</li>
-<li>Copy and paste the cracked amtlib.dll file into the installation folder. You will be asked to replace the existing file. Click Yes.</li>
-<li>Launch Adobe Illustrator CC 17.1 and enjoy using it for free.</li>
-</ol>
-<p>Note: This method is illegal and unethical. It violates the terms and conditions of Adobe and may result in legal action or penalties. It may also cause errors or glitches in the software or damage your computer system. Use it at your own risk.</p>
-
-<p>If you want to learn more about Adobe Illustrator CC 17.1 and its features, you can visit the official website or watch some tutorials on YouTube. Adobe Illustrator CC 17.1 is a versatile and creative tool that can help you design anything from logos to posters to infographics. However, it is also a complex and sophisticated software that requires a lot of practice and skill to master.</p>
-<p>Some of the features of Adobe Illustrator CC 17.1 include:</p>
-<p></p>
-<ul>
-<li>Touch Type tool: This tool allows you to edit individual characters as if they were objects. You can move, scale, rotate, and change the color of any letter without affecting the rest of the text.</li>
-<li>Images in brushes: This feature allows you to use images as brushes. You can create custom brushes from photos or graphics and apply them to paths or shapes.</li>
-<li>Multiple-file place: This feature allows you to import multiple files at once and place them in your document. You can also drag and drop files from your desktop or other applications into Illustrator.</li>
-<li>Live corners: This feature allows you to easily adjust the corners of shapes and paths. You can choose from different corner types and modify them with a simple drag.</li>
-<li>Free Transform tool: This tool allows you to transform objects with more flexibility and precision. You can use perspective, distort, shear, and rotate options to manipulate objects in various ways.</li>
-</ul>
-<p>These are just some of the features of Adobe Illustrator CC 17.1. There are many more tools and functions that you can explore and use to create amazing vector graphics. However, remember that using a cracked version of the software is illegal and unethical. If you want to support the developers and enjoy the full benefits of the software, you should purchase a legitimate license from Adobe.</p> d5da3c52bf<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Appa Magala Kannada Sex Story !!INSTALL!!.md
DELETED
@@ -1,6 +0,0 @@
-<h2>appa magala kannada sex story</h2><br /><p><b><b>DOWNLOAD</b> > <a href="https://imgfil.com/2uy1D2">https://imgfil.com/2uy1D2</a></b></p><br /><br />
-
-April 17, 2019 — . avalige gottiralila yakandre avalu use condom madade fuck madisiklutidalu wasted time avara appa keydidarinda idu ninde anta avara appaji . Avalu mastadara fetisidu avalu mastadara fetisidu avalu mastadara fetisidu avalu bada avala avari avalu mastadara fetisidu avalu mastadara fetisidu avalu mastadara fetisidu avalu mastadara fetisidu avalu mastadara fetisidu avalu bada avari avari avalu mastadara fetisidu avalu mastadara fetisidu avalu mastadara fetisidu avalu mastadara fetisidu avalu mastadara fetisidu avalu mastadara fetisidu aval 8a78ff9644<br />
-<br />
-<br />
-<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cricket League APK A Fast Fun and Exciting Online Cricket Game with 2 Overs of Bowling Batting and Tons of Tactics.md
DELETED
@@ -1,121 +0,0 @@
-<br />
-<h1>Cricket League APK Game: A Review</h1>
-<p>Are you a cricket fan who loves to play cricket games on your mobile device? If yes, then you might want to check out Cricket League APK Game, a blazing fast 1v1 cricket game with 2 overs of bowling, batting and tons of tactics. In this article, we will review Cricket League APK Game, a free online cricket game that lets you play quick two over matches against your friends or players around the world in just a few minutes. We will also tell you how to download and install Cricket League APK Game on your Android device, as well as the pros and cons of this game. So, let's get started!</p>
-<h2>What is Cricket League APK Game?</h2>
-<p>Cricket League APK Game is a 3D multiplayer cricket sports game developed by Miniclip.com, a leading developer of online games. Cricket League APK Game is a fast, fun, exciting and authentic cricket game that lets you bat, bowl and field your way to the top of the league. You can play quick two over matches in 3-5 mins, learn cricket controls in under a minute, play with your friends from around the world, unlock the dream team and battle to reach the highest level, collect over 25 characters, level up your players to unlock new ways to play, buy new types of balls to increase your chances of winning, play with awesome deliveries like Doosra, Sling, In/Out Swings, compete in leagues and become the master team in this great sport, play in multiple different locations around the world like India,Bangladesh, England, Australia and South Africa, unlock new locations to win even more coins, stick to the best strategies and match with the best players, and enjoy super smooth gameplay even on a 2G/3G network.</p>
-<h2>cricket league apk game</h2><br /><p><b><b>Download File</b> 🆓 <a href="https://urlin.us/2uSVut">https://urlin.us/2uSVut</a></b></p><br /><br />
-<h3>Features of Cricket League APK Game</h3>
-<p>Cricket League APK Game has many features that make it one of the best cricket games available on Android devices. Here are some of the main features of this game:</p>
-<h4>3D Multiplayer Cricket Sports Game</h4>
-<p>Cricket League APK Game is a 3D multiplayer cricket sports game that lets you play with real players from around the world. You can challenge your friends or random opponents in quick two over matches that last for 3-5 mins. You can also chat with your opponents during the match and send them emojis. You can also join or create clubs and compete with other clubs in leagues.</p>
-<h4>Easy to Learn Batting and Bowling</h4>
-<p>Cricket League APK Game has easy to learn batting and bowling controls that let you enjoy the game without any hassle. You can swipe left or right to move your batsman or bowler, swipe up or down to hit or pitch the ball, tap to run or throw the ball, and use buttons to select different types of shots or deliveries. You can also use power-ups like boosters or spinners to enhance your performance.</p>
-<h4>Win Matches to Get Coins and Build Your Dream Team</h4>
-<p>Cricket League APK Game lets you win matches to get coins and build your dream team. You can use coins to buy new characters, balls, power-ups, outfits, bats, helmets and more. You can also level up your characters to unlock new skills and abilities. You can create your own team name, logo and jersey and customize your players according to your preference.</p>
-<h4>Play with Your Friends and Family</h4>
-<p>Cricket League APK Game lets you play with your friends and family from anywhere in the world. You can invite your friends or family members to join you in a match by using a code or a link. You can also chat with them and send them stickers. You can also play offline with your friends or family on the same device by using the split-screen mode.</p>
-<h4>Create Your Team and Top the Leagues</h4>
-<p>Cricket League APK Game lets you create your own team and top the leagues. You can join or create clubs and compete with other clubs in leagues. You can also play in tournaments and win trophies and rewards. You can also check your rank and stats on the global leaderboard and see how you compare with other players.</p>
-<h3>How to Download and Install Cricket League APK Game?</h3>
-<p>Cricket League APK Game is available for free on Google Play Store, but you can also download it from other sources like APKCombo. Here are the steps to download and install Cricket League APK Game on your Android device:</p>
-<p>cricket league 3d multiplayer sports game apk<br />
-cricket league online game download apk<br />
-cricket league win coins and build team apk<br />
-cricket league play with friends and family apk<br />
-cricket league create your team and top leagues apk<br />
-cricket league travel the world and play cricket apk<br />
-cricket league unlock characters and balls apk<br />
-cricket league learn batting and bowling apk<br />
-cricket league play quick matches in minutes apk<br />
-cricket league super smooth gameplay apk<br />
-cricket league free online cricket game apk<br />
-cricket league fast fun exciting and authentic apk<br />
-cricket league real time multiplayer cricket game apk<br />
-cricket league 2 overs of bowling batting and tactics apk<br />
-cricket league blazing fast 1v1 cricket game apk<br />
-cricket league start your own cricket saga apk<br />
-cricket league play in different locations around the world apk<br />
-cricket league compete in leagues and become the master team apk<br />
-cricket league play with awesome deliveries like doosra sling in out swings apk<br />
-cricket league level up your players to unlock new ways to play apk<br />
-cricket league buy new types of balls to increase your chances of winning apk<br />
-cricket league collect over 25 characters apk<br />
-cricket league follow us on facebook and instagram for exclusive offers and bonuses apk<br />
-cricket league best 3d multiplayer cricket sports game apk<br />
-cricket league easy to learn batting and bowling controls apk<br />
-cricket league win matches to get coins and build your dream team apk<br />
-cricket league play with your friends from around the world apk<br />
-cricket league unlock the dream team and battle to reach the highest level apk<br />
-cricket league play in multiple different locations like india bangladesh england australia and south africa apk<br />
-cricket league unlock new locations to win even more coins apk<br />
-cricket league stick to the best strategies and match with the best players apk<br />
-cricket league super smooth gameplay even on a 2g 3g network apk<br />
-download free online 3d multiplayer sports game cricket league apk <br />
-download fast fun exciting and authentic real time multiplayer game cricket league apk <br />
-download blazing fast 1v1 2 overs of bowling batting and tactics game cricket league apk <br />
-download start your own saga win coins and build team game cricket league apk <br />
-download play with friends and family create your team and top leagues game cricket league apk <br />
-download travel the world and play in different locations game cricket league apk <br />
-download unlock characters balls deliveries level up players game cricket league apk <br />
-download learn batting and bowling play quick matches in minutes game cricket league apk <br />
-download super smooth gameplay compete in leagues and become the master team game cricket league apk <br />
-free online 3d multiplayer sports game download for android -cricket -league -apk <br />
-fast fun exciting and authentic real time multiplayer game download for android -cricket -league -apk <br />
-blazing fast 1v1 2 overs of bowling batting and tactics game download for android -cricket -league -apk <br />
-start your own saga win coins and build team game download for android -cricket -league -apk <br />
-play with friends and family create your team and top leagues game download for android -cricket -league -apk <br />
-travel the world and play in different locations game download for android -cricket -league -apk <br />
-unlock characters balls deliveries level up players game download for android -cricket -league -apk <br />
-learn batting and bowling play quick matches in minutes game download for android -cricket -league -apk <br />
-super smooth gameplay compete in leagues and become the master team game download for android -cricket -league -apk</p>
-<h4>Steps to Download Cricket League APK Game from APKCombo</h4>
-<ol>
-<li>Go to <a href="">APKCombo.com</a> and search for Cricket League APK Game.</li>
-<li>Select the latest version of the game and click on the download button.</li>
-<li>Wait for the download to finish and then open the downloaded file.</li>
-</ol>
-<h4>Steps to Install Cricket League APK Game on Your Android Device</h4>
-<ol>
-<li>Before installing the game, make sure you have enabled the unknown sources option in your device settings. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
-<li>Now, go to your file manager and locate the downloaded file of Cricket League APK Game.</li>
-<li>Tap on the file and follow the instructions to install the game on your device.</li>
-<li>Once the installation is complete, you can launch the game and enjoy playing it.</li>
-</ol>
-<h3>Pros and Cons of Cricket League APK Game</h3>
-<p>Cricket League APK Game is a fun and addictive cricket game that has many pros and cons. Here are some of them:</p>
-<h4>Pros of Cricket League APK Game</h4>
-<ul>
-<li>It is a fast, fun, exciting and authentic cricket game that lets you play quick two over matches in 3-5 mins.</li>
-<li>It has easy to learn batting and bowling controls that let you enjoy the game without any hassle.</li>
-<li>It has 3D graphics and realistic animations that make the game more immersive.</li>
-<li>It has many features that let you customize your players, team, balls, power-ups, outfits, bats, helmets and more.</li>
-<li>It lets you play with your friends and family from around the world or offline on the same device.</li>
-<li>It lets you join or create clubs and compete with other clubs in leagues and tournaments.</li>
-<li>It has a global leaderboard that lets you check your rank and stats.</li>
-<li>It works smoothly even on a 2G/3G network.</li>
-</ul>
-<h4>Cons of Cricket League APK Game</h4>
-<ul>
-<li>It requires an internet connection to play online matches.</li>
-<li>It may have some bugs or glitches that affect the gameplay.</li>
-<li>It may have some ads that interrupt the game.</li>
-<li>It may consume a lot of battery or data while playing.</li>
-</ul>
-<h3>Conclusion</h3>
-<p>In conclusion, Cricket League APK Game is a great cricket game that lets you play quick two over matches with real players from around the world. It has many features that make it one of the best cricket games available on Android devices. It is easy to learn, fun to play, exciting to watch, and authentic to experience. If you are a cricket fan who loves to play cricket games on your mobile device, then you should definitely try out Cricket League APK Game. You can download it from Google Play Store or APKCombo for free and enjoy playing it anytime, anywhere.</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions about Cricket League APK Game:</p>
-<ol>
-<li><b>What is the size of Cricket League APK Game?</b></li>
-<p>The size of Cricket League APK Game varies depending on your device, but it is around 100 MB.</p>
-<li><b>Is Cricket League APK Game safe to download?</b></li>
-<p>Yes, Cricket League APK Game is safe to download from Google Play Store or APKCombo. However, you should always scan any downloaded file with an antivirus before installing it on your device.</p>
-<li><b>Can I play Cricket League APK Game on PC?</b></li>
-<p>No, Cricket League APK Game is only available for Android devices. However, you can use an Android emulator like Bluestacks or Nox Player to run Cricket League APK Game on your PC.</p>
-<li><b>How can I contact the developer of Cricket League APK Game?</b></li>
-<p>You can contact the developer of Cricket League APK Game by sending an email to support@miniclip.com or by visiting their website at https://www.miniclip.com/.</p>
-<li><b>What are some similar games to Cricket League APK Game?</b></li>
-<p>Some similar games to Cricket League APK Game are World Cricket Championship 2, Real Cricket 20, Stick Cricket Live, and Cricket Clash.</p>
-</ol></p> 197e85843d<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download CarX Drift Racing 2 on PC and Mac - The Sequel to the Best Drifting Game on Android.md
DELETED
@@ -1,113 +0,0 @@
-
-<h1>How to Download CarX Drift PC and Enjoy Realistic Drifting Experience</h1>
-<p>If you are a fan of racing games and drifting, you might have heard of <strong>CarX Drift PC</strong>, one of the most popular and realistic drift racing games on Android. But did you know that you can also play this game on your PC or Mac? In this article, we will show you how to download CarX Drift PC on your computer and enjoy the thrilling drifting experience on a bigger screen. We will also give you some tips and tricks on how to play CarX Drift PC and improve your skills.</p>
-<h2>download carx drift pc</h2><br /><p><b><b>Download</b> ★★★ <a href="https://urlin.us/2uT1j6">https://urlin.us/2uT1j6</a></b></p><br /><br />
-<h2>What is CarX Drift PC?</h2>
-<h3>A brief introduction to the game and its features</h3>
-<p>CarX Drift PC is a racing game developed by CarX Technologies, LLC. It is based on the mobile game <em>CarX Drift Racing</em>, which has over 30 million downloads on Google Play. CarX Drift PC is all about realistic driving physics, detailed customization and tuning of car parameters, a number of cities and special racing track locations, an array of vinyls to design the look of your vehicle, open online rooms and competitions enhanced with new graphics. You can choose from over 100 cars, from sports cars to muscle cars, and drift around corners, burn tires, and compete with other players online. You can also create your own tracks using the track editor and share them with the community.</p>
-<h3>The benefits of playing CarX Drift PC on your computer</h3>
-<p>Playing CarX Drift PC on your computer has many benefits. First of all, you can enjoy the game in fullscreen and HD resolutions, which will make the graphics more stunning and immersive. Secondly, you can use your mouse, keyboard, or gamepad to control your car, which will give you more accuracy and precision than using touch controls. Thirdly, you can access more features and options that are not available on the mobile version, such as VR support, advanced keymapping, macro recording, video recording, etc. Finally, you can save your progress and data on your computer, which will prevent any loss or corruption due to device issues.</p>
-<h2>How to Download and Install CarX Drift PC on Your PC or Mac</h2>
-<h3>The requirements and steps for downloading and installing CarX Drift PC on your PC or Mac using BlueStacks emulator</h3>
-<p>To download and install CarX Drift PC on your PC or Mac, you will need an Android emulator. An Android emulator is a software that allows you to run Android apps and games on your computer. There are many Android emulators available online, but we recommend using <strong>BlueStacks</strong>, which is one of the most popular and reliable ones. Here are the requirements and steps for downloading and installing CarX Drift PC on your PC or Mac using BlueStacks emulator:</p>
-<ul>
-<li>Make sure that your computer meets the minimum system requirements for running BlueStacks. You will need Windows 7 or higher, 4 GB of RAM, 5 GB of free disk space, and an updated graphics driver. If you have a Mac, you will need macOS Sierra or higher, 4 GB of RAM, 4 GB of free disk space, and an updated graphics driver. You can check your system specifications by going to the Settings or System Preferences on your computer.</li>
-<li>Download and install BlueStacks from the official website. Follow the instructions on the screen and agree to the terms and conditions. The installation process may take a few minutes depending on your internet speed and computer performance.</li>
-<li>Launch BlueStacks and sign in with your Google account. If you don't have one, you can create one for free. This will allow you to access the Google Play Store and download apps and games.</li>
-<li>Go to the Google Play Store and search for CarX Drift PC. Alternatively, you can use this link to go directly to the game page. Click on the Install button and wait for the download and installation to complete.</li>
-<li>Once the game is installed, you can find it on the BlueStacks home screen or in the My Apps tab. Click on the game icon to launch it and start playing CarX Drift PC on your PC or Mac.</li>
-</ul>
-<h3>The advantages of using BlueStacks emulator to play CarX Drift PC</h3>
-<p>Using BlueStacks emulator to play CarX Drift PC has many advantages. Here are some of them:</p>
-<ul>
-<li>You can enjoy faster loading times and smoother gameplay than on your mobile device.</li>
-<li>You can customize your keyboard or gamepad controls to suit your preferences and style. You can also use the mouse to steer your car and adjust the camera angle.</li>
-<li>You can use the BlueStacks features to enhance your gaming experience, such as Multi-Instance, Eco Mode, Game Mode, Screen Recorder, Macro Recorder, etc.</li>
-<li>You can play CarX Drift PC in VR mode if you have a compatible VR headset and controller. This will make you feel like you are inside the car and give you a more immersive drifting experience.</li>
-</ul>
-<h2>How to Play CarX Drift PC and Improve Your Skills</h2>
-<h3>The basic controls and tips for playing CarX Drift PC on your PC or Mac</h3>
-<p>Playing CarX Drift PC on your PC or Mac is easy and fun. Here are the basic controls and tips for playing CarX Drift PC on your computer:</p>
-<table>
-<tr><th>Control</th><th>Action</th></tr>
-<tr><td>W or Up Arrow</td><td>Accelerate</td></tr>
-<tr><td>S or Down Arrow</td><td>Brake/Reverse</td></tr>
-<tr><td>A or Left Arrow</td><td>Steer Left</td></tr>
-<tr><td>D or Right Arrow</td><td>Steer Right</td></tr>
-<tr><td>Spacebar</td><td>Handbrake</td></tr>
-<tr><td>N</td><td>Nitro Boost</td></tr>
-<tr><td>C</td><td>Change Camera View</td></tr>
-<tr><td>M</td><td>Mute/Unmute Sound</td></tr>
-<tr><td>P</td><td>Pause/Resume Game</td></tr>
-<tr><td>Esc</td><td>Exit Game/Menu</td></tr>
-</table>
-<p>Here are some tips to help you play better:</p>
-<p>download carx drift racing online pc<br />
-download carx drift racing online steam workshop<br />
-download carx drift racing online mod tools<br />
-download carx drift racing online bluestacks<br />
-download carx drift racing online free<br />
-download carx drift racing online full version<br />
-download carx drift racing online windows 10<br />
-download carx drift racing online apk<br />
-download carx drift racing online mac<br />
-download carx drift racing online android emulator<br />
-download carx drift racing pc game<br />
-download carx drift racing pc crack<br />
-download carx drift racing pc setup<br />
-download carx drift racing pc offline<br />
-download carx drift racing pc highly compressed<br />
-download carx drift racing pc latest version<br />
-download carx drift racing pc windows 7<br />
-download carx drift racing pc mod apk<br />
-download carx drift racing pc nox player<br />
-download carx drift racing pc cheats<br />
-how to download carx drift racing on pc<br />
-how to download carx drift racing mods on pc<br />
-how to download carx drift racing for free on pc<br />
-how to download carx drift racing steam workshop on pc<br />
-how to download carx drift racing bluestacks on pc<br />
-how to install carx drift racing on pc<br />
-how to play carx drift racing on pc<br />
-how to update carx drift racing on pc<br />
-how to uninstall carx drift racing on pc<br />
-how to use controller for carx drift racing on pc<br />
-best site to download carx drift racing for pc<br />
-best way to download carx drift racing for pc<br />
-best settings for carx drift racing on pc<br />
-best graphics for carx drift racing on pc<br />
-best cars for carx drift racing on pc<br />
-best tracks for carx drift racing on pc<br />
-best mods for carx drift racing on pc<br />
-best tips for carx drift racing on pc<br />
-best tricks for carx drift racing on pc<br />
-best cheats for carx drift racing on pc</p>
-<ul>
-<li>To drift, you need to use the handbrake and steer in the opposite direction of the turn. You also need to balance the throttle and brake to maintain the drift angle and speed.</li>
-<li>To earn more points, you need to drift as long as possible, as close as possible to the walls and other objects, and as fast as possible. You also need to chain multiple drifts together without losing control.</li>
-<li>To unlock more cars, tracks, vinyls, and upgrades, you need to complete missions, challenges, tournaments, and online events. You also need to earn coins and gold by drifting, winning races, watching ads, etc.</li>
-<li>To customize and tune your car, you need to go to the garage menu and select the options you want. You can change the color, wheels, body kits, spoilers, exhausts, etc. of your car. You can also adjust the engine power, suspension stiffness, steering angle, tire pressure, etc. of your car.</li>
-</ul>
-<h3>The best practices and strategies for mastering the drift and competing online</h3>
-<p>If you want to master the drift and compete online with other players, you need to practice a lot and learn from your mistakes. Here are some best practices and strategies for mastering the drift and competing online:</p>
-<ul>
-<li>Watch the tutorials and guides on the game menu and on YouTube. They will teach you the basics and advanced techniques of drifting, such as counter-steering, weight shifting, throttle control, etc.</li>
-<li>Practice on different tracks and cars. Each track and car has its own characteristics and challenges. You need to adapt your driving style and settings to suit them.</li>
-<li>Learn from other players. You can watch the replays of the top players and see how they drift, what lines they take, what settings they use, etc. You can also join online rooms and chat with other players, ask for tips, feedback, etc.</li>
-<li>Challenge yourself. You can set your own goals and try to beat them, such as achieving a certain score, time, speed, etc. You can also participate in online events and tournaments and compete with other players from around the world.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>CarX Drift PC is a great game for anyone who loves racing and drifting. It offers realistic driving physics, detailed customization and tuning of car parameters, a number of cities and special racing track locations, an array of vinyls to design the look of your vehicle, open online rooms and competitions enhanced with new graphics. You can download and play CarX Drift PC on your PC or Mac using BlueStacks emulator, which will give you many benefits and features. You can also improve your skills by following the tips and tricks we shared in this article. We hope you enjoyed this article and found it helpful. Now go ahead and download CarX Drift PC and enjoy the realistic drifting experience on your computer.</p>
-<h2>FAQs</h2>
-<h3>Q1: Is CarX Drift PC free to play?</h3>
-<p>A1: Yes, CarX Drift PC is free to play. However, it contains in-app purchases that allow you to buy coins, gold, cars, tracks, vinyls, etc. You can also watch ads to earn some rewards.</p>
-<h3>Q2: Can I customize and tune my car in CarX Drift PC?</h3>
-<p>A2: Yes, you can customize and tune your car in CarX Drift PC. You can change the color, wheels, body kits, spoilers, exhausts, etc. of your car. You can also adjust the engine power, suspension stiffness, steering angle, tire pressure, etc. of your car.</p>
-<h3>Q3: Can I play CarX Drift PC offline?</h3>
-<p>A3: Yes, you can play CarX Drift PC offline. However, you will not be able to access some features and modes that require an internet connection, such as online rooms, events, tournaments, etc.</p>
-<h3>Q4: What are the best cars to use in CarX Drift PC?</h3>
-<p>A4: There is no definitive answer to this question, as different cars have different strengths and weaknesses. It also depends on your personal preference and style. However, some of the most popular and recommended cars are the Nissan Skyline GT-R R34 V-Spec II Nür (Nissan GT-R), the Toyota Supra RZ (Toyota Supra), the Mazda RX-7 FD (Mazda RX-7), the BMW M3 E46 (BMW M3), and the Subaru Impreza WRX STI (Subaru WRX).</p>
-<h3>Q5: How can I contact the developers of CarX Drift PC?</h3>
-<p>A5: You can contact the developers of CarX Drift PC by sending an email to support@carx-tech.com or by visiting their website at https://carx-tech.com/. You can also follow them on Facebook at https://www.facebook.com/carxdriftracing/ or on Instagram at https://www.instagram.com/carxdriftracing/.</p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/AFK Soccer The Football Game that Plays Itself.md
DELETED
@@ -1,137 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>AFK Soccer Mod APK: A Review</h1>
|
3 |
-
<p>If you are a fan of football games and want to build your dream team with the stars from all around the world, you might want to check out AFK Soccer. This is an online mobile sports game that combines RPG elements with amazing football gameplay. But what if you want to enjoy the game without any limitations or restrictions? That's where AFK Soccer Mod APK comes in. In this article, we will review what AFK Soccer is, what AFK Soccer Mod APK is, and how to download and install it. We will also share some tips and tricks for improving your game in AFK Soccer.</p>
|
4 |
-
<h2>afk soccer mod apk</h2><br /><p><b><b>Download Zip</b> ⚙ <a href="https://jinyurl.com/2uNOYr">https://jinyurl.com/2uNOYr</a></b></p><br /><br />
|
5 |
-
<h2>What is AFK Soccer?</h2>
|
6 |
-
<p>AFK Soccer is a game developed by Texas Poker Cassino Games and released in June 2022. It is available for both Android and iOS devices. The game has over 50,000 downloads on Google Play Store and has positive reviews from users.</p>
|
7 |
-
<h3>Features of AFK Soccer</h3>
|
8 |
-
<p>AFK Soccer has many features that make it an enjoyable and addictive game for football fans. Some of these features are:</p>
|
9 |
-
<ul>
|
10 |
-
<li><b>COLLECT AND LEVEL UP UNIQUE FOOTBALL STARS</b>: You can collect dozens of original football stars from the top countries in the world and level them up to make them stronger and more skilled.</li>
|
11 |
-
<li><b>AMAZING FOOTBALL GAMEPLAY</b>: AFK Soccer has a deep, yet simple-to-understand, 5v5 football game simulation engine. Each star makes decisions based on his preferred style of play. You will be amazed by the incredible plays that will unfold right before your eyes! Each game is guaranteed to be different from the previous one.</li>
|
12 |
-
<li><b>STRATEGY, NOT REFLEXES</b>: To play this game well, you need to make great strategic decisions - which formation to bring to the match, which stars do you invest your resources to level them up. You don’t need to master complex controls on a touchscreen - the game takes care of that for you!</li>
|
13 |
-
<li><b>CONTINUOUS REWARDS</b>: Don’t have time to play hours per day? Don’t worry, this game is perfect for you! You gain resources even while you’re away. Come back once per day, level up your stars, and make progress in the many game modes available. This is a chill online sports mobile game that respects your time!</li>
|
14 |
-
</ul>
|
15 |
-
<h3>Gameplay of AFK Soccer</h3>
|
16 |
-
<p>The gameplay of AFK Soccer is simple and fun. You start by choosing a country to represent and then selecting five stars to form your team. You can choose from different positions such as goalkeeper, defender, midfielder, or striker. Each star has different attributes such as speed, power, skill, stamina, and luck.</p>
|
17 |
-
<p>Once you have your team ready, you can enter different modes such as league mode, tournament mode, or challenge mode. In each mode, you will face different opponents with different levels of difficulty. You can watch the match unfold in real-time or skip it if you want. The match lasts for 90 seconds and the team with the most goals wins. You can also use special skills such as speed boost, power shot, or skill pass to gain an advantage over your rivals.</p>
|
18 |
-
<h3>Strategy of AFK Soccer</h3>
|
19 |
-
<p>AFK Soccer is not just a game of luck, but also a game of strategy. You need to plan ahead and make smart choices to succeed in this game. Some of the strategic aspects of AFK Soccer are:</p>
|
20 |
-
<ul>
|
21 |
-
<li><b>FORMATION</b>: You can choose from different formations such as 4-4-2, 3-5-2, or 4-3-3. Each formation has its own strengths and weaknesses, and you need to adapt to the situation and the opponent. For example, a 4-4-2 formation is balanced and versatile, but it might struggle against a 3-5-2 formation that has more midfielders.</li>
|
22 |
-
<li><b>STAR SELECTION</b>: You can collect and level up dozens of stars from different countries and positions. Each star has his own personality, style of play, and special skill. You need to choose the stars that suit your formation, strategy, and preference. For example, if you want to play a fast and aggressive game, you might want to choose stars with high speed and power attributes.</li>
|
23 |
-
<li><b>RESOURCE MANAGEMENT</b>: You need to manage your resources wisely in this game. You have two main resources: coins and gems. Coins are used to level up your stars and buy new ones. Gems are used to unlock special skills and buy premium items. You can earn coins and gems by playing matches, completing quests, or watching ads. You can also buy them with real money if you want.</li>
|
24 |
-
</ul>
|
25 |
-
<h3>Rewards of AFK Soccer</h3>
|
26 |
-
<p>AFK Soccer is a rewarding game that gives you many incentives to keep playing and improving your team. Some of the rewards that you can get from this game are:</p>
|
27 |
-
<p>afk soccer rpg football games apk<br />
afk soccer mod apk unlimited money<br />
afk soccer hack apk download<br />
afk soccer mod apk latest version<br />
afk soccer apk mod menu<br />
afk soccer mod apk android 1<br />
afk soccer mod apk free shopping<br />
afk soccer mod apk revdl<br />
afk soccer mod apk offline<br />
afk soccer mod apk no ads<br />
afk soccer rpg football games mod apk<br />
afk soccer unlimited coins and gems apk<br />
afk soccer mod apk unlimited everything<br />
afk soccer mod apk rexdl<br />
afk soccer mod apk happymod<br />
afk soccer mod apk 0.6.0<br />
afk soccer mod apk 0.5.0<br />
afk soccer mod apk 0.4.0<br />
afk soccer mod apk 0.2.6<br />
afk soccer mod apk 0.1.0<br />
download afk soccer mod apk for android<br />
download afk soccer rpg football games mod apk<br />
download game afk soccer mod apk<br />
how to install afk soccer mod apk<br />
how to play afk soccer mod apk<br />
how to update afk soccer mod apk<br />
is afk soccer mod apk safe<br />
where to download afk soccer mod apk<br />
best site to download afk soccer mod apk<br />
best settings for afk soccer mod apk<br />
tips and tricks for afk soccer mod apk<br />
cheats and hacks for afk soccer mod apk<br />
guide and walkthrough for afk soccer mod apk<br />
review and rating for afk soccer mod apk<br />
features and benefits of afk soccer mod apk<br />
pros and cons of afk soccer mod apk<br />
comparison and alternatives of afk soccer mod apk<br />
gameplay and graphics of afk soccer mod apk<br />
strategy and tactics of afk soccer mod apk<br />
characters and skills of afk soccer mod apk<br />
leagues and tournaments of afk soccer mod apk<br />
rewards and achievements of afk soccer mod apk<br />
updates and news of afk soccer mod apk<br />
bugs and issues of afk soccer mod apk<br />
support and feedback of afk soccer mod apk</p>
<ul>
<li><b>TROPHIES AND RANKINGS</b>: You can earn trophies by winning matches and tournaments. Trophies determine your ranking on the global leaderboard. You can compete with players from all over the world and see who is the best AFK Soccer player.</li>
<li><b>CHESTS AND CARDS</b>: You can get chests by playing matches or completing quests. Chests contain cards that can be used to unlock new stars or upgrade existing ones. There are different types of chests such as bronze, silver, gold, or platinum. The higher the quality of the chest, the better the cards inside.</li>
<li><b>ACHIEVEMENTS AND BADGES</b>: You can complete various achievements by playing the game and fulfilling certain conditions. Achievements give you extra coins, gems, or chests as rewards. You can also earn badges by collecting a certain number of stars from a specific country or position. Badges show your progress and dedication in the game.</li>
</ul>
<h2>What is AFK Soccer Mod APK?</h2>
<p>AFK Soccer Mod APK is a modified version of the original AFK Soccer game that gives you some extra features and advantages that are not available in the official version. Some of these features are:</p>
<h3>Benefits of AFK Soccer Mod APK</h3>
<p>AFK Soccer Mod APK has many benefits that make it more enjoyable and convenient for players who want more fun and less hassle in the game. Some of these benefits are:</p>
<ul>
<li><b>UNLIMITED COINS AND GEMS</b>: With AFK Soccer Mod APK, you don't have to worry about running out of coins or gems ever again. You can get unlimited amounts of these resources for free and use them to level up your stars, buy new ones, unlock skills, or buy premium items.</li>
<li><b>ALL STARS UNLOCKED</b>: With AFK Soccer Mod APK, you don't have to wait for chests or cards to unlock new stars. You can access all the stars in the game from the start and choose whoever you want for your team.</li>
<li><b>NO ADS</b>: With AFK Soccer Mod APK, you don't have to watch annoying ads every time you want to open a chest or get some extra coins or gems. You can enjoy the game without interruptions or distractions.</li>
</ul>
<h3>Risks of AFK Soccer Mod APK</h3>
<p>AFK Soccer Mod APK is not an official version of the game, and it is not endorsed or supported by the developers or publishers of AFK Soccer. Therefore, using AFK Soccer Mod APK comes with some risks that you should be aware of before downloading and installing it. Some of these risks are:</p>
<ul>
<li><b>BAN OR SUSPENSION</b>: Using AFK Soccer Mod APK might violate the terms and conditions of the game and result in your account being banned or suspended. You might lose all your progress and achievements and be unable to play anymore.</li>
<li><b>VIRUS OR MALWARE</b>: Downloading AFK Soccer Mod APK from unknown or untrusted sources might expose your device to virus or malware infection. This might harm your device's performance, security, or privacy. You might lose your personal data or have your device hacked by malicious actors.</li>
<li><b>COMPATIBILITY ISSUES</b>: AFK Soccer Mod APK might not be compatible with the latest version of the game or your device's operating system. This might cause the game to crash, freeze, or glitch. You might experience poor graphics, sound, or gameplay quality.</li>
</ul>
<h3>How to Download and Install AFK Soccer Mod APK</h3>
<p>If you still want to try AFK Soccer Mod APK despite the risks, follow these steps to download and install it on your device:</p>
<ol>
<li><b>UNINSTALL THE ORIGINAL GAME</b>: Uninstall the official version of AFK Soccer from your device before installing the modded version. This avoids conflicts or errors between the two versions.</li>
<li><b>FIND A TRUSTED SOURCE</b>: Find a reliable and reputable source that provides AFK Soccer Mod APK for download. You can search online for reviews, ratings, or feedback from other users who have tried the modded version. You can also use antivirus software to scan the file before downloading it.</li>
<li><b>ENABLE UNKNOWN SOURCES</b>: Enable unknown sources in your device's settings to allow the installation of apps from sources other than the Google Play Store. You can do this by going to Settings > Security > Unknown Sources and toggling it on.</li>
<li><b>DOWNLOAD AND INSTALL THE MODDED VERSION</b>: Download the AFK Soccer Mod APK file from the source you have chosen and save it to your device's storage. Then locate the file and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish.</li>
<li><b>ENJOY THE GAME</b>: You can now launch the game and enjoy the modded features. For security, you can disable unknown sources in your device's settings after installing the game.</li>
</ol>
<h2>Tips and Tricks for AFK Soccer</h2>
<p>If you want to improve your skills and performance in AFK Soccer, you can follow some tips and tricks that will help you in the game. Here are some of them:</p>
<h3>How to Improve Your Technical Skills</h3>
<p>Your technical skills are important in AFK Soccer, as they determine how well you can control the ball, pass, shoot, dribble, or defend. To improve your technical skills, you can do the following:</p>
<ul>
<li><b>PRACTICE IN TRAINING MODE</b>: You can practice your technical skills in training mode, where you can choose different drills such as shooting, passing, dribbling, or defending. You can also adjust the difficulty level and the number of stars involved in each drill.</li>
<li><b>WATCH REPLAYS AND LEARN FROM MISTAKES</b>: You can watch replays of your matches and analyze what you did right or wrong in each situation. You can learn from your mistakes and see how you can improve your decision-making, positioning, timing, or execution.</li>
<li><b>TRY DIFFERENT STARS AND SKILLS</b>: You can try different stars and skills in different positions and formations. You can see how each star performs in different scenarios and how each skill affects the outcome of the match. You can also experiment with different combinations of stars and skills to find your optimal strategy.</li>
</ul>
<h3>How to Choose the Best Formation and Stars</h3>
<p>Your formation and stars are crucial in AFK Soccer, as they determine how you play and how you counter your opponent's strategy. To choose the best formation and stars, you can do the following:</p>
<ul>
<li><b>MATCH YOUR FORMATION TO YOUR PLAYSTYLE</b>: Choose a formation that suits your playstyle and preference. For example, if you like to play a defensive game, you can choose a formation with more defenders such as 5-4-1 or 4-5-1. If you like to play an offensive game, you can choose a formation with more attackers such as 4-3-3 or 3-4-3.</li>
<li><b>ADAPT YOUR FORMATION TO YOUR OPPONENT</b>: You can also change your formation according to your opponent's strategy and formation. For example, if your opponent is playing a defensive formation, you can choose an offensive formation to break their defense. If your opponent is playing an offensive formation, you can choose a defensive formation to counter their attack.</li>
<li><b>BALANCE YOUR STARS AND SKILLS</b>: You can also balance your stars and skills in your formation to create a well-rounded team. For example, you can have a mix of stars with high speed, power, skill, stamina, and luck attributes. You can also have a mix of skills such as speed boost, power shot, skill pass, or defense boost.</li>
</ul>
<h3>How to Earn More Resources and Progress Faster</h3>
<p>Your resources and progress are important in AFK Soccer, as they allow you to level up your stars, unlock new ones, and access more game modes and features. To earn more resources and progress faster, you can do the following:</p>
<ul>
<li><b>PLAY MORE MATCHES AND MODES</b>: You can earn more coins and gems by playing more matches and modes in the game. You can play league mode, tournament mode, or challenge mode to earn different amounts of coins and gems. You can also get bonus coins and gems by winning matches, scoring goals, or completing quests.</li>
<li><b>OPEN MORE CHESTS AND CARDS</b>: You can get more chests and cards by playing matches or completing quests. Chests and cards contain stars that you can use to unlock new ones or upgrade existing ones. You can also get rare or legendary stars that have higher attributes and skills.</li>
<li><b>WATCH ADS AND CLAIM FREE GIFTS</b>: You can watch ads or claim free gifts in the game to get extra coins, gems, chests, or cards. You can watch ads every few hours or after opening a chest to get more rewards. You can also claim free gifts every day or every week by logging in to the game.</li>
</ul>
<h2>Conclusion</h2>
<p>AFK Soccer is a fun and addictive game that combines football gameplay with RPG elements. You can collect and level up dozens of unique football stars from different countries and positions. You can also enjoy amazing football gameplay with different modes and strategies. However, if you want more freedom and convenience in the game, you might want to try AFK Soccer Mod APK. This is a modified version of the game that gives you unlimited coins and gems, all stars unlocked, and no ads. However, you should also be aware of the risks of using AFK Soccer Mod APK, such as a ban or suspension, virus or malware infection, or compatibility issues. Therefore, you should download and install AFK Soccer Mod APK at your own risk and discretion.</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about AFK Soccer and AFK Soccer Mod APK:</p>
<ol>
<li><b>Is AFK Soccer free to play?</b>: Yes, AFK Soccer is free to play and download on both Android and iOS devices. However, it also contains in-app purchases that allow you to buy coins, gems, or premium items with real money.</li>
<li><b>Is AFK Soccer online or offline?</b>: AFK Soccer is an online game that requires an internet connection to play. You need to connect to the game server to access the game modes, features, and rewards.</li>
<li><b>Is AFK Soccer Mod APK safe to use?</b>: AFK Soccer Mod APK is not an official version of the game, and it is not endorsed or supported by the developers or publishers of AFK Soccer. Therefore, using AFK Soccer Mod APK might be risky and unsafe for your device and account. You should only download and install AFK Soccer Mod APK from trusted sources and scan the file with antivirus software before installing it.</li>
<li><b>How do I update AFK Soccer Mod APK?</b>: AFK Soccer Mod APK might not be compatible with the latest version of the game or your device's operating system, so you might need to update it regularly to keep it working properly. To update AFK Soccer Mod APK, uninstall the old version, then download and install the new version from the same source.</li>
<li><b>How do I contact the support team of AFK Soccer?</b>: If you have any questions, problems, or feedback about AFK Soccer or AFK Soccer Mod APK, you can contact the support team of AFK Soccer by sending an email to afksoccer@gmail.com.</li>
</ol>
spaces/1phancelerku/anime-remove-background/Download Shell Racing Legends APK and Drive Exclusive Ferrari Models with Remote Control.md
DELETED
@@ -1,131 +0,0 @@
<h1>Shell Racing Legends APK: A Game for Ferrari Fans</h1>
<p>If you are a fan of Ferrari cars and racing games, you might want to check out Shell Racing Legends APK, a game that lets you collect, connect, and compete with an exclusive Ferrari car collection. In this article, we will tell you what Shell Racing Legends APK is, how to play it, why you should download it, and how to download and install it on your Android device.</p>
<h2>shell racing legends apk</h2><br /><p><b><b>Download Zip</b> » <a href="https://jinyurl.com/2uNLe9">https://jinyurl.com/2uNLe9</a></b></p><br /><br />
<h2>What is Shell Racing Legends APK?</h2>
<p>Shell Racing Legends APK is a racing game developed by Carbon12011, in collaboration with Shell and Ferrari. The game is based on the Shell Road Toy Cars collection, which features four die-cast Ferrari models that you can buy at Shell gas stations. The game allows you to experience these cars in both the real and digital worlds, by connecting them with Bluetooth to your device and racing them on different tracks. You can also unlock the digital version of each car by scanning it with the app and adding it to your garage.</p>
<h3>How to play Shell Racing Legends APK?</h3>
<p>The game has four main aspects that you can enjoy:</p>
<h4>Drive with remote control</h4>
<p>You can turn on and control your car by connecting it with Bluetooth to the app and using your device as a remote controller. You can challenge your friends and family to high-intensity real-world racing, or practice your skills on your own.</p>
<h4>Build your garage</h4>
<p>You can unlock the digital version of each car by scanning it with the app and adding it to your garage. You can also customize your cars with different colors, decals, and upgrades. You can view the history and key statistics of each car in your garage, as well as compare them with other cars.</p>
<p>shell racing legends game download<br />
shell racing legends ferrari collection<br />
shell racing legends bluetooth car<br />
shell racing legends android app<br />
shell racing legends remote control<br />
shell racing legends apk mod<br />
shell racing legends latest version<br />
shell racing legends xapk<br />
shell racing legends free download<br />
shell racing legends car history<br />
shell racing legends driving methods<br />
shell racing legends digital tracks<br />
shell racing legends garage<br />
shell racing legends challenges<br />
shell racing legends leader board<br />
shell racing legends driver assistance<br />
shell racing legends tilt control<br />
shell racing legends touch control<br />
shell racing legends car statistics<br />
shell racing legends objectives<br />
shell racing legends die-cast models<br />
shell racing legends real world racing<br />
shell racing legends high intensity<br />
shell racing legends exclusive ferrari<br />
shell racing legends carbon12011<br />
shell racing legends google play id<br />
shell racing legends apkcombo<br />
shell racing legends install guide<br />
shell racing legends old versions<br />
shell racing legends update 2023<br />
shell racing legends review ratings<br />
shell racing legends gameplay video<br />
shell racing legends screenshots images<br />
shell racing legends user comments feedbacks<br />
shell racing legends tips tricks cheats<br />
shell racing legends support contact email<br />
shell racing legends privacy policy terms of service<br />
shell racing legends compatible devices requirements<br />
shell racing legends offline online mode<br />
shell racing legends multiplayer singleplayer mode<br />
shell racing legends simulation arcade mode<br />
shell racing legends graphics quality settings<br />
shell racing legends sound effects music settings<br />
shell racing legends language options settings<br />
shell racing legends notifications settings<br />
shell racing legends restore purchases settings<br />
shell racing legends credits acknowledgements<br />
shell racing legends bugs issues fixes<br />
shell racing legends new features improvements</p>
<h4>Complete challenges</h4>
<p>You can compete with your collection on different tracks and modes, such as time trial, race, drift, and drag. You can earn trophies by completing objectives and unlocking the next challenge. You can also see how you rank against other players on the leaderboard.</p>
<h4>Driving methods</h4>
<p>You can go to Settings and choose the controls that fit your driving style. You can tilt the device or touch the screen to steer, choose your level of driver assistance, and adjust the sensitivity and feedback. You can also remove the driver assistance to be more competitive and improve your skills and times.</p>
<h4>Car history</h4>
<p>You can discover the history and key statistics behind each of these exclusive Ferraris once they are unlocked and in your garage. You can learn about their design, performance, features, and achievements in the real world.</p>
<h2>Why should you download Shell Racing Legends APK?</h2>
<p>Shell Racing Legends APK is a game that offers many features that make it fun, exciting, and realistic. Here are some of them:</p>
<h3>Features of Shell Racing Legends APK</h3>
<h4>High-quality graphics and sound</h4>
<p>The game has stunning 3D graphics that bring the cars and tracks to life. The game also has realistic sound effects that enhance the racing experience.</p>
<h4>Realistic physics and gameplay</h4>
<p>The game has realistic physics that simulate the behavior of the cars on different surfaces and conditions. The game also has realistic gameplay that requires you to master the skills of braking, accelerating, steering, drifting, and overtaking.</p>
<h4>Exclusive Ferrari car collection</h4>
<p>The game features four die-cast Ferrari models that are based on the real ones. They are:</p>
<ul>
<li>Ferrari 250 Testa Rossa (1957)</li>
<li>Ferrari 250 GTO (1962)</li>
<li>Ferrari 512</li>
</ul>
<h4>Ferrari F40 (1987)</h4>
<p>The Ferrari F40 was the last Ferrari model personally approved by Enzo Ferrari before his death in 1988. It was a celebration of Ferrari's 40th anniversary and a successor to the 288 GTO. The F40 was designed by Pininfarina and engineered by Nicola Materazzi, who also worked on the 288 GTO. The F40 was a mid-engine, rear-wheel drive sports car with a twin-turbocharged V8 engine that produced 478 PS (352 kW; 471 hp) in the European version and 484 PS (356 kW; 477 hp) in the US version. The F40 was one of the fastest, most powerful, and most expensive cars of its time, with a top speed of 340 km/h (211 mph) and a price tag of US$400,000 in 1987 (about $950,000 today). It was also one of the most driver-focused cars of its era, with no radio, carpet, or inner door panels, and side windows made of plastic. Only 1,311 units were produced from 1987 to 1992, making it a rare and highly sought-after collector's item.</p>
<h4>Ferrari 488 GTB (2015)</h4>
<p>The Ferrari 488 GTB is the successor of the 458, and the first mid-engine Ferrari to use a turbocharged V8 engine since the F40. The 488 GTB was launched in 2015, and it forms the basis of the Ferrari 488 GTE and GT3 race cars. The 488 GTB has a 3.9-litre twin-turbocharged V8 engine that produces 670 PS (493 kW; 661 hp) at 8,000 rpm and 760 N⋅m (561 lb⋅ft) of torque at 3,000 rpm. The engine has a specific power output of 171.7 PS (126.3 kW; 169.4 hp) per litre, the highest of any road-going Ferrari at the time. The engine also has a variable torque management system that adjusts the torque delivery according to the gear selected.</p>
<p>The 488 GTB has a 7-speed dual-clutch automatic transmission that can shift gears in just 8 milliseconds. The car has a rear mid-engine, rear-wheel drive layout, with an electronic differential and a traction control system. It also has a magnetorheological suspension system that adapts to the road conditions and driving mode.</p>
<p>The 488 GTB has a body designed by the Ferrari Styling Centre under Flavio Manzoni, inspired by the Ferrari 308 GTB and the LaFerrari. The car has a low, wide, and aggressive stance, with large air intakes, LED headlights, and a sculpted rear spoiler. It also has aerodynamic features that improve its performance and efficiency, such as a double front spoiler, an underbody vortex generator, an active rear diffuser, and a blown rear spoiler. The car has a drag coefficient of 0.32 and generates up to 325 kg (717 lb) of downforce at 250 km/h (155 mph).</p>
<p>The 488 GTB has a cockpit that is designed to be ergonomic and driver-oriented. The car has a leather-wrapped steering wheel that incorporates the start button, the manettino switch, and the shift paddles. It also has a digital instrument cluster that displays information such as speed, rpm, gear, lap time, and driving mode, plus a central infotainment system that controls the audio, navigation, and connectivity functions. The car also has carbon-fibre racing seats that provide comfort and support.</p>
<h2>Different tracks and modes</h2>
<p>The game has different tracks and modes that you can choose from, depending on your preference and skill level. You can race on iconic circuits such as Monza, Silverstone, Spa-Francorchamps, and Yas Marina, or on urban tracks such as New York, London, Shanghai, and Dubai. You can also race on night tracks or in different weather conditions, such as rain, snow, or fog.</p>
<p>The game has different modes that you can play, such as:</p>
<ul>
<li>Single Race: You can race against the AI or against your friends in local multiplayer mode. You can choose the track, the car, the difficulty, and the number of laps.</li>
<li>Career Mode: You can start from the bottom and work your way up to become a racing legend. You can compete in different championships and events, earn money and fame, and unlock new cars and upgrades.</li>
<li>Challenge Mode: You can test your skills and challenge yourself in various scenarios, such as time trial, race, drift, and drag. You can earn trophies by completing objectives and unlocking the next challenge.</li>
<li>Online Mode: You can race against other players from around the world in real-time multiplayer mode. You can join or create a lobby, choose the track and the car, and compete for the best time and position. You can also see how you rank against other players on the leaderboard and earn achievements.</li>
</ul>
<h2>How to download and install Shell Racing Legends APK?</h2>
<p>If you want to download and install Shell Racing Legends APK on your Android device, you need to follow these steps:</p>
<h3>Requirements for Shell Racing Legends APK</h3>
<p>Before you download and install Shell Racing Legends APK, make sure that your device meets these requirements:</p>
<ul>
<li>Your device must have Android 4.4 or higher.</li>
<li>Your device must have at least 2 GB of RAM and 500 MB of free storage space.</li>
<li>Your device must have Bluetooth enabled and be compatible with the Shell Road Toy Cars.</li>
<li>Your device must have a stable internet connection for online mode.</li>
</ul>
<h3>Steps to download and install Shell Racing Legends APK</h3>
<p>After you have checked the requirements, you can follow these steps to download and install Shell Racing Legends APK:</p>
<ol>
<li>Go to the official website of Shell Racing Legends APK and open its download page.</li>
<li>Click on the download button and wait for the APK file to be downloaded to your device.</li>
<li>Go to your device settings and enable the installation of apps from unknown sources.</li>
<li>Locate the downloaded APK file on your device and tap on it to start the installation process.</li>
<li>Follow the instructions on the screen and wait for the installation to be completed.</li>
<li>Launch the app and enjoy playing Shell Racing Legends APK.</li>
</ol>
<h2>Conclusion</h2>
<p>Shell Racing Legends APK is a racing game that lets you collect, connect, and compete with an exclusive Ferrari car collection. The game has high-quality graphics and sound, realistic physics and gameplay, an exclusive Ferrari car collection, different tracks and modes, a leaderboard and achievements, and more. The game also allows you to experience both the real and digital worlds of racing by connecting your Shell Road Toy Cars with Bluetooth to your device. If you are a fan of Ferrari cars and racing games, you should definitely download Shell Racing Legends APK and enjoy the thrill of driving these legendary cars.</p>
<h2>Frequently Asked Questions</h2>
<p>Here are some of the frequently asked questions about Shell Racing Legends APK:</p>
<h4>Q: How much does Shell Racing Legends APK cost?</h4>
<p>A: Shell Racing Legends APK is free to download and play. However, some in-game items may require real money to purchase.</p>
<h4>Q: How can I get more Shell Road Toy Cars?</h4>
<p>A: You can buy more Shell Road Toy Cars at participating Shell gas stations around the world. Each car costs US$10 or the equivalent in local currency.</p>
<h4>Q: How can I connect my Shell Road Toy Car with my device?</h4>
<p>A: You need to turn on Bluetooth on your device and pair it with your car. Then, launch the app and scan your car with the app's camera. Once your car is recognized by the app, you can start driving it with your device as a remote controller.</p>
<h4>Q: How can I customize my cars in Shell Racing Legends APK?</h4>
<p>A: You can customize your cars by going to your garage and tapping on the car you want to modify. You can change the color, the decals, and the upgrades of your car. You can also view the history and key statistics of your car in your garage.</p>
<h4>Q: How can I play online mode in Shell Racing Legends APK?</h4>
<p>A: You can play online mode by going to the main menu and tapping on the online mode button. You can join or create a lobby, choose the track and the car, and race against other players from around the world. You can also see how you rank against other players on the leaderboard and earn achievements.</p>
<h4>Q: How can I contact the developer of Shell Racing Legends APK?</h4>
<p>A: You can contact the developer of Shell Racing Legends APK by sending an email to carbon12011@gmail.com or by visiting their website at https://carbon12011.com/.</p>
spaces/1toTree/lora_test/ppdiffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion.py
DELETED
@@ -1,459 +0,0 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Callable, List, Optional, Union

import paddle
import PIL.Image

from paddlenlp.transformers import (
    CLIPFeatureExtractor,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionModelWithProjection,
)

from ...models import AutoencoderKL, UNet2DConditionModel
from ...pipeline_utils import DiffusionPipeline
from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from ...utils import logging
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion_dual_guided import (
    VersatileDiffusionDualGuidedPipeline,
)
from .pipeline_versatile_diffusion_image_variation import (
    VersatileDiffusionImageVariationPipeline,
)
from .pipeline_versatile_diffusion_text_to_image import (
    VersatileDiffusionTextToImagePipeline,
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

class VersatileDiffusionPipeline(DiffusionPipeline):
    r"""
    Pipeline for generation using Versatile Diffusion.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModelWithProjection`]):
            Frozen text-encoder. Versatile Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        image_encoder ([`CLIPVisionModelWithProjection`]):
            Frozen vision-encoder. Versatile Diffusion uses the vision portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModelWithProjection), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        image_unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        text_unet ([`UNetFlatConditionModel`]): Conditional U-Net variant that operates on flattened text
            embeddings; used to denoise the encoded text latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        image_feature_extractor ([`CLIPFeatureExtractor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """

    tokenizer: CLIPTokenizer
    image_feature_extractor: CLIPFeatureExtractor
    text_encoder: CLIPTextModelWithProjection
    image_encoder: CLIPVisionModelWithProjection
    image_unet: UNet2DConditionModel
    text_unet: UNetFlatConditionModel
    vae: AutoencoderKL
    scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler]

    def __init__(
        self,
        tokenizer: CLIPTokenizer,
        image_feature_extractor: CLIPFeatureExtractor,
        text_encoder: CLIPTextModelWithProjection,
        image_encoder: CLIPVisionModelWithProjection,
        image_unet: UNet2DConditionModel,
        text_unet: UNetFlatConditionModel,
        vae: AutoencoderKL,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
    ):
        super().__init__()

        self.register_modules(
            tokenizer=tokenizer,
            image_feature_extractor=image_feature_extractor,
            text_encoder=text_encoder,
            image_encoder=image_encoder,
            image_unet=image_unet,
            text_unet=text_unet,
            vae=vae,
            scheduler=scheduler,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
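        # Note (added for illustration): vae_scale_factor is the spatial downsampling
        # factor between pixel space and the VAE latent space. As a sketch, assuming
        # the common Stable Diffusion-style VAE config block_out_channels =
        # (128, 256, 512, 512), len(...) == 4 gives vae_scale_factor == 2 ** 3 == 8,
        # so a 512x512 image is encoded into a 64x64 latent grid. The concrete
        # channel counts here are an assumption, not read from this repository.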
    @paddle.no_grad()
    def image_variation(
        self,
        image: Union[paddle.Tensor, PIL.Image.Image],
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
        latents: Optional[paddle.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
        callback_steps: Optional[int] = 1,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            image (`PIL.Image.Image`, `List[PIL.Image.Image]` or `paddle.Tensor`):
                The image prompt or prompts to guide the image generation.
            height (`int`, *optional*, defaults to self.image_unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to self.image_unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`paddle.Generator`, *optional*):
                A paddle generator to make generation deterministic.
            latents (`paddle.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Examples:

        ```py
        >>> from ppdiffusers import VersatileDiffusionPipeline
        >>> import paddle
        >>> import requests
        >>> from io import BytesIO
        >>> from PIL import Image

        >>> # let's download an initial image
        >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg"

        >>> response = requests.get(url)
        >>> image = Image.open(BytesIO(response.content)).convert("RGB")

        >>> pipe = VersatileDiffusionPipeline.from_pretrained(
        ...     "shi-labs/versatile-diffusion"
        ... )

        >>> generator = paddle.Generator().manual_seed(0)
        >>> image = pipe.image_variation(image, generator=generator).images[0]
        >>> image.save("./car_variation.png")
        ```

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        expected_components = inspect.signature(VersatileDiffusionImageVariationPipeline.__init__).parameters.keys()
        components = {name: component for name, component in self.components.items() if name in expected_components}
        return VersatileDiffusionImageVariationPipeline(**components)(
            image=image,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )

    @paddle.no_grad()
    def text_to_image(
        self,
        prompt: Union[str, List[str]],
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
        latents: Optional[paddle.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
        callback_steps: Optional[int] = 1,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            height (`int`, *optional*, defaults to self.image_unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to self.image_unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`paddle.Generator`, *optional*):
                A paddle generator to make generation deterministic.
            latents (`paddle.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Examples:

        ```py
        >>> from ppdiffusers import VersatileDiffusionPipeline
        >>> import paddle

        >>> pipe = VersatileDiffusionPipeline.from_pretrained(
        ...     "shi-labs/versatile-diffusion"
        ... )

        >>> generator = paddle.Generator().manual_seed(0)
        >>> image = pipe.text_to_image("an astronaut riding on a horse on mars", generator=generator).images[0]
        >>> image.save("./astronaut.png")
        ```

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        expected_components = inspect.signature(VersatileDiffusionTextToImagePipeline.__init__).parameters.keys()
        components = {name: component for name, component in self.components.items() if name in expected_components}
        temp_pipeline = VersatileDiffusionTextToImagePipeline(**components)
        output = temp_pipeline(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
        # swap the attention blocks back to the original state
        temp_pipeline._swap_unet_attention_blocks()

        return output

    @paddle.no_grad()
    def dual_guided(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        text_to_image_strength: float = 0.5,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
        latents: Optional[paddle.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
        callback_steps: Optional[int] = 1,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            image (`PIL.Image.Image` or `List[PIL.Image.Image]`):
                The image prompt or prompts to guide the image generation.
            text_to_image_strength (`float`, *optional*, defaults to 0.5):
                The relative weight of the text prompt versus the image prompt in the combined guidance.
            height (`int`, *optional*, defaults to self.image_unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to self.image_unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`paddle.Generator`, *optional*):
                A paddle generator to make generation deterministic.
            latents (`paddle.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Examples:

        ```py
        >>> from ppdiffusers import VersatileDiffusionPipeline
        >>> import paddle
        >>> import requests
        >>> from io import BytesIO
        >>> from PIL import Image

        >>> # let's download an initial image
        >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg"

        >>> response = requests.get(url)
        >>> image = Image.open(BytesIO(response.content)).convert("RGB")
        >>> text = "a red car in the sun"

        >>> pipe = VersatileDiffusionPipeline.from_pretrained(
        ...     "shi-labs/versatile-diffusion"
        ... )

        >>> generator = paddle.Generator().manual_seed(0)
        >>> text_to_image_strength = 0.75

        >>> image = pipe.dual_guided(
        ...     prompt=text, image=image, text_to_image_strength=text_to_image_strength, generator=generator
        ... ).images[0]
        >>> image.save("./car_variation.png")
        ```

        Returns:
            [`~pipelines.stable_diffusion.ImagePipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When
            returning a tuple, the first element is a list with the generated images.
        """

        expected_components = inspect.signature(VersatileDiffusionDualGuidedPipeline.__init__).parameters.keys()
        components = {name: component for name, component in self.components.items() if name in expected_components}
        temp_pipeline = VersatileDiffusionDualGuidedPipeline(**components)
        output = temp_pipeline(
            prompt=prompt,
            image=image,
            text_to_image_strength=text_to_image_strength,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
        temp_pipeline._revert_dual_attention()

        return output
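

# A minimal combined usage sketch (added for illustration; the checkpoint name
# "shi-labs/versatile-diffusion" is taken from the docstring examples above, and
# the prompts are placeholders):
#
#     import paddle
#     from ppdiffusers import VersatileDiffusionPipeline
#
#     pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion")
#     generator = paddle.Generator().manual_seed(0)
#
#     # One pipeline instance exposes all three Versatile Diffusion tasks:
#     img = pipe.text_to_image("a red sports car", generator=generator).images[0]
#     var = pipe.image_variation(img, generator=generator).images[0]
#     dual = pipe.dual_guided(prompt="a red car in the sun", image=img,
#                             text_to_image_strength=0.75, generator=generator).images[0]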
spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/synthesis_engine.py
DELETED
@@ -1,502 +0,0 @@
import threading
from itertools import chain
from typing import List, Optional, Tuple

import numpy
from scipy.signal import resample

from ..acoustic_feature_extractor import OjtPhoneme
from ..model import AccentPhrase, AudioQuery, Mora
from .core_wrapper import CoreWrapper, OldCoreError
from .synthesis_engine_base import SynthesisEngineBase

unvoiced_mora_phoneme_list = ["A", "I", "U", "E", "O", "cl", "pau"]
mora_phoneme_list = ["a", "i", "u", "e", "o", "N"] + unvoiced_mora_phoneme_list


# TODO: move mora utility to mora module
def to_flatten_moras(accent_phrases: List[AccentPhrase]) -> List[Mora]:
    """
    Concatenate all moras contained in accent_phrases (including each
    pause_mora, if present) into a single list.

    Parameters
    ----------
    accent_phrases : List[AccentPhrase]
        List of AccentPhrase objects.

    Returns
    -------
    moras : List[Mora]
        The concatenated list of moras.
    """
    return list(
        chain.from_iterable(
            accent_phrase.moras
            + (
                [accent_phrase.pause_mora]
                if accent_phrase.pause_mora is not None
                else []
            )
            for accent_phrase in accent_phrases
        )
    )
|
42 |
-
|
43 |
-
|
44 |
-
def to_phoneme_data_list(phoneme_str_list: List[str]):
|
45 |
-
"""
|
46 |
-
phoneme文字列のリストを、OjtPhonemeクラスのリストに変換する
|
47 |
-
Parameters
|
48 |
-
----------
|
49 |
-
phoneme_str_list : List[str]
|
50 |
-
phoneme文字列のリスト
|
51 |
-
Returns
|
52 |
-
-------
|
53 |
-
phoneme_list : List[OjtPhoneme]
|
54 |
-
変換されたOjtPhonemeクラスのリスト
|
55 |
-
"""
|
56 |
-
phoneme_data_list = [
|
57 |
-
OjtPhoneme(phoneme=p, start=i, end=i + 1)
|
58 |
-
for i, p in enumerate(phoneme_str_list)
|
59 |
-
]
|
60 |
-
phoneme_data_list = OjtPhoneme.convert(phoneme_data_list)
|
61 |
-
return phoneme_data_list
|
62 |
-
|
63 |
-
|
64 |
-
def split_mora(phoneme_list: List[OjtPhoneme]):
|
65 |
-
"""
|
66 |
-
OjtPhonemeのリストから、
|
67 |
-
母音の位置(vowel_indexes)
|
68 |
-
母音の音素列(vowel_phoneme_list)
|
69 |
-
子音の音素列(consonant_phoneme_list)
|
70 |
-
を生成し、返す
|
71 |
-
Parameters
|
72 |
-
----------
|
73 |
-
phoneme_list : List[OjtPhoneme]
|
74 |
-
phonemeクラスのリスト
|
75 |
-
Returns
|
76 |
-
-------
|
77 |
-
consonant_phoneme_list : List[OjtPhoneme]
|
78 |
-
子音の音素列
|
79 |
-
vowel_phoneme_list : List[OjtPhoneme]
|
80 |
-
母音の音素列
|
81 |
-
vowel_indexes : : List[int]
|
82 |
-
母音の位置
|
83 |
-
"""
|
84 |
-
vowel_indexes = [
|
85 |
-
i for i, p in enumerate(phoneme_list) if p.phoneme in mora_phoneme_list
|
86 |
-
]
|
87 |
-
vowel_phoneme_list = [phoneme_list[i] for i in vowel_indexes]
|
88 |
-
# postとprevのvowel_indexの差として考えられる値は1か2
|
89 |
-
# 理由としてはphoneme_listは、consonant、vowelの組み合わせか、vowel一つの連続であるから
|
90 |
-
# 1の場合はconsonant(子音)が存在しない=母音のみ(a/i/u/e/o/N/cl/pau)で構成されるモーラ(音)である
|
91 |
-
# 2の場合はconsonantが存在するモーラである
|
92 |
-
# なので、2の場合(else)でphonemeを取り出している
|
93 |
-
consonant_phoneme_list: List[Optional[OjtPhoneme]] = [None] + [
|
94 |
-
None if post - prev == 1 else phoneme_list[post - 1]
|
95 |
-
for prev, post in zip(vowel_indexes[:-1], vowel_indexes[1:])
|
96 |
-
]
|
97 |
-
return consonant_phoneme_list, vowel_phoneme_list, vowel_indexes
|
98 |
-
|
99 |
-
|
100 |
-
def pre_process(
|
101 |
-
accent_phrases: List[AccentPhrase],
|
102 |
-
) -> Tuple[List[Mora], List[OjtPhoneme]]:
|
103 |
-
"""
|
104 |
-
AccentPhraseモデルのリストを整形し、処理に必要なデータの原型を作り出す
|
105 |
-
Parameters
|
106 |
-
----------
|
107 |
-
accent_phrases : List[AccentPhrase]
|
108 |
-
AccentPhraseモデルのリスト
|
109 |
-
Returns
|
110 |
-
-------
|
111 |
-
flatten_moras : List[Mora]
|
112 |
-
AccentPhraseモデルのリスト内に含まれるすべてのMoraをリスト化したものを返す
|
113 |
-
phoneme_data_list : List[OjtPhoneme]
|
114 |
-
flatten_morasから取り出したすべてのPhonemeをOjtPhonemeに変換したものを返す
|
115 |
-
"""
|
116 |
-
flatten_moras = to_flatten_moras(accent_phrases)
|
117 |
-
|
118 |
-
phoneme_each_mora = [
|
119 |
-
([mora.consonant] if mora.consonant is not None else []) + [mora.vowel]
|
120 |
-
for mora in flatten_moras
|
121 |
-
]
|
122 |
-
phoneme_str_list = list(chain.from_iterable(phoneme_each_mora))
|
123 |
-
phoneme_str_list = ["pau"] + phoneme_str_list + ["pau"]
|
124 |
-
|
125 |
-
phoneme_data_list = to_phoneme_data_list(phoneme_str_list)
|
126 |
-
|
127 |
-
return flatten_moras, phoneme_data_list
|
128 |
-
|
129 |
-
|
130 |
-
class SynthesisEngine(SynthesisEngineBase):
|
131 |
-
def __init__(
|
132 |
-
self,
|
133 |
-
core: CoreWrapper,
|
134 |
-
):
|
135 |
-
"""
|
136 |
-
core.yukarin_s_forward: 音素列から、音素ごとの長さを求める関数
|
137 |
-
length: 音素列の長さ
|
138 |
-
phoneme_list: 音素列
|
139 |
-
speaker_id: 話者番号
|
140 |
-
return: 音素ごとの長さ
|
141 |
-
|
142 |
-
core.yukarin_sa_forward: モーラごとの音素列とアクセント情報から、モーラごとの音高を求める関数
|
143 |
-
length: モーラ列の長さ
|
144 |
-
vowel_phoneme_list: 母音の音素列
|
145 |
-
consonant_phoneme_list: 子音の音素列
|
146 |
-
start_accent_list: アクセントの開始位置
|
147 |
-
end_accent_list: アクセントの終了位置
|
148 |
-
start_accent_phrase_list: アクセント句の開始位置
|
149 |
-
end_accent_phrase_list: アクセント句の終了位置
|
150 |
-
speaker_id: 話者番号
|
151 |
-
return: モーラごとの音高
|
152 |
-
|
153 |
-
core.decode_forward: フレームごとの音素と音高から波形を求める関数
|
154 |
-
length: フレームの長さ
|
155 |
-
phoneme_size: 音素の種類数
|
156 |
-
f0: フレームごとの音高
|
157 |
-
phoneme: フレームごとの音素
|
158 |
-
speaker_id: 話者番号
|
159 |
-
return: 音声波形
|
160 |
-
|
161 |
-
speakers: coreから取得したspeakersに関するjsonデータの文字列
|
162 |
-
|
163 |
-
supported_devices:
|
164 |
-
coreから取得した対応デバイスに関するjsonデータの文字列
|
165 |
-
Noneの場合はコアが情報の取得に対応していないため、対応デバイスは不明
|
166 |
-
"""
|
167 |
-
super().__init__()
|
168 |
-
self.core = core
|
169 |
-
self._speakers = self.core.metas()
|
170 |
-
self.mutex = threading.Lock()
|
171 |
-
try:
|
172 |
-
self._supported_devices = self.core.supported_devices()
|
173 |
-
except OldCoreError:
|
174 |
-
self._supported_devices = None
|
175 |
-
self.default_sampling_rate = 24000
|
176 |
-
|
177 |
-
@property
|
178 |
-
def speakers(self) -> str:
|
179 |
-
return self._speakers
|
180 |
-
|
181 |
-
@property
|
182 |
-
def supported_devices(self) -> Optional[str]:
|
183 |
-
return self._supported_devices
|
184 |
-
|
185 |
-
def initialize_speaker_synthesis(self, speaker_id: int, skip_reinit: bool):
|
186 |
-
try:
|
187 |
-
with self.mutex:
|
188 |
-
# 以下の条件のいずれかを満たす場合, 初期化を実行する
|
189 |
-
# 1. 引数 skip_reinit が False の場合
|
190 |
-
# 2. 話者が初期化されていない場合
|
191 |
-
if (not skip_reinit) or (not self.core.is_model_loaded(speaker_id)):
|
192 |
-
self.core.load_model(speaker_id)
|
193 |
-
except OldCoreError:
|
194 |
-
pass # コアが古い場合はどうしようもないので何もしない
|
195 |
-
|
196 |
-
def is_initialized_speaker_synthesis(self, speaker_id: int) -> bool:
|
197 |
-
try:
|
198 |
-
return self.core.is_model_loaded(speaker_id)
|
199 |
-
except OldCoreError:
|
200 |
-
return True # コアが古い場合はどうしようもないのでTrueを返す
|
201 |
-
|
202 |
-
def replace_phoneme_length(
|
203 |
-
self, accent_phrases: List[AccentPhrase], speaker_id: int
|
204 |
-
) -> List[AccentPhrase]:
|
205 |
-
"""
|
206 |
-
accent_phrasesの母音・子音の長さを設定する
|
207 |
-
Parameters
|
208 |
-
----------
|
209 |
-
accent_phrases : List[AccentPhrase]
|
210 |
-
アクセント句モデルのリスト
|
211 |
-
speaker_id : int
|
212 |
-
話者ID
|
213 |
-
Returns
|
214 |
-
-------
|
215 |
-
accent_phrases : List[AccentPhrase]
|
216 |
-
母音・子音の長さが設定されたアクセント句モデルのリスト
|
217 |
-
"""
|
218 |
-
# モデルがロードされていない場合はロードする
|
219 |
-
self.initialize_speaker_synthesis(speaker_id, skip_reinit=True)
|
220 |
-
# phoneme
|
221 |
-
# AccentPhraseをすべてMoraおよびOjtPhonemeの形に分解し、処理可能な形にする
|
222 |
-
flatten_moras, phoneme_data_list = pre_process(accent_phrases)
|
223 |
-
# OjtPhonemeの形に分解されたもの(phoneme_data_list)から、vowel(母音)の位置を抜き出す
|
224 |
-
_, _, vowel_indexes_data = split_mora(phoneme_data_list)
|
225 |
-
|
226 |
-
# yukarin_s
|
227 |
-
# OjtPhonemeのリストからOjtPhonemeのPhoneme ID(OpenJTalkにおける音素のID)のリストを作る
|
228 |
-
phoneme_list_s = numpy.array(
|
229 |
-
[p.phoneme_id for p in phoneme_data_list], dtype=numpy.int64
|
230 |
-
)
|
231 |
-
# Phoneme IDのリスト(phoneme_list_s)をyukarin_s_forwardにかけ、推論器によって適切な音素の長さを割り当てる
|
232 |
-
with self.mutex:
|
233 |
-
phoneme_length = self.core.yukarin_s_forward(
|
234 |
-
length=len(phoneme_list_s),
|
235 |
-
phoneme_list=phoneme_list_s,
|
236 |
-
speaker_id=numpy.array(speaker_id, dtype=numpy.int64).reshape(-1),
|
237 |
-
)
|
238 |
-
|
239 |
-
# yukarin_s_forwarderの結果をaccent_phrasesに反映する
|
240 |
-
# flatten_moras変数に展開された値を変更することでコード量を削減しつつaccent_phrases内のデータを書き換えている
|
241 |
-
for i, mora in enumerate(flatten_moras):
|
242 |
-
mora.consonant_length = (
|
243 |
-
phoneme_length[vowel_indexes_data[i + 1] - 1]
|
244 |
-
if mora.consonant is not None
|
245 |
-
else None
|
246 |
-
)
|
247 |
-
mora.vowel_length = phoneme_length[vowel_indexes_data[i + 1]]
|
248 |
-
|
249 |
-
return accent_phrases
|
250 |
-
|
251 |
-
def replace_mora_pitch(
|
252 |
-
self, accent_phrases: List[AccentPhrase], speaker_id: int
|
253 |
-
) -> List[AccentPhrase]:
|
254 |
-
"""
|
255 |
-
accent_phrasesの音高(ピッチ)を設定��る
|
256 |
-
Parameters
|
257 |
-
----------
|
258 |
-
accent_phrases : List[AccentPhrase]
|
259 |
-
アクセント句モデルのリスト
|
260 |
-
speaker_id : int
|
261 |
-
話者ID
|
262 |
-
Returns
|
263 |
-
-------
|
264 |
-
accent_phrases : List[AccentPhrase]
|
265 |
-
音高(ピッチ)が設定されたアクセント句モデルのリスト
|
266 |
-
"""
|
267 |
-
# モデルがロードされていない場合はロードする
|
268 |
-
self.initialize_speaker_synthesis(speaker_id, skip_reinit=True)
|
269 |
-
# numpy.concatenateが空リストだとエラーを返すのでチェック
|
270 |
-
if len(accent_phrases) == 0:
|
271 |
-
return []
|
272 |
-
|
273 |
-
# phoneme
|
274 |
-
# AccentPhraseをすべてMoraおよびOjtPhonemeの形に分解し、処理可能な形にする
|
275 |
-
flatten_moras, phoneme_data_list = pre_process(accent_phrases)
|
276 |
-
|
277 |
-
# accent
|
278 |
-
def _create_one_hot(accent_phrase: AccentPhrase, position: int):
|
279 |
-
"""
|
280 |
-
単位行列(numpy.eye)を応用し、accent_phrase内でone hotな配列(リスト)を作る
|
281 |
-
例えば、accent_phraseのmorasの長さが12、positionが1なら
|
282 |
-
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
|
283 |
-
morasの長さが同じく12、positionが-1なら
|
284 |
-
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
|
285 |
-
のような配列を生成する
|
286 |
-
accent_phraseがpause_moraを含む場合はさらに後ろに0が足される
|
287 |
-
Parameters
|
288 |
-
----------
|
289 |
-
accent_phrase : AccentPhrase
|
290 |
-
アクセント句モデル
|
291 |
-
position : int
|
292 |
-
one hotにするindex
|
293 |
-
Returns
|
294 |
-
-------
|
295 |
-
one_hot : numpy.ndarray
|
296 |
-
one hotな配列(リスト)
|
297 |
-
"""
|
298 |
-
return numpy.r_[
|
299 |
-
numpy.eye(len(accent_phrase.moras))[position],
|
300 |
-
(0 if accent_phrase.pause_mora is not None else []),
|
301 |
-
]
|
302 |
-
|
303 |
-
# accent_phrasesから、アクセントの開始位置のリストを作る
|
304 |
-
start_accent_list = numpy.concatenate(
|
305 |
-
[
|
306 |
-
# accentはプログラミング言語におけるindexのように0始まりではなく1始まりなので、
|
307 |
-
# accentが1の場合は0番目を指定している
|
308 |
-
# accentが1ではない場合、accentはend_accent_listに用いられる
|
309 |
-
_create_one_hot(accent_phrase, 0 if accent_phrase.accent == 1 else 1)
|
310 |
-
for accent_phrase in accent_phrases
|
311 |
-
]
|
312 |
-
)
|
313 |
-
|
314 |
-
# accent_phrasesから、アクセントの終了位置のリストを作る
|
315 |
-
end_accent_list = numpy.concatenate(
|
316 |
-
[
|
317 |
-
# accentはプログラミング言語におけるindexのように0始まりではなく1始まりなので、1を引いている
|
318 |
-
_create_one_hot(accent_phrase, accent_phrase.accent - 1)
|
319 |
-
for accent_phrase in accent_phrases
|
320 |
-
]
|
321 |
-
)
|
322 |
-
|
323 |
-
# accent_phrasesから、アクセント句の開始位置のリストを作る
|
324 |
-
# これによって、yukarin_sa_forwarder内でアクセント句を区別できる
|
325 |
-
start_accent_phrase_list = numpy.concatenate(
|
326 |
-
[_create_one_hot(accent_phrase, 0) for accent_phrase in accent_phrases]
|
327 |
-
)
|
328 |
-
|
329 |
-
# accent_phrasesから、アクセント句の終了位置のリストを作る
|
330 |
-
end_accent_phrase_list = numpy.concatenate(
|
331 |
-
[_create_one_hot(accent_phrase, -1) for accent_phrase in accent_phrases]
|
332 |
-
)
|
333 |
-
|
334 |
-
# 最初と最後に0を付け加える。これによってpau(前後の無音のためのもの)を付け加えたことになる
|
335 |
-
start_accent_list = numpy.r_[0, start_accent_list, 0]
|
336 |
-
end_accent_list = numpy.r_[0, end_accent_list, 0]
|
337 |
-
start_accent_phrase_list = numpy.r_[0, start_accent_phrase_list, 0]
|
338 |
-
end_accent_phrase_list = numpy.r_[0, end_accent_phrase_list, 0]
|
339 |
-
|
340 |
-
# アクセント・アクセント句関連のデータをyukarin_sa_forwarderに渡すための最終処理、リスト内のデータをint64に変換する
|
341 |
-
start_accent_list = numpy.array(start_accent_list, dtype=numpy.int64)
|
342 |
-
end_accent_list = numpy.array(end_accent_list, dtype=numpy.int64)
|
343 |
-
start_accent_phrase_list = numpy.array(
|
344 |
-
start_accent_phrase_list, dtype=numpy.int64
|
345 |
-
)
|
346 |
-
end_accent_phrase_list = numpy.array(end_accent_phrase_list, dtype=numpy.int64)
|
347 |
-
|
348 |
-
# phonemeに関するデータを取得(変換)する
|
349 |
-
(
|
350 |
-
consonant_phoneme_data_list,
|
351 |
-
vowel_phoneme_data_list,
|
352 |
-
_,
|
353 |
-
) = split_mora(phoneme_data_list)
|
354 |
-
|
355 |
-
# yukarin_sa
|
356 |
-
# Phoneme関連のデータをyukarin_sa_forwarderに渡すための最終処理、リスト内のデータをint64に変換する
|
357 |
-
vowel_phoneme_list = numpy.array(
|
358 |
-
[p.phoneme_id for p in vowel_phoneme_data_list], dtype=numpy.int64
|
359 |
-
)
|
360 |
-
consonant_phoneme_list = numpy.array(
|
361 |
-
[
|
362 |
-
p.phoneme_id if p is not None else -1
|
363 |
-
for p in consonant_phoneme_data_list
|
364 |
-
],
|
365 |
-
dtype=numpy.int64,
|
366 |
-
)
|
367 |
-
|
368 |
-
# 今までに生成された情報をyukarin_sa_forwardにかけ、推論器によってモーラごとに適切な音高(ピッチ)を割り当てる
|
369 |
-
with self.mutex:
|
370 |
-
f0_list = self.core.yukarin_sa_forward(
|
371 |
-
length=vowel_phoneme_list.shape[0],
|
372 |
-
vowel_phoneme_list=vowel_phoneme_list[numpy.newaxis],
|
373 |
-
consonant_phoneme_list=consonant_phoneme_list[numpy.newaxis],
|
374 |
-
start_accent_list=start_accent_list[numpy.newaxis],
|
375 |
-
end_accent_list=end_accent_list[numpy.newaxis],
|
376 |
-
start_accent_phrase_list=start_accent_phrase_list[numpy.newaxis],
|
377 |
-
end_accent_phrase_list=end_accent_phrase_list[numpy.newaxis],
|
378 |
-
speaker_id=numpy.array(speaker_id, dtype=numpy.int64).reshape(-1),
|
379 |
-
)[0]
|
380 |
-
|
381 |
-
# 無声母音を含むMoraに関しては、音高(ピッチ)を0にする
|
382 |
-
for i, p in enumerate(vowel_phoneme_data_list):
|
383 |
-
if p.phoneme in unvoiced_mora_phoneme_list:
|
384 |
-
f0_list[i] = 0
|
385 |
-
|
386 |
-
# yukarin_sa_forwarderの結果をaccent_phrasesに反映する
|
387 |
-
# flatten_moras変数に展開された値を変更することでコード量を削減しつつaccent_phrases内のデータを書き換えている
|
388 |
-
for i, mora in enumerate(flatten_moras):
|
389 |
-
mora.pitch = f0_list[i + 1]
|
390 |
-
|
391 |
-
return accent_phrases
|
392 |
-
|
393 |
-
def _synthesis_impl(self, query: AudioQuery, speaker_id: int):
|
394 |
-
"""
|
395 |
-
音声合成クエリから音声合成に必要な情報を構成し、実際に音声合成を行う
|
396 |
-
Parameters
|
397 |
-
----------
|
398 |
-
query : AudioQuery
|
399 |
-
音声合成クエリ
|
400 |
-
speaker_id : int
|
401 |
-
話者ID
|
402 |
-
Returns
|
403 |
-
-------
|
404 |
-
wave : numpy.ndarray
|
405 |
-
音声合成結果
|
406 |
-
"""
|
407 |
-
# モデルがロードされていない場合はロードする
|
408 |
-
self.initialize_speaker_synthesis(speaker_id, skip_reinit=True)
|
409 |
-
# phoneme
|
410 |
-
# AccentPhraseをすべてMoraおよびOjtPhonemeの形に分解し、処理可能な形にする
|
411 |
-
flatten_moras, phoneme_data_list = pre_process(query.accent_phrases)
|
412 |
-
|
413 |
-
# OjtPhonemeのリストからOjtPhonemeのPhoneme ID(OpenJTalkにおける音素のID)のリストを作る
|
414 |
-
phoneme_list_s = numpy.array(
|
415 |
-
[p.phoneme_id for p in phoneme_data_list], dtype=numpy.int64
|
416 |
-
)
|
417 |
-
|
418 |
-
# length
|
419 |
-
# 音素の長さをリストに展開・結合する。ここには前後の無音時間も含まれる
|
420 |
-
phoneme_length_list = (
|
421 |
-
[query.prePhonemeLength]
|
422 |
-
+ [
|
423 |
-
length
|
424 |
-
for mora in flatten_moras
|
425 |
-
for length in (
|
426 |
-
[mora.consonant_length] if mora.consonant is not None else []
|
427 |
-
)
|
428 |
-
+ [mora.vowel_length]
|
429 |
-
]
|
430 |
-
+ [query.postPhonemeLength]
|
431 |
-
)
|
432 |
-
# floatにキャスト
|
433 |
-
phoneme_length = numpy.array(phoneme_length_list, dtype=numpy.float32)
|
434 |
-
|
435 |
-
# lengthにSpeed Scale(話速)を適用する
|
436 |
-
phoneme_length /= query.speedScale
|
437 |
-
|
438 |
-
# pitch
|
439 |
-
# モーラの音高(ピッチ)を展開・結合し、floatにキャストする
|
440 |
-
f0_list = [0] + [mora.pitch for mora in flatten_moras] + [0]
|
441 |
-
f0 = numpy.array(f0_list, dtype=numpy.float32)
|
442 |
-
# 音高(ピッチ)の調節を適用する(2のPitch Scale乗を掛ける)
|
443 |
-
f0 *= 2**query.pitchScale
|
444 |
-
|
445 |
-
# 有声音素(音高(ピッチ)が0より大きいもの)か否かを抽出する
|
446 |
-
voiced = f0 > 0
|
447 |
-
# 有声音素の音高(ピッチ)の平均値を求める
|
448 |
-
mean_f0 = f0[voiced].mean()
|
449 |
-
# 平均値がNaNではないとき、抑揚を適用する
|
450 |
-
# 抑揚は音高と音高の平均値の差に抑揚を掛けたもの((f0 - mean_f0) * Intonation Scale)に抑揚の平均値(mean_f0)を足したもの
|
451 |
-
if not numpy.isnan(mean_f0):
|
452 |
-
f0[voiced] = (f0[voiced] - mean_f0) * query.intonationScale + mean_f0
|
453 |
-
|
454 |
-
# OjtPhonemeの形に分解された音素リストから、vowel(母音)の位置を抜き出し、numpyのarrayにする
|
455 |
-
_, _, vowel_indexes_data = split_mora(phoneme_data_list)
|
456 |
-
vowel_indexes = numpy.array(vowel_indexes_data)
|
457 |
-
|
458 |
-
# forward decode
|
459 |
-
# 音素の長さにrateを掛け、intにキャストする
|
460 |
-
rate = 24000 / 256
|
461 |
-
phoneme_bin_num = numpy.round(phoneme_length * rate).astype(numpy.int32)
|
462 |
-
|
463 |
-
# Phoneme IDを音素の長さ分繰り返す
|
464 |
-
phoneme = numpy.repeat(phoneme_list_s, phoneme_bin_num)
|
465 |
-
# f0を母音と子音の長さの合計分繰り返す
|
466 |
-
f0 = numpy.repeat(
|
467 |
-
f0,
|
468 |
-
[a.sum() for a in numpy.split(phoneme_bin_num, vowel_indexes[:-1] + 1)],
|
469 |
-
)
|
470 |
-
|
471 |
-
# phonemeの長さとOjtPhonemeのnum_phoneme(45)分の0で初期化された2次元配列を用意する
|
472 |
-
array = numpy.zeros((len(phoneme), OjtPhoneme.num_phoneme), dtype=numpy.float32)
|
473 |
-
# 初期化された2次元配列の各行をone hotにする
|
474 |
-
array[numpy.arange(len(phoneme)), phoneme] = 1
|
475 |
-
phoneme = array
|
476 |
-
|
477 |
-
# 今まで生成された情報をdecode_forwardにかけ、推論器によって音声波形を生成する
|
478 |
-
with self.mutex:
|
479 |
-
wave = self.core.decode_forward(
|
480 |
-
length=phoneme.shape[0],
|
481 |
-
phoneme_size=phoneme.shape[1],
|
482 |
-
f0=f0[:, numpy.newaxis],
|
483 |
-
phoneme=phoneme,
|
484 |
-
speaker_id=numpy.array(speaker_id, dtype=numpy.int64).reshape(-1),
|
485 |
-
)
|
486 |
-
|
487 |
-
# volume: ゲイン適用
|
488 |
-
wave *= query.volumeScale
|
489 |
-
|
490 |
-
# 出力サンプリングレートがデフォルト(decode forwarderによるもの、24kHz)でなければ、それを適用する
|
491 |
-
if query.outputSamplingRate != self.default_sampling_rate:
|
492 |
-
wave = resample(
|
493 |
-
wave,
|
494 |
-
query.outputSamplingRate * len(wave) // self.default_sampling_rate,
|
495 |
-
)
|
496 |
-
|
497 |
-
# ステレオ変換
|
498 |
-
# 出力設定がステレオなのであれば、ステレオ化する
|
499 |
-
if query.outputStereo:
|
500 |
-
wave = numpy.array([wave, wave]).T
|
501 |
-
|
502 |
-
return wave
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AI-Dashboards/ScrabbleSolverWordThesaurus/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: ScrabbleSolverWordThesaurus
emoji: ⚡
colorFrom: green
colorTo: yellow
sdk: streamlit
sdk_version: 1.17.0
app_file: app.py
pinned: false
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/lib_v5/dataset.py
DELETED
@@ -1,183 +0,0 @@
import os
import random

import numpy as np
import torch
import torch.utils.data
from tqdm import tqdm

from uvr5_pack.lib_v5 import spec_utils


class VocalRemoverValidationSet(torch.utils.data.Dataset):
    def __init__(self, patch_list):
        self.patch_list = patch_list

    def __len__(self):
        return len(self.patch_list)

    def __getitem__(self, idx):
        path = self.patch_list[idx]
        data = np.load(path)

        X, y = data["X"], data["y"]

        X_mag = np.abs(X)
        y_mag = np.abs(y)

        return X_mag, y_mag


def make_pair(mix_dir, inst_dir):
    input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"]

    X_list = sorted(
        [
            os.path.join(mix_dir, fname)
            for fname in os.listdir(mix_dir)
            if os.path.splitext(fname)[1] in input_exts
        ]
    )
    y_list = sorted(
        [
            os.path.join(inst_dir, fname)
            for fname in os.listdir(inst_dir)
            if os.path.splitext(fname)[1] in input_exts
        ]
    )

    filelist = list(zip(X_list, y_list))

    return filelist


def train_val_split(dataset_dir, split_mode, val_rate, val_filelist):
    if split_mode == "random":
        filelist = make_pair(
            os.path.join(dataset_dir, "mixtures"),
            os.path.join(dataset_dir, "instruments"),
        )

        random.shuffle(filelist)

        if len(val_filelist) == 0:
            val_size = int(len(filelist) * val_rate)
            train_filelist = filelist[:-val_size]
            val_filelist = filelist[-val_size:]
        else:
            train_filelist = [
                pair for pair in filelist if list(pair) not in val_filelist
            ]
    elif split_mode == "subdirs":
        if len(val_filelist) != 0:
            raise ValueError(
                "The `val_filelist` option is not available in `subdirs` mode"
            )

        train_filelist = make_pair(
            os.path.join(dataset_dir, "training/mixtures"),
            os.path.join(dataset_dir, "training/instruments"),
        )

        val_filelist = make_pair(
            os.path.join(dataset_dir, "validation/mixtures"),
            os.path.join(dataset_dir, "validation/instruments"),
        )

    return train_filelist, val_filelist


def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha):
    perm = np.random.permutation(len(X))
    for i, idx in enumerate(tqdm(perm)):
        if np.random.uniform() < reduction_rate:
            y[idx] = spec_utils.reduce_vocal_aggressively(
                X[idx], y[idx], reduction_mask
            )

        if np.random.uniform() < 0.5:
            # swap channel
            X[idx] = X[idx, ::-1]
            y[idx] = y[idx, ::-1]
        if np.random.uniform() < 0.02:
            # mono
            X[idx] = X[idx].mean(axis=0, keepdims=True)
            y[idx] = y[idx].mean(axis=0, keepdims=True)
        if np.random.uniform() < 0.02:
            # inst
            X[idx] = y[idx]

        if np.random.uniform() < mixup_rate and i < len(perm) - 1:
            lam = np.random.beta(mixup_alpha, mixup_alpha)
            X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]]
            y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]]

    return X, y


def make_padding(width, cropsize, offset):
    left = offset
    roi_size = cropsize - left * 2
    if roi_size == 0:
        roi_size = cropsize
    right = roi_size - (width % roi_size) + left

    return left, right, roi_size


def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset):
    len_dataset = patches * len(filelist)

    X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64)
    y_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64)

    for i, (X_path, y_path) in enumerate(tqdm(filelist)):
        X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft)
        coef = np.max([np.abs(X).max(), np.abs(y).max()])
        X, y = X / coef, y / coef

        l, r, roi_size = make_padding(X.shape[2], cropsize, offset)
        X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant")
        y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant")

        starts = np.random.randint(0, X_pad.shape[2] - cropsize, patches)
        ends = starts + cropsize
        for j in range(patches):
            idx = i * patches + j
            X_dataset[idx] = X_pad[:, :, starts[j] : ends[j]]
            y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]]

    return X_dataset, y_dataset


def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset):
    patch_list = []
    patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format(
        cropsize, sr, hop_length, n_fft, offset
    )
    os.makedirs(patch_dir, exist_ok=True)

    for i, (X_path, y_path) in enumerate(tqdm(filelist)):
        basename = os.path.splitext(os.path.basename(X_path))[0]

        X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft)
        coef = np.max([np.abs(X).max(), np.abs(y).max()])
        X, y = X / coef, y / coef

        l, r, roi_size = make_padding(X.shape[2], cropsize, offset)
        X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant")
        y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant")

        len_dataset = int(np.ceil(X.shape[2] / roi_size))
        for j in range(len_dataset):
            outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j))
            start = j * roi_size
            if not os.path.exists(outpath):
                np.savez(
                    outpath,
                    X=X_pad[:, :, start : start + cropsize],
                    y=y_pad[:, :, start : start + cropsize],
                )
            patch_list.append(outpath)

    return VocalRemoverValidationSet(patch_list)
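The padding arithmetic in make_padding is easiest to see with numbers: it reserves `offset` frames of margin on the left and pads the right so that, margins aside, the width tiles into whole `roi_size` windows. A small sanity check reproducing the function above (the example values are arbitrary):

def make_padding(width, cropsize, offset):
    left = offset
    roi_size = cropsize - left * 2
    if roi_size == 0:
        roi_size = cropsize
    right = roi_size - (width % roi_size) + left
    return left, right, roi_size

left, right, roi = make_padding(width=1000, cropsize=256, offset=32)
# roi = 256 - 64 = 192; 1000 % 192 = 40; right = 192 - 40 + 32 = 184
print(left, right, roi)               # 32 184 192
print((1000 + 32 + 184 - 64) % 192)   # 0: padded width minus both margins tiles evenly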
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/fastspeech/fs2.py
DELETED
@@ -1,250 +0,0 @@
from utils.hparams import hparams
from modules.commons.common_layers import *
from modules.commons.common_layers import Embedding
from modules.fastspeech.tts_modules import FastspeechDecoder, DurationPredictor, LengthRegulator, PitchPredictor, \
    EnergyPredictor, FastspeechEncoder
from utils.cwt import cwt2f0
from utils.pitch_utils import f0_to_coarse, denorm_f0, norm_f0
import torch.nn as nn
from modules.commons.rel_transformer import RelTransformerEncoder, BERTRelTransformerEncoder
FS_ENCODERS = {
    'fft': lambda hp, embed_tokens, d: FastspeechEncoder(
        embed_tokens, hp['hidden_size'], hp['enc_layers'], hp['enc_ffn_kernel_size'],
        num_heads=hp['num_heads']),
}

FS_DECODERS = {
    'fft': lambda hp: FastspeechDecoder(
        hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads']),
}


class FastSpeech2(nn.Module):
    def __init__(self, dictionary, out_dims=None):
        super().__init__()
        self.dictionary = dictionary
        self.padding_idx = dictionary.pad()
        self.enc_layers = hparams['enc_layers']
        self.dec_layers = hparams['dec_layers']
        self.hidden_size = hparams['hidden_size']
        self.encoder_embed_tokens = self.build_embedding(self.dictionary, self.hidden_size)
        if hparams.get("use_bert", False):
            self.ph_encoder = BERTRelTransformerEncoder(len(self.dictionary), hparams['hidden_size'], hparams['hidden_size'],
                                                        hparams['ffn_hidden_size'], hparams['num_heads'], hparams['enc_layers'],
                                                        hparams['enc_ffn_kernel_size'], hparams['dropout'], prenet=hparams['enc_prenet'], pre_ln=hparams['enc_pre_ln'])
        else:
            self.encoder = FS_ENCODERS[hparams['encoder_type']](hparams, self.encoder_embed_tokens, self.dictionary)
        self.decoder = FS_DECODERS[hparams['decoder_type']](hparams)
        self.out_dims = hparams['audio_num_mel_bins'] if out_dims is None else out_dims
        self.mel_out = Linear(self.hidden_size, self.out_dims, bias=True)

        if hparams['use_spk_id']:
            self.spk_embed_proj = Embedding(hparams['num_spk'] + 1, self.hidden_size)
            if hparams['use_split_spk_id']:
                self.spk_embed_f0 = Embedding(hparams['num_spk'] + 1, self.hidden_size)
                self.spk_embed_dur = Embedding(hparams['num_spk'] + 1, self.hidden_size)
        elif hparams['use_spk_embed']:
            self.spk_embed_proj = Linear(256, self.hidden_size, bias=True)
        predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size
        self.dur_predictor = DurationPredictor(
            self.hidden_size,
            n_chans=predictor_hidden,
            n_layers=hparams['dur_predictor_layers'],
            dropout_rate=hparams['predictor_dropout'],
            kernel_size=hparams['dur_predictor_kernel'])
        self.length_regulator = LengthRegulator()
        if hparams['use_pitch_embed']:
            self.pitch_embed = Embedding(300, self.hidden_size, self.padding_idx)
            self.pitch_predictor = PitchPredictor(
                self.hidden_size,
                n_chans=predictor_hidden,
                n_layers=hparams['predictor_layers'],
                dropout_rate=hparams['predictor_dropout'],
                odim=2 if hparams['pitch_type'] == 'frame' else 1,
                kernel_size=hparams['predictor_kernel'])
        if hparams.get('use_energy_embed', False):
            self.energy_embed = Embedding(256, self.hidden_size, self.padding_idx)
            self.energy_predictor = EnergyPredictor(
                self.hidden_size,
                n_chans=predictor_hidden,
                n_layers=hparams['predictor_layers'],
                dropout_rate=hparams['predictor_dropout'], odim=1,
                kernel_size=hparams['predictor_kernel'])

    def build_embedding(self, dictionary, embed_dim):
        num_embeddings = len(dictionary)
        emb = Embedding(num_embeddings, embed_dim, self.padding_idx)
        return emb

    def forward(self, txt_tokens, mel2ph=None, spk_embed=None,
                ref_mels=None, f0=None, uv=None, energy=None, skip_decoder=False,
                spk_embed_dur_id=None, spk_embed_f0_id=None, infer=False, **kwargs):
        ret = {}
        if hparams.get("use_bert", False):
            encoder_out = self.encoder(txt_tokens, bert_feats=kwargs['bert_feats'], ph2word=kwargs['ph2word'], ret=ret)
        else:
            encoder_out = self.encoder(txt_tokens)  # [B, T, C]
        src_nonpadding = (txt_tokens > 0).float()[:, :, None]

        # add ref style embed
        # Not implemented
        # variance encoder
        var_embed = 0

        # encoder_out_dur denotes encoder outputs for duration predictor
        # in speech adaptation, duration predictor use old speaker embedding
        if hparams['use_spk_embed']:
            spk_embed_dur = spk_embed_f0 = spk_embed = self.spk_embed_proj(spk_embed)[:, None, :]
        elif hparams['use_spk_id']:
            spk_embed_id = spk_embed
            if spk_embed_dur_id is None:
                spk_embed_dur_id = spk_embed_id
            if spk_embed_f0_id is None:
                spk_embed_f0_id = spk_embed_id
            spk_embed = self.spk_embed_proj(spk_embed_id)[:, None, :]
            spk_embed_dur = spk_embed_f0 = spk_embed
            if hparams['use_split_spk_id']:
                spk_embed_dur = self.spk_embed_dur(spk_embed_dur_id)[:, None, :]
                spk_embed_f0 = self.spk_embed_f0(spk_embed_f0_id)[:, None, :]
        else:
            spk_embed_dur = spk_embed_f0 = spk_embed = 0

        # add dur
        dur_inp = (encoder_out + var_embed + spk_embed_dur) * src_nonpadding

        mel2ph = self.add_dur(dur_inp, mel2ph, txt_tokens, ret)

        decoder_inp = F.pad(encoder_out, [0, 0, 1, 0])

        mel2ph_ = mel2ph[..., None].repeat([1, 1, encoder_out.shape[-1]])
        decoder_inp_origin = decoder_inp = torch.gather(decoder_inp, 1, mel2ph_)  # [B, T, H]

        tgt_nonpadding = (mel2ph > 0).float()[:, :, None]

        # add pitch and energy embed
        pitch_inp = (decoder_inp_origin + var_embed + spk_embed_f0) * tgt_nonpadding
        if hparams['use_pitch_embed']:
            pitch_inp_ph = (encoder_out + var_embed + spk_embed_f0) * src_nonpadding
            decoder_inp = decoder_inp + self.add_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out=pitch_inp_ph)
        if hparams.get('use_energy_embed', False):
            decoder_inp = decoder_inp + self.add_energy(pitch_inp, energy, ret)

        ret['decoder_inp'] = decoder_inp = (decoder_inp + spk_embed) * tgt_nonpadding

        if skip_decoder:
            return ret
        ret['mel_out'] = self.run_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs)

        return ret

    def add_dur(self, dur_input, mel2ph, txt_tokens, ret):
        """

        :param dur_input: [B, T_txt, H]
        :param mel2ph: [B, T_mel]
        :param txt_tokens: [B, T_txt]
        :param ret:
        :return:
        """
        src_padding = txt_tokens == 0
        dur_input = dur_input.detach() + hparams['predictor_grad'] * (dur_input - dur_input.detach())
        if mel2ph is None:
            dur, xs = self.dur_predictor.inference(dur_input, src_padding)
            ret['dur'] = xs
            ret['dur_choice'] = dur
            mel2ph = self.length_regulator(dur, src_padding).detach()
            # from modules.fastspeech.fake_modules import FakeLengthRegulator
            # fake_lr = FakeLengthRegulator()
            # fake_mel2ph = fake_lr(dur, (1 - src_padding.long()).sum(-1))[..., 0].detach()
            # print(mel2ph == fake_mel2ph)
        else:
            ret['dur'] = self.dur_predictor(dur_input, src_padding)
        ret['mel2ph'] = mel2ph
        return mel2ph

    def add_energy(self, decoder_inp, energy, ret):
        decoder_inp = decoder_inp.detach() + hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach())
        ret['energy_pred'] = energy_pred = self.energy_predictor(decoder_inp)[:, :, 0]
        if energy is None:
            energy = energy_pred
        energy = torch.clamp(energy * 256 // 4, max=255).long()
        energy_embed = self.energy_embed(energy)
        return energy_embed

    def add_pitch(self, decoder_inp, f0, uv, mel2ph, ret, encoder_out=None):
        if hparams['pitch_type'] == 'ph':
            pitch_pred_inp = encoder_out.detach() + hparams['predictor_grad'] * (encoder_out - encoder_out.detach())
            pitch_padding = encoder_out.sum().abs() == 0
            ret['pitch_pred'] = pitch_pred = self.pitch_predictor(pitch_pred_inp)
            if f0 is None:
                f0 = pitch_pred[:, :, 0]
            ret['f0_denorm'] = f0_denorm = denorm_f0(f0, None, hparams, pitch_padding=pitch_padding)
            pitch = f0_to_coarse(f0_denorm)  # start from 0 [B, T_txt]
            pitch = F.pad(pitch, [1, 0])
            pitch = torch.gather(pitch, 1, mel2ph)  # [B, T_mel]
            pitch_embed = self.pitch_embed(pitch)
            return pitch_embed
        decoder_inp = decoder_inp.detach() + hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach())

        pitch_padding = mel2ph == 0

        if hparams['pitch_type'] == 'cwt':
            pitch_padding = None
            ret['cwt'] = cwt_out = self.cwt_predictor(decoder_inp)
            stats_out = self.cwt_stats_layers(encoder_out[:, 0, :])  # [B, 2]
            mean = ret['f0_mean'] = stats_out[:, 0]
            std = ret['f0_std'] = stats_out[:, 1]
            cwt_spec = cwt_out[:, :, :10]
            if f0 is None:
                std = std * hparams['cwt_std_scale']
                f0 = self.cwt2f0_norm(cwt_spec, mean, std, mel2ph)
                if hparams['use_uv']:
                    assert cwt_out.shape[-1] == 11
                    uv = cwt_out[:, :, -1] > 0
        elif hparams['pitch_ar']:
            ret['pitch_pred'] = pitch_pred = self.pitch_predictor(decoder_inp, f0 if self.training else None)
            if f0 is None:
                f0 = pitch_pred[:, :, 0]
        else:
            ret['pitch_pred'] = pitch_pred = self.pitch_predictor(decoder_inp)
            if f0 is None:
                f0 = pitch_pred[:, :, 0]
            if hparams['use_uv'] and uv is None:
                uv = pitch_pred[:, :, 1] > 0
        ret['f0_denorm'] = f0_denorm = denorm_f0(f0, uv, hparams, pitch_padding=pitch_padding)
        if pitch_padding is not None:
            f0[pitch_padding] = 0

        pitch = f0_to_coarse(f0_denorm)  # start from 0
        pitch_embed = self.pitch_embed(pitch)
        return pitch_embed

    def run_decoder(self, decoder_inp, tgt_nonpadding, ret, infer, **kwargs):
        x = decoder_inp  # [B, T, H]
        x = self.decoder(x)
        x = self.mel_out(x)
        return x * tgt_nonpadding

    def cwt2f0_norm(self, cwt_spec, mean, std, mel2ph):
        f0 = cwt2f0(cwt_spec, mean, std, hparams['cwt_scales'])
        f0 = torch.cat(
            [f0] + [f0[:, -1:]] * (mel2ph.shape[1] - f0.shape[1]), 1)
        f0_norm = norm_f0(f0, None, hparams)
        return f0_norm

    def out2mel(self, out):
        return out

    @staticmethod
    def mel_norm(x):
        return (x + 5.5) / (6.3 / 2) - 1

    @staticmethod
    def mel_denorm(x):
        return (x + 1) * (6.3 / 2) - 5.5

    def expand_states(self, h, mel2ph):
        h = F.pad(h, [0, 0, 1, 0])
        mel2ph_ = mel2ph[..., None].repeat([1, 1, h.shape[-1]])
        h = torch.gather(h, 1, mel2ph_)  # [B, T, H]
        return h
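expand_states (and the identical gather in forward) is the FastSpeech length-regulator trick: mel2ph maps each mel frame to a 1-based phoneme index, F.pad shifts the hidden states right by one so index 0 becomes a zero row for padding frames, and torch.gather then expands phoneme-level states to frame level. A self-contained sketch of just that mechanism, with toy shapes and no model required:

import torch
import torch.nn.functional as F

B, T_txt, H = 1, 3, 2
h = torch.arange(B * T_txt * H, dtype=torch.float32).view(B, T_txt, H)  # 3 phoneme states
mel2ph = torch.tensor([[1, 1, 2, 3, 3, 0]])  # 6 mel frames; 0 marks a padding frame

h_pad = F.pad(h, [0, 0, 1, 0])           # prepend a zero state at index 0
idx = mel2ph[..., None].repeat(1, 1, H)  # [B, T_mel, H] gather indexes
frames = torch.gather(h_pad, 1, idx)     # [B, T_mel, H]
print(frames[0])  # rows: h[0], h[0], h[1], h[2], h[2], zeros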
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/selector/sde_team_given_tests.py
DELETED
@@ -1,56 +0,0 @@
from __future__ import annotations

from typing import TYPE_CHECKING, List

from agentverse.message import Message

from . import selector_registry as SelectorRegistry
from .base import BaseSelector

import json
import re

if TYPE_CHECKING:
    from agentverse.environments import BaseEnvironment

def extract(content: str, keyword: str):
    result = ""
    flag = False
    for line in content.split('\n'):
        if line.strip().startswith(keyword):
            flag = True
            continue
        if flag:
            result += line
            result += "\n"
    return result


@SelectorRegistry.register("sde_team_given_tests")
class SdeTeamGivenTestsSelector(BaseSelector):
    def select_message(self, environment: BaseEnvironment, messages: List[Message]) -> List[Message]:
        last_sender = environment.last_messages[0].sender
        selected = messages

        if last_sender == "code_writer":
            cur_code = extract(selected[0].content, "<code>:")
            environment.rule_params["code"] = cur_code
            selected[0].content = f"<current code>:\n{cur_code}"

        elif last_sender == "code_tester":

            from .code_api import execute_unit_tests
            feedback = execute_unit_tests(environment.rule_params["code"], eval(environment.unit_tests))
            environment.rule_params["feedback"] = feedback
            selected[0].content = f"<unit test feedback>:\n{feedback}"

            f_dict = json.loads(feedback)
            if f_dict["is_passing"]:
                environment.rule_params["end_flag"] = True

        elif last_sender == "code_reviewer":
            code_review = selected[0].content
            cur_code = environment.rule_params["code"]
            selected[0].content = f"{code_review}"

        return selected
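The extract helper above is a plain keyword-delimited parser: it discards everything up to and including the first line that starts with keyword and returns the remainder. A quick demonstration reusing that function as written (the sample agent message is invented for illustration):

def extract(content: str, keyword: str):
    result = ""
    flag = False
    for line in content.split('\n'):
        if line.strip().startswith(keyword):
            flag = True
            continue
        if flag:
            result += line
            result += "\n"
    return result

msg = "thoughts: rewrite the loop\n<code>:\ndef add(a, b):\n    return a + b"
print(extract(msg, "<code>:"))
# -> "def add(a, b):\n    return a + b\n"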
spaces/AiMimicry/sovits-models/utils.py
DELETED
@@ -1,542 +0,0 @@
import os
import glob
import re
import sys
import argparse
import logging
import json
import subprocess
import warnings
import random
import functools

import librosa
import numpy as np
from scipy.io.wavfile import read
import torch
from torch.nn import functional as F
from modules.commons import sequence_mask
from hubert import hubert_model

MATPLOTLIB_FLAG = False

logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging

f0_bin = 256
f0_max = 1100.0
f0_min = 50.0
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)


# def normalize_f0(f0, random_scale=True):
#     f0_norm = f0.clone()  # create a copy of the input Tensor
#     batch_size, _, frame_length = f0_norm.shape
#     for i in range(batch_size):
#         means = torch.mean(f0_norm[i, 0, :])
#         if random_scale:
#             factor = random.uniform(0.8, 1.2)
#         else:
#             factor = 1
#         f0_norm[i, 0, :] = (f0_norm[i, 0, :] - means) * factor
#     return f0_norm
# def normalize_f0(f0, random_scale=True):
#     means = torch.mean(f0[:, 0, :], dim=1, keepdim=True)
#     if random_scale:
#         factor = torch.Tensor(f0.shape[0], 1).uniform_(0.8, 1.2).to(f0.device)
#     else:
#         factor = torch.ones(f0.shape[0], 1, 1).to(f0.device)
#     f0_norm = (f0 - means.unsqueeze(-1)) * factor.unsqueeze(-1)
#     return f0_norm

def deprecated(func):
    """This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
    when the function is used."""
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        warnings.simplefilter('always', DeprecationWarning)  # turn off filter
        warnings.warn("Call to deprecated function {}.".format(func.__name__),
                      category=DeprecationWarning,
                      stacklevel=2)
        warnings.simplefilter('default', DeprecationWarning)  # reset filter
        return func(*args, **kwargs)
    return new_func

def normalize_f0(f0, x_mask, uv, random_scale=True):
    # calculate means based on x_mask
    uv_sum = torch.sum(uv, dim=1, keepdim=True)
    uv_sum[uv_sum == 0] = 9999
    means = torch.sum(f0[:, 0, :] * uv, dim=1, keepdim=True) / uv_sum

    if random_scale:
        factor = torch.Tensor(f0.shape[0], 1).uniform_(0.8, 1.2).to(f0.device)
    else:
        factor = torch.ones(f0.shape[0], 1).to(f0.device)
    # normalize f0 based on means and factor
    f0_norm = (f0 - means.unsqueeze(-1)) * factor.unsqueeze(-1)
    if torch.isnan(f0_norm).any():
        exit(0)
    return f0_norm * x_mask

def compute_f0_uv_torchcrepe(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512, device=None, cr_threshold=0.05):
    from modules.crepe import CrepePitchExtractor
    x = wav_numpy
    if p_len is None:
        p_len = x.shape[0] // hop_length
    else:
        assert abs(p_len - x.shape[0] // hop_length) < 4, "pad length error"

    f0_min = 50
    f0_max = 1100
    F0Creper = CrepePitchExtractor(hop_length=hop_length, f0_min=f0_min, f0_max=f0_max, device=device, threshold=cr_threshold)
    f0, uv = F0Creper(x[None, :].float(), sampling_rate, pad_to=p_len)
    return f0, uv

def plot_data_to_numpy(x, y):
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib
        matplotlib.use("Agg")
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger('matplotlib')
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt
    import numpy as np

    fig, ax = plt.subplots(figsize=(10, 2))
    plt.plot(x)
    plt.plot(y)
    plt.tight_layout()

    fig.canvas.draw()
    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data


def interpolate_f0(f0):

    data = np.reshape(f0, (f0.size, 1))

    vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
    vuv_vector[data > 0.0] = 1.0
    vuv_vector[data <= 0.0] = 0.0

    ip_data = data

    frame_number = data.size
    last_value = 0.0
    for i in range(frame_number):
        if data[i] <= 0.0:
            j = i + 1
            for j in range(i + 1, frame_number):
                if data[j] > 0.0:
                    break
            if j < frame_number - 1:
                if last_value > 0.0:
                    step = (data[j] - data[i - 1]) / float(j - i)
                    for k in range(i, j):
                        ip_data[k] = data[i - 1] + step * (k - i + 1)
                else:
                    for k in range(i, j):
                        ip_data[k] = data[j]
            else:
                for k in range(i, frame_number):
                    ip_data[k] = last_value
        else:
            ip_data[i] = data[i]  # this may not be necessary
            last_value = data[i]

    return ip_data[:, 0], vuv_vector[:, 0]


def compute_f0_parselmouth(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512):
    import parselmouth
    x = wav_numpy
    if p_len is None:
        p_len = x.shape[0] // hop_length
    else:
        assert abs(p_len - x.shape[0] // hop_length) < 4, "pad length error"
    time_step = hop_length / sampling_rate * 1000
    f0_min = 50
    f0_max = 1100
    f0 = parselmouth.Sound(x, sampling_rate).to_pitch_ac(
        time_step=time_step / 1000, voicing_threshold=0.6,
        pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']

    pad_size = (p_len - len(f0) + 1) // 2
    if pad_size > 0 or p_len - len(f0) - pad_size > 0:
        f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode='constant')
    return f0

def resize_f0(x, target_len):
    source = np.array(x)
    source[source < 0.001] = np.nan
    target = np.interp(np.arange(0, len(source) * target_len, len(source)) / target_len, np.arange(0, len(source)), source)
    res = np.nan_to_num(target)
    return res

def compute_f0_dio(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512):
    import pyworld
    if p_len is None:
        p_len = wav_numpy.shape[0] // hop_length
    f0, t = pyworld.dio(
        wav_numpy.astype(np.double),
        fs=sampling_rate,
        f0_ceil=800,
        frame_period=1000 * hop_length / sampling_rate,
    )
    f0 = pyworld.stonemask(wav_numpy.astype(np.double), f0, t, sampling_rate)
    for index, pitch in enumerate(f0):
        f0[index] = round(pitch, 1)
    return resize_f0(f0, p_len)

def f0_to_coarse(f0):
    is_torch = isinstance(f0, torch.Tensor)
    f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1

    f0_mel[f0_mel <= 1] = 1
    f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
    f0_coarse = (f0_mel + 0.5).int() if is_torch else np.rint(f0_mel).astype(np.int)
    assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
    return f0_coarse


def get_hubert_model():
    vec_path = "hubert/checkpoint_best_legacy_500.pt"
    print("load model(s) from {}".format(vec_path))
    from fairseq import checkpoint_utils
    models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
        [vec_path],
        suffix="",
    )
    model = models[0]
    model.eval()
    return model

def get_hubert_content(hmodel, wav_16k_tensor):
    feats = wav_16k_tensor
    if feats.dim() == 2:  # double channels
        feats = feats.mean(-1)
    assert feats.dim() == 1, feats.dim()
    feats = feats.view(1, -1)
    padding_mask = torch.BoolTensor(feats.shape).fill_(False)
    inputs = {
        "source": feats.to(wav_16k_tensor.device),
        "padding_mask": padding_mask.to(wav_16k_tensor.device),
        "output_layer": 9,  # layer 9
    }
    with torch.no_grad():
        logits = hmodel.extract_features(**inputs)
        feats = hmodel.final_proj(logits[0])
    return feats.transpose(1, 2)


def get_content(cmodel, y):
    with torch.no_grad():
        c = cmodel.extract_features(y.squeeze(1))[0]
    c = c.transpose(1, 2)
    return c


def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False):
    assert os.path.isfile(checkpoint_path)
    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
    iteration = checkpoint_dict['iteration']
    learning_rate = checkpoint_dict['learning_rate']
    if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None:
        optimizer.load_state_dict(checkpoint_dict['optimizer'])
    saved_state_dict = checkpoint_dict['model']
    if hasattr(model, 'module'):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    new_state_dict = {}
    for k, v in state_dict.items():
        try:
            # assert "dec" in k or "disc" in k
            # print("load", k)
            new_state_dict[k] = saved_state_dict[k]
            assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape)
        except:
            print("error, %s is not in the checkpoint" % k)
            logger.info("%s is not in the checkpoint" % k)
            new_state_dict[k] = v
    if hasattr(model, 'module'):
        model.module.load_state_dict(new_state_dict)
    else:
        model.load_state_dict(new_state_dict)
    print("load ")
    logger.info("Loaded checkpoint '{}' (iteration {})".format(
        checkpoint_path, iteration))
    return model, optimizer, learning_rate, iteration


def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
    logger.info("Saving model and optimizer state at iteration {} to {}".format(
        iteration, checkpoint_path))
    if hasattr(model, 'module'):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    torch.save({'model': state_dict,
                'iteration': iteration,
                'optimizer': optimizer.state_dict(),
                'learning_rate': learning_rate}, checkpoint_path)

def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True):
    """Freeing up space by deleting saved ckpts

    Arguments:
    path_to_models    --  Path to the model directory
    n_ckpts_to_keep   --  Number of ckpts to keep, excluding G_0.pth and D_0.pth
    sort_by_time      --  True -> chronologically delete ckpts
                          False -> lexicographically delete ckpts
    """
    ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))]
    name_key = (lambda _f: int(re.compile('._(\d+)\.pth').match(_f).group(1)))
    time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f)))
    sort_key = time_key if sort_by_time else name_key
    x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')], key=sort_key)
    to_del = [os.path.join(path_to_models, fn) for fn in
              (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])]
    del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}")
    del_routine = lambda x: [os.remove(x), del_info(x)]
    rs = [del_routine(fn) for fn in to_del]

def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
    for k, v in scalars.items():
        writer.add_scalar(k, v, global_step)
    for k, v in histograms.items():
        writer.add_histogram(k, v, global_step)
    for k, v in images.items():
        writer.add_image(k, v, global_step, dataformats='HWC')
    for k, v in audios.items():
        writer.add_audio(k, v, global_step, audio_sampling_rate)


def latest_checkpoint_path(dir_path, regex="G_*.pth"):
    f_list = glob.glob(os.path.join(dir_path, regex))
    f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
    x = f_list[-1]
    print(x)
    return x


def plot_spectrogram_to_numpy(spectrogram):
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib
        matplotlib.use("Agg")
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger('matplotlib')
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt
    import numpy as np

    fig, ax = plt.subplots(figsize=(10, 2))
    im = ax.imshow(spectrogram, aspect="auto", origin="lower",
                   interpolation='none')
    plt.colorbar(im, ax=ax)
    plt.xlabel("Frames")
    plt.ylabel("Channels")
    plt.tight_layout()

    fig.canvas.draw()
    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data


def plot_alignment_to_numpy(alignment, info=None):
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib
        matplotlib.use("Agg")
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger('matplotlib')
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt
    import numpy as np

    fig, ax = plt.subplots(figsize=(6, 4))
    im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
                   interpolation='none')
    fig.colorbar(im, ax=ax)
    xlabel = 'Decoder timestep'
    if info is not None:
        xlabel += '\n\n' + info
    plt.xlabel(xlabel)
    plt.ylabel('Encoder timestep')
    plt.tight_layout()

    fig.canvas.draw()
    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data


def load_wav_to_torch(full_path):
    sampling_rate, data = read(full_path)
    return torch.FloatTensor(data.astype(np.float32)), sampling_rate


def load_filepaths_and_text(filename, split="|"):
    with open(filename, encoding='utf-8') as f:
        filepaths_and_text = [line.strip().split(split) for line in f]
    return filepaths_and_text


def get_hparams(init=True):
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
                        help='JSON file for configuration')
    parser.add_argument('-m', '--model', type=str, required=True,
                        help='Model name')

    args = parser.parse_args()
    model_dir = os.path.join("./logs", args.model)

    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    config_path = args.config
    config_save_path = os.path.join(model_dir, "config.json")
    if init:
        with open(config_path, "r") as f:
            data = f.read()
        with open(config_save_path, "w") as f:
            f.write(data)
    else:
        with open(config_save_path, "r") as f:
            data = f.read()
    config = json.loads(data)

    hparams = HParams(**config)
    hparams.model_dir = model_dir
    return hparams


def get_hparams_from_dir(model_dir):
    config_save_path = os.path.join(model_dir, "config.json")
    with open(config_save_path, "r") as f:
        data = f.read()
    config = json.loads(data)

    hparams = HParams(**config)
    hparams.model_dir = model_dir
    return hparams


def get_hparams_from_file(config_path):
    with open(config_path, "r") as f:
        data = f.read()
    config = json.loads(data)

    hparams = HParams(**config)
    return hparams


def check_git_hash(model_dir):
|
449 |
-
source_dir = os.path.dirname(os.path.realpath(__file__))
|
450 |
-
if not os.path.exists(os.path.join(source_dir, ".git")):
|
451 |
-
logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
|
452 |
-
source_dir
|
453 |
-
))
|
454 |
-
return
|
455 |
-
|
456 |
-
cur_hash = subprocess.getoutput("git rev-parse HEAD")
|
457 |
-
|
458 |
-
path = os.path.join(model_dir, "githash")
|
459 |
-
if os.path.exists(path):
|
460 |
-
saved_hash = open(path).read()
|
461 |
-
if saved_hash != cur_hash:
|
462 |
-
logger.warn("git hash values are different. {}(saved) != {}(current)".format(
|
463 |
-
saved_hash[:8], cur_hash[:8]))
|
464 |
-
else:
|
465 |
-
open(path, "w").write(cur_hash)
|
466 |
-
|
467 |
-
|
468 |
-
def get_logger(model_dir, filename="train.log"):
|
469 |
-
global logger
|
470 |
-
logger = logging.getLogger(os.path.basename(model_dir))
|
471 |
-
logger.setLevel(logging.DEBUG)
|
472 |
-
|
473 |
-
formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
|
474 |
-
if not os.path.exists(model_dir):
|
475 |
-
os.makedirs(model_dir)
|
476 |
-
h = logging.FileHandler(os.path.join(model_dir, filename))
|
477 |
-
h.setLevel(logging.DEBUG)
|
478 |
-
h.setFormatter(formatter)
|
479 |
-
logger.addHandler(h)
|
480 |
-
return logger
|
481 |
-
|
482 |
-
|
483 |
-
def repeat_expand_2d(content, target_len):
|
484 |
-
# content : [h, t]
|
485 |
-
|
486 |
-
src_len = content.shape[-1]
|
487 |
-
target = torch.zeros([content.shape[0], target_len], dtype=torch.float).to(content.device)
|
488 |
-
temp = torch.arange(src_len+1) * target_len / src_len
|
489 |
-
current_pos = 0
|
490 |
-
for i in range(target_len):
|
491 |
-
if i < temp[current_pos+1]:
|
492 |
-
target[:, i] = content[:, current_pos]
|
493 |
-
else:
|
494 |
-
current_pos += 1
|
495 |
-
target[:, i] = content[:, current_pos]
|
496 |
-
|
497 |
-
return target
|
498 |
-
|
499 |
-
|
500 |
-
def mix_model(model_paths,mix_rate,mode):
|
501 |
-
mix_rate = torch.FloatTensor(mix_rate)/100
|
502 |
-
model_tem = torch.load(model_paths[0])
|
503 |
-
models = [torch.load(path)["model"] for path in model_paths]
|
504 |
-
if mode == 0:
|
505 |
-
mix_rate = F.softmax(mix_rate,dim=0)
|
506 |
-
for k in model_tem["model"].keys():
|
507 |
-
model_tem["model"][k] = torch.zeros_like(model_tem["model"][k])
|
508 |
-
for i,model in enumerate(models):
|
509 |
-
model_tem["model"][k] += model[k]*mix_rate[i]
|
510 |
-
torch.save(model_tem,os.path.join(os.path.curdir,"output.pth"))
|
511 |
-
return os.path.join(os.path.curdir,"output.pth")
|
512 |
-
|
513 |
-
class HParams():
|
514 |
-
def __init__(self, **kwargs):
|
515 |
-
for k, v in kwargs.items():
|
516 |
-
if type(v) == dict:
|
517 |
-
v = HParams(**v)
|
518 |
-
self[k] = v
|
519 |
-
|
520 |
-
def keys(self):
|
521 |
-
return self.__dict__.keys()
|
522 |
-
|
523 |
-
def items(self):
|
524 |
-
return self.__dict__.items()
|
525 |
-
|
526 |
-
def values(self):
|
527 |
-
return self.__dict__.values()
|
528 |
-
|
529 |
-
def __len__(self):
|
530 |
-
return len(self.__dict__)
|
531 |
-
|
532 |
-
def __getitem__(self, key):
|
533 |
-
return getattr(self, key)
|
534 |
-
|
535 |
-
def __setitem__(self, key, value):
|
536 |
-
return setattr(self, key, value)
|
537 |
-
|
538 |
-
def __contains__(self, key):
|
539 |
-
return key in self.__dict__
|
540 |
-
|
541 |
-
def __repr__(self):
|
542 |
-
return self.__dict__.__repr__()
|
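A minimal usage sketch for the helpers above (the config path and log directory are illustrative assumptions, not taken from this repo):

```python
# Hypothetical usage of the utilities above; all paths are placeholders.
import utils

# Nested config dicts are wrapped recursively, so values read as attributes.
hps = utils.get_hparams_from_file("./configs/config.json")

# Pick the newest generator checkpoint by its step number, then prune old
# checkpoints, keeping the two most recent G_*.pth / D_*.pth
# (G_0.pth and D_0.pth are always spared).
ckpt_path = utils.latest_checkpoint_path("./logs/44k", "G_*.pth")
utils.clean_checkpoints("./logs/44k/", n_ckpts_to_keep=2, sort_by_time=True)
```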
spaces/Aki004/herta-so-vits/vdecoder/nsf_hifigan/models.py
DELETED
@@ -1,435 +0,0 @@
import os
import json
from .env import AttrDict
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from .utils import init_weights, get_padding

LRELU_SLOPE = 0.1


def load_model(model_path, device='cuda'):
    config_file = os.path.join(os.path.split(model_path)[0], 'config.json')
    with open(config_file) as f:
        data = f.read()

    json_config = json.loads(data)
    h = AttrDict(json_config)

    generator = Generator(h).to(device)

    cp_dict = torch.load(model_path, map_location=device)
    generator.load_state_dict(cp_dict['generator'])
    generator.eval()
    generator.remove_weight_norm()
    del cp_dict
    return generator, h


class ResBlock1(torch.nn.Module):
    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1, self).__init__()
        self.h = h
        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
                               padding=get_padding(kernel_size, dilation[2])))
        ])
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1)))
        ])
        self.convs2.apply(init_weights)

    def forward(self, x):
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, LRELU_SLOPE)
            xt = c1(xt)
            xt = F.leaky_relu(xt, LRELU_SLOPE)
            xt = c2(xt)
            x = xt + x
        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)


class ResBlock2(torch.nn.Module):
    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        self.h = h
        self.convs = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1])))
        ])
        self.convs.apply(init_weights)

    def forward(self, x):
        for c in self.convs:
            xt = F.leaky_relu(x, LRELU_SLOPE)
            xt = c(xt)
            x = xt + x
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)


class SineGen(torch.nn.Module):
    """Definition of the sine generator.
    SineGen(samp_rate, harmonic_num=0,
            sine_amp=0.1, noise_std=0.003,
            voiced_threshold=0,
            flag_for_pulse=False)
    samp_rate: sampling rate in Hz
    harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of the sine waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SineGen is used inside PulseGen (default False)
    Note: when flag_for_pulse is True, the first time step of a voiced
    segment is always sin(np.pi) or cos(0)
    """

    def __init__(self, samp_rate, harmonic_num=0,
                 sine_amp=0.1, noise_std=0.003,
                 voiced_threshold=0):
        super(SineGen, self).__init__()
        self.sine_amp = sine_amp
        self.noise_std = noise_std
        self.harmonic_num = harmonic_num
        self.dim = self.harmonic_num + 1
        self.sampling_rate = samp_rate
        self.voiced_threshold = voiced_threshold

    def _f02uv(self, f0):
        # generate uv signal
        uv = torch.ones_like(f0)
        uv = uv * (f0 > self.voiced_threshold)
        return uv

    @torch.no_grad()
    def forward(self, f0, upp):
        """sine_tensor, uv = forward(f0)
        input F0: tensor(batchsize=1, length, dim=1)
                  f0 for unvoiced steps should be 0
        output sine_tensor: tensor(batchsize=1, length, dim)
        output uv: tensor(batchsize=1, length, 1)
        """
        f0 = f0.unsqueeze(-1)
        fn = torch.multiply(f0, torch.arange(1, self.dim + 1, device=f0.device).reshape((1, 1, -1)))
        rad_values = (fn / self.sampling_rate) % 1  # % 1 means the product of n_har cannot be optimized for post-processing
        rand_ini = torch.rand(fn.shape[0], fn.shape[2], device=fn.device)
        rand_ini[:, 0] = 0
        rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
        is_half = rad_values.dtype is not torch.float32
        tmp_over_one = torch.cumsum(rad_values.double(), 1)  # % 1  # % 1 means the following cumsum can no longer be optimized
        if is_half:
            tmp_over_one = tmp_over_one.half()
        else:
            tmp_over_one = tmp_over_one.float()
        tmp_over_one *= upp
        tmp_over_one = F.interpolate(
            tmp_over_one.transpose(2, 1), scale_factor=upp,
            mode='linear', align_corners=True
        ).transpose(2, 1)
        rad_values = F.interpolate(rad_values.transpose(2, 1), scale_factor=upp, mode='nearest').transpose(2, 1)
        tmp_over_one %= 1
        tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
        cumsum_shift = torch.zeros_like(rad_values)
        cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
        rad_values = rad_values.double()
        cumsum_shift = cumsum_shift.double()
        sine_waves = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi)
        if is_half:
            sine_waves = sine_waves.half()
        else:
            sine_waves = sine_waves.float()
        sine_waves = sine_waves * self.sine_amp
        uv = self._f02uv(f0)
        uv = F.interpolate(uv.transpose(2, 1), scale_factor=upp, mode='nearest').transpose(2, 1)
        noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
        noise = noise_amp * torch.randn_like(sine_waves)
        sine_waves = sine_waves * uv + noise
        return sine_waves, uv, noise


class SourceModuleHnNSF(torch.nn.Module):
    """SourceModule for hn-nsf
    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0)
    sampling_rate: sampling rate in Hz
    harmonic_num: number of harmonics above F0 (default: 0)
    sine_amp: amplitude of the sine source signal (default: 0.1)
    add_noise_std: std of additive Gaussian noise (default: 0.003);
        note that the amplitude of noise in unvoiced regions is decided
        by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
    Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
    F0_sampled (batchsize, length, 1)
    Sine_source (batchsize, length, 1)
    noise_source (batchsize, length, 1)
    uv (batchsize, length, 1)
    """

    def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0):
        super(SourceModuleHnNSF, self).__init__()

        self.sine_amp = sine_amp
        self.noise_std = add_noise_std

        # to produce sine waveforms
        self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
                                 sine_amp, add_noise_std, voiced_threshod)

        # to merge source harmonics into a single excitation
        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
        self.l_tanh = torch.nn.Tanh()

    def forward(self, x, upp):
        sine_wavs, uv, _ = self.l_sin_gen(x, upp)
        sine_merge = self.l_tanh(self.l_linear(sine_wavs))
        return sine_merge


class Generator(torch.nn.Module):
    def __init__(self, h):
        super(Generator, self).__init__()
        self.h = h
        self.num_kernels = len(h.resblock_kernel_sizes)
        self.num_upsamples = len(h.upsample_rates)
        self.m_source = SourceModuleHnNSF(
            sampling_rate=h.sampling_rate,
            harmonic_num=8
        )
        self.noise_convs = nn.ModuleList()
        self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3))
        resblock = ResBlock1 if h.resblock == '1' else ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
            c_cur = h.upsample_initial_channel // (2 ** (i + 1))
            self.ups.append(weight_norm(
                ConvTranspose1d(h.upsample_initial_channel // (2 ** i), h.upsample_initial_channel // (2 ** (i + 1)),
                                k, u, padding=(k - u) // 2)))
            if i + 1 < len(h.upsample_rates):
                stride_f0 = int(np.prod(h.upsample_rates[i + 1:]))
                self.noise_convs.append(Conv1d(
                    1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2))
            else:
                self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
        self.resblocks = nn.ModuleList()
        ch = h.upsample_initial_channel
        for i in range(len(self.ups)):
            ch //= 2
            for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
                self.resblocks.append(resblock(h, ch, k, d))

        self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
        self.ups.apply(init_weights)
        self.conv_post.apply(init_weights)
        self.upp = int(np.prod(h.upsample_rates))

    def forward(self, x, f0):
        har_source = self.m_source(f0, self.upp).transpose(1, 2)
        x = self.conv_pre(x)
        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, LRELU_SLOPE)
            x = self.ups[i](x)
            x_source = self.noise_convs[i](har_source)
            x = x + x_source
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def remove_weight_norm(self):
        print('Removing weight norm...')
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
        remove_weight_norm(self.conv_pre)
        remove_weight_norm(self.conv_post)


class DiscriminatorP(torch.nn.Module):
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList([
            norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
            norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
        ])
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class MultiPeriodDiscriminator(torch.nn.Module):
    def __init__(self, periods=None):
        super(MultiPeriodDiscriminator, self).__init__()
        self.periods = periods if periods is not None else [2, 3, 5, 7, 11]
        self.discriminators = nn.ModuleList()
        for period in self.periods:
            self.discriminators.append(DiscriminatorP(period))

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class DiscriminatorS(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(DiscriminatorS, self).__init__()
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList([
            norm_f(Conv1d(1, 128, 15, 1, padding=7)),
            norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
            norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
            norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
            norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
            norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
            norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
        ])
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        fmap = []
        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class MultiScaleDiscriminator(torch.nn.Module):
    def __init__(self):
        super(MultiScaleDiscriminator, self).__init__()
        self.discriminators = nn.ModuleList([
            DiscriminatorS(use_spectral_norm=True),
            DiscriminatorS(),
            DiscriminatorS(),
        ])
        self.meanpools = nn.ModuleList([
            AvgPool1d(4, 2, padding=2),
            AvgPool1d(4, 2, padding=2)
        ])

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            if i != 0:
                y = self.meanpools[i - 1](y)
                y_hat = self.meanpools[i - 1](y_hat)
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


def feature_loss(fmap_r, fmap_g):
    loss = 0
    for dr, dg in zip(fmap_r, fmap_g):
        for rl, gl in zip(dr, dg):
            loss += torch.mean(torch.abs(rl - gl))

    return loss * 2


def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    loss = 0
    r_losses = []
    g_losses = []
    for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
        r_loss = torch.mean((1 - dr) ** 2)
        g_loss = torch.mean(dg ** 2)
        loss += (r_loss + g_loss)
        r_losses.append(r_loss.item())
        g_losses.append(g_loss.item())

    return loss, r_losses, g_losses


def generator_loss(disc_outputs):
    loss = 0
    gen_losses = []
    for dg in disc_outputs:
        l = torch.mean((1 - dg) ** 2)
        gen_losses.append(l)
        loss += l

    return loss, gen_losses
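A hedged sketch of driving this vocoder end to end (the checkpoint path and tensor sizes are assumptions; only `load_model` and `Generator` are the definitions above):

```python
# Illustrative only: synthesize a waveform from a mel spectrogram plus an F0 track.
import torch

generator, h = load_model("checkpoints/nsf_hifigan/model.pth", device="cpu")  # hypothetical path
mel = torch.randn(1, h.num_mels, 100)   # [batch, mel_bins, frames]
f0 = torch.full((1, 100), 220.0)        # [batch, frames] in Hz; 0 marks unvoiced frames
with torch.no_grad():
    wav = generator(mel, f0)            # [1, 1, frames * prod(h.upsample_rates)]
```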
spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/models.py
DELETED
@@ -1,541 +0,0 @@
import math
import torch
from torch import nn
from torch.nn import functional as F

import commons
import modules
import attentions
import monotonic_align  # used in SynthesizerTrn.forward; the original file omitted this import

from torch.nn import Conv1d, ConvTranspose1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from commons import init_weights, get_padding


class StochasticDurationPredictor(nn.Module):
    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
        super().__init__()
        filter_channels = in_channels  # this line needs to be removed in a future version.
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.log_flow = modules.Log()
        self.flows = nn.ModuleList()
        self.flows.append(modules.ElementwiseAffine(2))
        for i in range(n_flows):
            self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
            self.flows.append(modules.Flip())

        self.post_pre = nn.Conv1d(1, filter_channels, 1)
        self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
        self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
        self.post_flows = nn.ModuleList()
        self.post_flows.append(modules.ElementwiseAffine(2))
        for i in range(4):
            self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
            self.post_flows.append(modules.Flip())

        self.pre = nn.Conv1d(in_channels, filter_channels, 1)
        self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
        self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, filter_channels, 1)

    def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
        x = torch.detach(x)
        x = self.pre(x)
        if g is not None:
            g = torch.detach(g)
            x = x + self.cond(g)
        x = self.convs(x, x_mask)
        x = self.proj(x) * x_mask

        if not reverse:
            flows = self.flows
            assert w is not None

            logdet_tot_q = 0
            h_w = self.post_pre(w)
            h_w = self.post_convs(h_w, x_mask)
            h_w = self.post_proj(h_w) * x_mask
            e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
            z_q = e_q
            for flow in self.post_flows:
                z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
                logdet_tot_q += logdet_q
            z_u, z1 = torch.split(z_q, [1, 1], 1)
            u = torch.sigmoid(z_u) * x_mask
            z0 = (w - u) * x_mask
            logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2])
            logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q

            logdet_tot = 0
            z0, logdet = self.log_flow(z0, x_mask)
            logdet_tot += logdet
            z = torch.cat([z0, z1], 1)
            for flow in flows:
                z, logdet = flow(z, x_mask, g=x, reverse=reverse)
                logdet_tot = logdet_tot + logdet
            nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot
            return nll + logq  # [b]
        else:
            flows = list(reversed(self.flows))
            flows = flows[:-2] + [flows[-1]]  # remove a useless vflow
            z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
            for flow in flows:
                z = flow(z, x_mask, g=x, reverse=reverse)
            z0, z1 = torch.split(z, [1, 1], 1)
            logw = z0
            return logw


class DurationPredictor(nn.Module):
    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
        super().__init__()

        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.gin_channels = gin_channels

        self.drop = nn.Dropout(p_dropout)
        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
        self.norm_1 = modules.LayerNorm(filter_channels)
        self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
        self.norm_2 = modules.LayerNorm(filter_channels)
        self.proj = nn.Conv1d(filter_channels, 1, 1)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, in_channels, 1)

    def forward(self, x, x_mask, g=None):
        x = torch.detach(x)
        if g is not None:
            g = torch.detach(g)
            x = x + self.cond(g)
        x = self.conv_1(x * x_mask)
        x = torch.relu(x)
        x = self.norm_1(x)
        x = self.drop(x)
        x = self.conv_2(x * x_mask)
        x = torch.relu(x)
        x = self.norm_2(x)
        x = self.drop(x)
        x = self.proj(x * x_mask)
        return x * x_mask


class TextEncoder(nn.Module):
    def __init__(self,
                 n_vocab,
                 out_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout,
                 emotion_embedding):
        super().__init__()
        self.n_vocab = n_vocab
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.emotion_embedding = emotion_embedding

        if self.n_vocab != 0:
            self.emb = nn.Embedding(n_vocab, hidden_channels)
            if emotion_embedding:
                self.emotion_emb = nn.Linear(1024, hidden_channels)
            nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5)

        self.encoder = attentions.Encoder(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout)
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, emotion_embedding=None):
        if self.n_vocab != 0:
            x = self.emb(x) * math.sqrt(self.hidden_channels)  # [b, t, h]
        if emotion_embedding is not None:
            x = x + self.emotion_emb(emotion_embedding.unsqueeze(1))
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)

        x = self.encoder(x * x_mask, x_mask)
        stats = self.proj(x) * x_mask

        m, logs = torch.split(stats, self.out_channels, dim=1)
        return x, m, logs, x_mask


class ResidualCouplingBlock(nn.Module):
    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 n_flows=4,
                 gin_channels=0):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.flows = nn.ModuleList()
        for i in range(n_flows):
            self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
            self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        if not reverse:
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        else:
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        return x


class PosteriorEncoder(nn.Module):
    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 gin_channels=0):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels

        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
        self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, g=None):
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
        x = self.pre(x) * x_mask
        x = self.enc(x, x_mask, g=g)
        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
        return z, m, logs, x_mask


class Generator(torch.nn.Module):
    def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
        super(Generator, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
        resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(weight_norm(
                ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)),
                                k, u, padding=(k - u) // 2)))

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

    def forward(self, x, g=None):
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def remove_weight_norm(self):
        print('Removing weight norm...')
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()


class DiscriminatorP(torch.nn.Module):
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.use_spectral_norm = use_spectral_norm
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList([
            norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
        ])
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class DiscriminatorS(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(DiscriminatorS, self).__init__()
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList([
            norm_f(Conv1d(1, 16, 15, 1, padding=7)),
            norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
            norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
            norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
            norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
            norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
        ])
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        fmap = []

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class MultiPeriodDiscriminator(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(MultiPeriodDiscriminator, self).__init__()
        periods = [2, 3, 5, 7, 11]

        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
        discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
        self.discriminators = nn.ModuleList(discs)

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class SynthesizerTrn(nn.Module):
    """
    Synthesizer for Training
    """

    def __init__(self,
                 n_vocab,
                 spec_channels,
                 segment_size,
                 inter_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout,
                 resblock,
                 resblock_kernel_sizes,
                 resblock_dilation_sizes,
                 upsample_rates,
                 upsample_initial_channel,
                 upsample_kernel_sizes,
                 n_speakers=0,
                 gin_channels=0,
                 use_sdp=True,
                 emotion_embedding=False,
                 **kwargs):

        super().__init__()
        self.n_vocab = n_vocab
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.n_speakers = n_speakers
        self.gin_channels = gin_channels

        self.use_sdp = use_sdp

        self.enc_p = TextEncoder(n_vocab,
                                 inter_channels,
                                 hidden_channels,
                                 filter_channels,
                                 n_heads,
                                 n_layers,
                                 kernel_size,
                                 p_dropout,
                                 emotion_embedding)
        self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
        self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
        self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)

        if use_sdp:
            self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
        else:
            self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)

        if n_speakers > 1:
            self.emb_g = nn.Embedding(n_speakers, gin_channels)

    def forward(self, x, x_lengths, y, y_lengths, sid=None):

        x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
        if self.n_speakers > 0:
            g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
        else:
            g = None

        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
        z_p = self.flow(z, y_mask, g=g)

        with torch.no_grad():
            # negative cross-entropy
            s_p_sq_r = torch.exp(-2 * logs_p)  # [b, d, t]
            neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True)  # [b, 1, t_s]
            neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r)  # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
            neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r))  # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
            neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True)  # [b, 1, t_s]
            neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4

            attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
            attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()

        w = attn.sum(2)
        if self.use_sdp:
            l_length = self.dp(x, x_mask, w, g=g)
            l_length = l_length / torch.sum(x_mask)
        else:
            logw_ = torch.log(w + 1e-6) * x_mask
            logw = self.dp(x, x_mask, g=g)
            l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask)  # for averaging

        # expand prior
        m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
        logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)

        z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
        o = self.dec(z_slice, g=g)
        return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)

    def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None, emotion_embedding=None):
        x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding)
        if self.n_speakers > 0:
            g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
        else:
            g = None

        if self.use_sdp:
            logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
        else:
            logw = self.dp(x, x_mask, g=g)
        w = torch.exp(logw) * x_mask * length_scale
        w_ceil = torch.ceil(w)
        y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
        y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
        attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
        attn = commons.generate_path(w_ceil, attn_mask)

        m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
        logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']

        z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
        z = self.flow(z_p, y_mask, g=g, reverse=True)
        o = self.dec((z * y_mask)[:, :, :max_len], g=g)
        return o, attn, y_mask, (z, z_p, m_p, logs_p)

    def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
        assert self.n_speakers > 0, "n_speakers has to be larger than 0."
        g_src = self.emb_g(sid_src).unsqueeze(-1)
        g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
        z_p = self.flow(z, y_mask, g=g_src)
        z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
        o_hat = self.dec(z_hat * y_mask, g=g_tgt)
        return o_hat, y_mask, (z, z_p, z_hat)
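For orientation, a minimal inference sketch with `SynthesizerTrn` (every hyperparameter value below is a placeholder, not this repo's actual config, and the phoneme IDs are hypothetical):

```python
# Illustrative only: single-speaker VITS inference with placeholder hyperparameters.
import torch

net_g = SynthesizerTrn(
    n_vocab=100, spec_channels=513, segment_size=32,
    inter_channels=192, hidden_channels=192, filter_channels=768,
    n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.1,
    resblock="1",
    resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    upsample_rates=[8, 8, 2, 2], upsample_initial_channel=512,
    upsample_kernel_sizes=[16, 16, 4, 4],
    n_speakers=0, gin_channels=0,
).eval()

phoneme_ids = torch.LongTensor([[12, 5, 42, 7]])   # hypothetical symbol IDs
lengths = torch.LongTensor([phoneme_ids.size(1)])
with torch.no_grad():
    # infer returns (audio, attn, y_mask, latents); [0][0, 0] is the 1-D waveform
    audio = net_g.infer(phoneme_ids, lengths, noise_scale=0.667,
                        length_scale=1.0, noise_scale_w=0.8)[0][0, 0]
```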
spaces/Aleqsd/openjourney/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: openjourney
emoji: 👀
colorFrom: gray
colorTo: green
sdk: gradio
sdk_version: 3.10.1
app_file: app.py
pinned: false
duplicated_from: akhaliq/openjourney
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AlexWang/lama/fetch_data/places_standard_test_val_sample.sh
DELETED
@@ -1,22 +0,0 @@
mkdir -p places_standard_dataset/val_hires/
mkdir -p places_standard_dataset/visual_test_hires/


# randomly sample images for test and vis
OUT=$(python3 fetch_data/sampler.py)
echo ${OUT}

FILELIST=$(cat places_standard_dataset/original/test_random_files.txt)

for i in $FILELIST
do
    cp ${i} places_standard_dataset/val_hires/
done

FILELIST=$(cat places_standard_dataset/original/val_random_files.txt)

for i in $FILELIST
do
    cp ${i} places_standard_dataset/visual_test_hires/
done
spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/latex/attention/introduction.tex
DELETED
@@ -1,18 +0,0 @@
Recurrent neural networks, long short-term memory \citep{hochreiter1997} and gated recurrent \citep{gruEval14} neural networks in particular, have been firmly established as state of the art approaches in sequence modeling and transduction problems such as language modeling and machine translation \citep{sutskever14, bahdanau2014neural, cho2014learning}. Numerous efforts have since continued to push the boundaries of recurrent language models and encoder-decoder architectures \citep{wu2016google,luong2015effective,jozefowicz2016exploring}.

Recurrent models typically factor computation along the symbol positions of the input and output sequences. Aligning the positions to steps in computation time, they generate a sequence of hidden states $h_t$, as a function of the previous hidden state $h_{t-1}$ and the input for position $t$. This inherently sequential nature precludes parallelization within training examples, which becomes critical at longer sequence lengths, as memory constraints limit batching across examples.
%\marginpar{not sure if the memory constraints are understandable here}
Recent work has achieved significant improvements in computational efficiency through factorization tricks \citep{Kuchaiev2017Factorization} and conditional computation \citep{shazeer2017outrageously}, while also improving model performance in case of the latter. The fundamental constraint of sequential computation, however, remains.

%\marginpar{@all: there is work on analyzing what attention really does in seq2seq models, couldn't find it right away}

Attention mechanisms have become an integral part of compelling sequence modeling and transduction models in various tasks, allowing modeling of dependencies without regard to their distance in the input or output sequences \citep{bahdanau2014neural, structuredAttentionNetworks}. In all but a few cases \citep{decomposableAttnModel}, however, such attention mechanisms are used in conjunction with a recurrent network.

%\marginpar{not sure if "cross-positional communication" is understandable without explanation}
%\marginpar{insert exact training times and stats for the model that reaches sota earliest, maybe even a single GPU model?}

In this work we propose the Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. The Transformer allows for significantly more parallelization and can reach a new state of the art in translation quality after being trained for as little as twelve hours on eight P100 GPUs.
%\marginpar{you removed the constant number of repetitions part. I wrote it because I wanted to make it clear that the model does not only perform attention once, while it's also not recurrent. I thought that might be important to get across early.}

% Just a standard paragraph with citations, rewrite.
%After the seminal papers of \citep{sutskever14}, \citep{bahdanau2014neural}, and \citep{cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation and language modeling with recurrent sequence models. Recent effort \citep{shazeer2017outrageously} has combined the power of conditional computation with sequence models to train very large models for machine translation, pushing SOTA at lower computational cost. Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_{t-1}$. This dependence on the previous hidden state prevents recurrent models from processing multiple inputs at once, and their time complexity is a linear function of the length of the input and output, both during training and inference. [What I want to say here is that although this is fine during decoding, at training time, we are given both input and output and this linear nature does not allow the RNN to process all inputs and outputs simultaneously and they haven't been used on datasets that are of the scale of the web. What's the largest dataset we have? Talk about Nvidia and possibly others' efforts to speed up things, and possibly other efforts that alleviate this, but are still limited by its computational nature]. Rest of the intro: What if you could construct the state based on the actual inputs and outputs, then you could construct them all at once. This has been the foundation of many promising recent efforts, bytenet,facenet (Also talk about quasi rnn here). Now we talk about attention!! Along with cell architectures such as long short-term memory (LSTM) \citep{hochreiter1997}, and gated recurrent units (GRUs) \citep{cho2014learning}, attention has emerged as an essential ingredient in successful sequence models, in particular for machine translation. In recent years, many, if not all, state-of-the-art (SOTA) results in machine translation have been achieved with attention-based sequence models \citep{wu2016google,luong2015effective,jozefowicz2016exploring}. Talk about the neon work on how it played with attention to do self attention! Then talk about what we do.
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/textual_inversion.py
DELETED
@@ -1,1020 +0,0 @@
import argparse
import itertools
import math
import os
import random
from pathlib import Path
from typing import Iterable, Optional

import numpy as np
import PIL
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration, set_seed
from huggingface_hub import HfFolder, Repository, whoami
from neural_compressor.utils import logger
from packaging import version
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
# ------------------------------------------------------------------------------


def save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path):
    logger.info("Saving embeddings")
    learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_id]
    learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()}
    torch.save(learned_embeds_dict, save_path)


def parse_args():
    parser = argparse.ArgumentParser(description="Example of distillation for quantization on Textual Inversion.")
    parser.add_argument(
        "--save_steps",
        type=int,
        default=500,
        help="Save learned_embeds.bin every X updates steps.",
    )
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--tokenizer_name",
        type=str,
        default=None,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data."
    )
    parser.add_argument(
        "--placeholder_token",
        type=str,
        default=None,
        required=True,
        help="A token to use as a placeholder for the concept.",
    )
    parser.add_argument(
        "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word."
    )
    parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'")
    parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
    parser.add_argument(
        "--output_dir",
        type=str,
        default="text-inversion-model",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
    )
    parser.add_argument(
        "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument("--num_train_epochs", type=int, default=100)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=5000,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default="no",
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose"
            "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
            "and an Nvidia Ampere GPU."
        ),
    )
    parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--do_quantization", action="store_true", help="Whether or not to do quantization.")
    parser.add_argument("--do_distillation", action="store_true", help="Whether or not to do distillation.")
    parser.add_argument(
        "--verify_loading", action="store_true", help="Whether or not to verify the loading of the quantized model."
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")

    args = parser.parse_args()
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank

    if args.train_data_dir is None:
        raise ValueError("You must specify a train data directory.")

    return args


imagenet_templates_small = [
    "a photo of a {}",
    "a rendering of a {}",
    "a cropped photo of the {}",
    "the photo of a {}",
    "a photo of a clean {}",
    "a photo of a dirty {}",
    "a dark photo of the {}",
    "a photo of my {}",
    "a photo of the cool {}",
    "a close-up photo of a {}",
    "a bright photo of the {}",
    "a cropped photo of a {}",
    "a photo of the {}",
    "a good photo of the {}",
    "a photo of one {}",
    "a close-up photo of the {}",
    "a rendition of the {}",
    "a photo of the clean {}",
    "a rendition of a {}",
    "a photo of a nice {}",
    "a good photo of a {}",
    "a photo of the nice {}",
    "a photo of the small {}",
    "a photo of the weird {}",
    "a photo of the large {}",
    "a photo of a cool {}",
    "a photo of a small {}",
]

imagenet_style_templates_small = [
    "a painting in the style of {}",
    "a rendering in the style of {}",
    "a cropped painting in the style of {}",
    "the painting in the style of {}",
    "a clean painting in the style of {}",
    "a dirty painting in the style of {}",
    "a dark painting in the style of {}",
    "a picture in the style of {}",
    "a cool painting in the style of {}",
    "a close-up painting in the style of {}",
    "a bright painting in the style of {}",
    "a cropped painting in the style of {}",
    "a good painting in the style of {}",
    "a close-up painting in the style of {}",
    "a rendition in the style of {}",
    "a nice painting in the style of {}",
    "a small painting in the style of {}",
    "a weird painting in the style of {}",
    "a large painting in the style of {}",
]


# Adapted from torch-ema https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py#L14
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(self, parameters: Iterable[torch.nn.Parameter], decay=0.9999):
        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        self.decay = decay
        self.optimization_step = 0

    def get_decay(self, optimization_step):
        """
        Compute the decay factor for the exponential moving average.
        """
        value = (1 + optimization_step) / (10 + optimization_step)
        return 1 - min(self.decay, value)

    @torch.no_grad()
    def step(self, parameters):
        parameters = list(parameters)

        self.optimization_step += 1
        self.decay = self.get_decay(self.optimization_step)

        for s_param, param in zip(self.shadow_params, parameters):
            if param.requires_grad:
                tmp = self.decay * (s_param - param)
                s_param.sub_(tmp)
            else:
                s_param.copy_(param)

        torch.cuda.empty_cache()

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """
        Copy current averaged parameters into given collection of parameters.
        Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                updated with the stored moving averages. If `None`, the
                parameters with which this `ExponentialMovingAverage` was
                initialized will be used.
        """
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.data)

    def to(self, device=None, dtype=None) -> None:
        r"""Move internal buffers of the ExponentialMovingAverage to `device`.
        Args:
            device: like `device` argument to `torch.Tensor.to`
        """
        # .to() on the tensors handles None correctly
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]


class TextualInversionDataset(Dataset):
    def __init__(
        self,
        data_root,
        tokenizer,
        learnable_property="object",  # [object, style]
        size=512,
        repeats=100,
        interpolation="bicubic",
        flip_p=0.5,
        set="train",
        placeholder_token="*",
        center_crop=False,
    ):
        self.data_root = data_root
        self.tokenizer = tokenizer
        self.learnable_property = learnable_property
        self.size = size
        self.placeholder_token = placeholder_token
        self.center_crop = center_crop
        self.flip_p = flip_p

        self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]

        self.num_images = len(self.image_paths)
        self._length = self.num_images

        if set == "train":
            self._length = self.num_images * repeats

        self.interpolation = {
            "linear": PIL_INTERPOLATION["linear"],
            "bilinear": PIL_INTERPOLATION["bilinear"],
            "bicubic": PIL_INTERPOLATION["bicubic"],
            "lanczos": PIL_INTERPOLATION["lanczos"],
        }[interpolation]

        self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
        self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)

    def __len__(self):
        return self._length

    def __getitem__(self, i):
        example = {}
        image = Image.open(self.image_paths[i % self.num_images])

        if not image.mode == "RGB":
            image = image.convert("RGB")

        placeholder_string = self.placeholder_token
        text = random.choice(self.templates).format(placeholder_string)

        example["input_ids"] = self.tokenizer(
            text,
            padding="max_length",
            truncation=True,
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        ).input_ids[0]

        # default to score-sde preprocessing
        img = np.array(image).astype(np.uint8)

        if self.center_crop:
            crop = min(img.shape[0], img.shape[1])
            h, w = img.shape[0], img.shape[1]
            img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]

        image = Image.fromarray(img)
        image = image.resize((self.size, self.size), resample=self.interpolation)

        image = self.flip_transform(image)
        image = np.array(image).astype(np.uint8)
        image = (image / 127.5 - 1.0).astype(np.float32)

        example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
        return example


def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"


def freeze_params(params):
    for param in params:
        param.requires_grad = False


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(pipeline, prompt="", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid


def main():
    args = parse_args()
    logging_dir = os.path.join(args.output_dir, args.logging_dir)

    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)

    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with="tensorboard",
        project_config=accelerator_project_config,
    )

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    # Handle the repository creation
    if accelerator.is_main_process:
        if args.push_to_hub:
            if args.hub_model_id is None:
                repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
            else:
                repo_name = args.hub_model_id
            repo = Repository(args.output_dir, clone_from=repo_name)

            with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
                if "step_*" not in gitignore:
                    gitignore.write("step_*\n")
                if "epoch_*" not in gitignore:
                    gitignore.write("epoch_*\n")
        elif args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)

    # Load the tokenizer and add the placeholder token as an additional special token
    if args.tokenizer_name:
        tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
    elif args.pretrained_model_name_or_path:
        tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")

    # Load models and create wrapper for stable diffusion
    noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler")
    text_encoder = CLIPTextModel.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="text_encoder",
        revision=args.revision,
    )
    vae = AutoencoderKL.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="vae",
        revision=args.revision,
    )
    unet = UNet2DConditionModel.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="unet",
        revision=args.revision,
    )

    train_unet = False
    # Freeze vae and unet
    freeze_params(vae.parameters())
    if not args.do_quantization and not args.do_distillation:
        # Add the placeholder token in tokenizer
        num_added_tokens = tokenizer.add_tokens(args.placeholder_token)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

        # Convert the initializer_token, placeholder_token to ids
        token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
        # Check if initializer_token is a single token or a sequence of tokens
        if len(token_ids) > 1:
            raise ValueError("The initializer token must be a single token.")

        initializer_token_id = token_ids[0]
        placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token)
        # Resize the token embeddings as we are adding new special tokens to the tokenizer
        text_encoder.resize_token_embeddings(len(tokenizer))

        # Initialise the newly added placeholder token with the embeddings of the initializer token
        token_embeds = text_encoder.get_input_embeddings().weight.data
        token_embeds[placeholder_token_id] = token_embeds[initializer_token_id]

        freeze_params(unet.parameters())
        # Freeze all parameters except for the token embeddings in text encoder
        params_to_freeze = itertools.chain(
            text_encoder.text_model.encoder.parameters(),
            text_encoder.text_model.final_layer_norm.parameters(),
            text_encoder.text_model.embeddings.position_embedding.parameters(),
        )
        freeze_params(params_to_freeze)
    else:
        train_unet = True
        freeze_params(text_encoder.parameters())

    if args.scale_lr:
        args.learning_rate = (
            args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
        )

    # Initialize the optimizer
    optimizer = torch.optim.AdamW(
        # only optimize the unet or embeddings of text_encoder
        unet.parameters() if train_unet else text_encoder.get_input_embeddings().parameters(),
        lr=args.learning_rate,
        betas=(args.adam_beta1, args.adam_beta2),
        weight_decay=args.adam_weight_decay,
        eps=args.adam_epsilon,
    )

    train_dataset = TextualInversionDataset(
        data_root=args.train_data_dir,
        tokenizer=tokenizer,
        size=args.resolution,
        placeholder_token=args.placeholder_token,
        repeats=args.repeats,
        learnable_property=args.learnable_property,
        center_crop=args.center_crop,
        set="train",
    )
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True)

    # Scheduler and math around the number of training steps.
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        overrode_max_train_steps = True

    lr_scheduler = get_scheduler(
        args.lr_scheduler,
        optimizer=optimizer,
        num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
        num_training_steps=args.max_train_steps * accelerator.num_processes,
    )

    if not train_unet:
        text_encoder = accelerator.prepare(text_encoder)
        unet.to(accelerator.device)
        unet.eval()
    else:
        unet = accelerator.prepare(unet)
        text_encoder.to(accelerator.device)
        text_encoder.eval()
    optimizer, train_dataloader, lr_scheduler = accelerator.prepare(optimizer, train_dataloader, lr_scheduler)

    # Move vae to device
    vae.to(accelerator.device)

    # Keep vae in eval mode as we don't train it
    vae.eval()

    compression_manager = None

    def train_func(model):
        if train_unet:
            unet_ = model
            text_encoder_ = text_encoder
        else:
            unet_ = unet
            text_encoder_ = model
        # We need to recalculate our total training steps as the size of the training dataloader may have changed.
        num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
        if overrode_max_train_steps:
            args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        # Afterwards we recalculate our number of training epochs
        args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

        # We need to initialize the trackers we use, and also store our configuration.
        # The trackers initialize automatically on the main process.
        if accelerator.is_main_process:
            accelerator.init_trackers("textual_inversion", config=vars(args))

        # Train!
        total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

        logger.info("***** Running training *****")
        logger.info(f"  Num examples = {len(train_dataset)}")
        logger.info(f"  Num Epochs = {args.num_train_epochs}")
        logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
        logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
        logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
        logger.info(f"  Total optimization steps = {args.max_train_steps}")
        # Only show the progress bar once on each machine.
        progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
        progress_bar.set_description("Steps")
        global_step = 0

        if train_unet and args.use_ema:
            ema_unet = EMAModel(unet_.parameters())

        for epoch in range(args.num_train_epochs):
            model.train()
            train_loss = 0.0
            for step, batch in enumerate(train_dataloader):
                with accelerator.accumulate(model):
                    # Convert images to latent space
                    latents = vae.encode(batch["pixel_values"]).latent_dist.sample().detach()
                    latents = latents * 0.18215

                    # Sample noise that we'll add to the latents
                    noise = torch.randn(latents.shape).to(latents.device)
                    bsz = latents.shape[0]
                    # Sample a random timestep for each image
                    timesteps = torch.randint(
                        0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device
                    ).long()

                    # Add noise to the latents according to the noise magnitude at each timestep
                    # (this is the forward diffusion process)
                    noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)

                    # Get the text embedding for conditioning
                    encoder_hidden_states = text_encoder_(batch["input_ids"])[0]

                    # Predict the noise residual
                    model_pred = unet_(noisy_latents, timesteps, encoder_hidden_states).sample

                    loss = F.mse_loss(model_pred, noise, reduction="none").mean([1, 2, 3]).mean()
                    if train_unet and compression_manager:
                        unet_inputs = {
                            "sample": noisy_latents,
                            "timestep": timesteps,
                            "encoder_hidden_states": encoder_hidden_states,
                        }
                        loss = compression_manager.callbacks.on_after_compute_loss(unet_inputs, model_pred, loss)

                    # Gather the losses across all processes for logging (if we use distributed training).
                    avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
                    train_loss += avg_loss.item() / args.gradient_accumulation_steps

                    # Backpropagate
                    accelerator.backward(loss)

                    if train_unet:
                        if accelerator.sync_gradients:
                            accelerator.clip_grad_norm_(unet_.parameters(), args.max_grad_norm)
                    else:
                        # Zero out the gradients for all token embeddings except the newly added
                        # embeddings for the concept, as we only want to optimize the concept embeddings
                        if accelerator.num_processes > 1:
                            grads = text_encoder_.module.get_input_embeddings().weight.grad
                        else:
                            grads = text_encoder_.get_input_embeddings().weight.grad
                        # Get the index for tokens that we want to zero the grads for
                        index_grads_to_zero = torch.arange(len(tokenizer)) != placeholder_token_id
                        grads.data[index_grads_to_zero, :] = grads.data[index_grads_to_zero, :].fill_(0)

                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                # Checks if the accelerator has performed an optimization step behind the scenes
                if accelerator.sync_gradients:
                    if train_unet and args.use_ema:
                        ema_unet.step(unet_.parameters())
                    progress_bar.update(1)
                    global_step += 1
                    accelerator.log({"train_loss": train_loss}, step=global_step)
                    train_loss = 0.0
                    if not train_unet and global_step % args.save_steps == 0:
                        save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin")
                        save_progress(text_encoder_, placeholder_token_id, accelerator, args, save_path)

                logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
                progress_bar.set_postfix(**logs)
                accelerator.log(logs, step=global_step)

                if global_step >= args.max_train_steps:
                    break
            accelerator.wait_for_everyone()

        if train_unet and args.use_ema:
            ema_unet.copy_to(unet_.parameters())

        if not train_unet:
            return text_encoder_

    if not train_unet:
        text_encoder = train_func(text_encoder)
    else:
        import copy

        model = copy.deepcopy(unet)
        confs = []
        if args.do_quantization:
            from neural_compressor import QuantizationAwareTrainingConfig

            q_conf = QuantizationAwareTrainingConfig()
            confs.append(q_conf)

        if args.do_distillation:
            teacher_model = copy.deepcopy(model)

            def attention_fetcher(x):
                return x.sample

            layer_mappings = [
                [["conv_in"]],
                [["time_embedding"]],
                [["down_blocks.0.attentions.0", attention_fetcher]],
                [["down_blocks.0.attentions.1", attention_fetcher]],
                [["down_blocks.0.resnets.0"]],
                [["down_blocks.0.resnets.1"]],
                [["down_blocks.0.downsamplers.0"]],
                [["down_blocks.1.attentions.0", attention_fetcher]],
                [["down_blocks.1.attentions.1", attention_fetcher]],
                [["down_blocks.1.resnets.0"]],
                [["down_blocks.1.resnets.1"]],
                [["down_blocks.1.downsamplers.0"]],
                [["down_blocks.2.attentions.0", attention_fetcher]],
                [["down_blocks.2.attentions.1", attention_fetcher]],
                [["down_blocks.2.resnets.0"]],
                [["down_blocks.2.resnets.1"]],
                [["down_blocks.2.downsamplers.0"]],
                [["down_blocks.3.resnets.0"]],
                [["down_blocks.3.resnets.1"]],
                [["up_blocks.0.resnets.0"]],
                [["up_blocks.0.resnets.1"]],
                [["up_blocks.0.resnets.2"]],
                [["up_blocks.0.upsamplers.0"]],
                [["up_blocks.1.attentions.0", attention_fetcher]],
                [["up_blocks.1.attentions.1", attention_fetcher]],
                [["up_blocks.1.attentions.2", attention_fetcher]],
                [["up_blocks.1.resnets.0"]],
                [["up_blocks.1.resnets.1"]],
                [["up_blocks.1.resnets.2"]],
                [["up_blocks.1.upsamplers.0"]],
                [["up_blocks.2.attentions.0", attention_fetcher]],
                [["up_blocks.2.attentions.1", attention_fetcher]],
                [["up_blocks.2.attentions.2", attention_fetcher]],
                [["up_blocks.2.resnets.0"]],
                [["up_blocks.2.resnets.1"]],
                [["up_blocks.2.resnets.2"]],
                [["up_blocks.2.upsamplers.0"]],
                [["up_blocks.3.attentions.0", attention_fetcher]],
                [["up_blocks.3.attentions.1", attention_fetcher]],
                [["up_blocks.3.attentions.2", attention_fetcher]],
                [["up_blocks.3.resnets.0"]],
                [["up_blocks.3.resnets.1"]],
                [["up_blocks.3.resnets.2"]],
                [["mid_block.attentions.0", attention_fetcher]],
                [["mid_block.resnets.0"]],
                [["mid_block.resnets.1"]],
                [["conv_out"]],
            ]
            layer_names = [layer_mapping[0][0] for layer_mapping in layer_mappings]
            if not set(layer_names).issubset([n[0] for n in model.named_modules()]):
                raise ValueError(
                    "Provided model is not compatible with the default layer_mappings, "
                    'please use the model fine-tuned from "CompVis/stable-diffusion-v1-4", '
                    "or modify the layer_mappings variable to fit your model."
                    f"\nDefault layer_mappings are as such:\n{layer_mappings}"
                )
            from neural_compressor.config import DistillationConfig, IntermediateLayersKnowledgeDistillationLossConfig

            distillation_criterion = IntermediateLayersKnowledgeDistillationLossConfig(
                layer_mappings=layer_mappings,
                loss_types=["MSE"] * len(layer_mappings),
                loss_weights=[1.0 / len(layer_mappings)] * len(layer_mappings),
                add_origin_loss=True,
            )
            d_conf = DistillationConfig(teacher_model=teacher_model, criterion=distillation_criterion)
            confs.append(d_conf)

        from neural_compressor.training import prepare_compression

        compression_manager = prepare_compression(model, confs)
        compression_manager.callbacks.on_train_begin()
        model = compression_manager.model
        train_func(model)
        compression_manager.callbacks.on_train_end()

        # Save the resulting model and its corresponding configuration in the given directory
        model.save(args.output_dir)

        logger.info(f"Optimized model saved to: {args.output_dir}.")

        # change to framework model for further use
        model = model.model

    # Create the pipeline using the trained modules and save it.
    templates = imagenet_style_templates_small if args.learnable_property == "style" else imagenet_templates_small
    prompt = templates[0].format(args.placeholder_token)
    if accelerator.is_main_process:
        pipeline = StableDiffusionPipeline.from_pretrained(
            args.pretrained_model_name_or_path,
            text_encoder=accelerator.unwrap_model(text_encoder),
            vae=vae,
            unet=accelerator.unwrap_model(unet),
            tokenizer=tokenizer,
        )
        pipeline.save_pretrained(args.output_dir)
        pipeline = pipeline.to(unet.device)
        baseline_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed)
        baseline_model_images.save(
            os.path.join(args.output_dir, "{}_baseline_model.png".format("_".join(prompt.split())))
        )

        if not train_unet:
            # Also save the newly trained embeddings
            save_path = os.path.join(args.output_dir, "learned_embeds.bin")
            save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path)
        else:
            setattr(pipeline, "unet", accelerator.unwrap_model(model))
            if args.do_quantization:
                pipeline = pipeline.to(torch.device("cpu"))

            optimized_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed)
            optimized_model_images.save(
                os.path.join(args.output_dir, "{}_optimized_model.png".format("_".join(prompt.split())))
            )

        if args.push_to_hub:
            repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)

    accelerator.end_training()

    if args.do_quantization and args.verify_loading:
        # Load the model obtained after Intel Neural Compressor quantization
        from neural_compressor.utils.pytorch import load

        loaded_model = load(args.output_dir, model=unet)
        loaded_model.eval()

        setattr(pipeline, "unet", loaded_model)
        if args.do_quantization:
            pipeline = pipeline.to(torch.device("cpu"))

        loaded_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed)
        if loaded_model_images != optimized_model_images:
            logger.info("The quantized model was not successfully loaded.")
        else:
            logger.info("The quantized model was successfully loaded.")


if __name__ == "__main__":
    main()
|
|
|
# Copyright 2023 Zhejiang University Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """
    Improved Pseudo numerical methods for diffusion models (iPNDM) ported from @crowsonkb's amazing k-diffusion
    [library](https://github.com/crowsonkb/v-diffusion-pytorch/blob/987f8985e38208345c1959b0ea767a625831cc9b/diffusion/sampling.py#L296)

    [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
    function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
    [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
    [`~SchedulerMixin.from_pretrained`] functions.

    For more details, see the original paper: https://arxiv.org/abs/2202.09778

    Args:
        num_train_timesteps (`int`): number of diffusion steps used to train the model.
    """

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """
        Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.

        Args:
            num_inference_steps (`int`):
                the number of diffusion steps used when generating samples with a pre-trained model.
        """
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """
        Step function propagating the sample with the linear multi-step method. This has one forward pass with multiple
        times to approximate the solution.

        Args:
            model_output (`torch.FloatTensor`): direct output from learned diffusion model.
            timestep (`int`): current discrete timestep in the diffusion chain.
            sample (`torch.FloatTensor`):
                current instance of sample being created by diffusion process.
            return_dict (`bool`): option for returning tuple rather than SchedulerOutput class

        Returns:
            [`~scheduling_utils.SchedulerOutput`] or `tuple`: [`~scheduling_utils.SchedulerOutput`] if `return_dict` is
            True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.

        """
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        """
        Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
        current timestep.

        Args:
            sample (`torch.FloatTensor`): input sample

        Returns:
            `torch.FloatTensor`: scaled input sample
        """
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
spaces/Andy1621/uniformer_image_detection/exp/mask_rcnn_3x_ms_hybrid_base/run.sh
DELETED
@@ -1,10 +0,0 @@
#!/usr/bin/env bash

work_path=$(dirname $0)
PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \
python -m torch.distributed.launch --nproc_per_node=8 \
    tools/train.py ${work_path}/config.py \
    --launcher pytorch \
    --cfg-options model.backbone.pretrained_path='your_model_path/uniformer_base_in1k.pth' \
    --work-dir ${work_path}/ckpt \
    2>&1 | tee -a ${work_path}/log.txt
spaces/Andy1621/uniformer_light/uniformer_light_video.py
DELETED
@@ -1,595 +0,0 @@
# All rights reserved.
from math import ceil, sqrt
from collections import OrderedDict
import torch
import torch.nn as nn
from functools import partial
from timm.models.vision_transformer import _cfg
from timm.models.layers import trunc_normal_, DropPath, to_2tuple
import os


global_attn = None
token_indices = None

model_path = 'path_to_models'
model_path = {
    'uniformer_xxs_128_in1k': os.path.join(model_path, 'uniformer_xxs_128_in1k.pth'),
    'uniformer_xxs_160_in1k': os.path.join(model_path, 'uniformer_xxs_160_in1k.pth'),
    'uniformer_xxs_192_in1k': os.path.join(model_path, 'uniformer_xxs_192_in1k.pth'),
    'uniformer_xxs_224_in1k': os.path.join(model_path, 'uniformer_xxs_224_in1k.pth'),
    'uniformer_xs_192_in1k': os.path.join(model_path, 'uniformer_xs_192_in1k.pth'),
    'uniformer_xs_224_in1k': os.path.join(model_path, 'uniformer_xs_224_in1k.pth'),
}


def conv_3xnxn(inp, oup, kernel_size=3, stride=3, groups=1):
    return nn.Conv3d(inp, oup, (3, kernel_size, kernel_size), (2, stride, stride), (1, 0, 0), groups=groups)

def conv_1xnxn(inp, oup, kernel_size=3, stride=3, groups=1):
    return nn.Conv3d(inp, oup, (1, kernel_size, kernel_size), (1, stride, stride), (0, 0, 0), groups=groups)

def conv_3xnxn_std(inp, oup, kernel_size=3, stride=3, groups=1):
    return nn.Conv3d(inp, oup, (3, kernel_size, kernel_size), (1, stride, stride), (1, 0, 0), groups=groups)

def conv_1x1x1(inp, oup, groups=1):
    return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups)

def conv_3x3x3(inp, oup, groups=1):
    return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups)

def conv_5x5x5(inp, oup, groups=1):
    return nn.Conv3d(inp, oup, (5, 5, 5), (1, 1, 1), (2, 2, 2), groups=groups)

def bn_3d(dim):
    return nn.BatchNorm3d(dim)


# code is from https://github.com/YifanXu74/Evo-ViT
def easy_gather(x, indices):
    # x => B x N x C
    # indices => B x N
    B, N, C = x.shape
    N_new = indices.shape[1]
    offset = torch.arange(B, dtype=torch.long, device=x.device).view(B, 1) * N
    indices = indices + offset
    # only select the informative tokens
    out = x.reshape(B * N, C)[indices.view(-1)].reshape(B, N_new, C)
    return out


# code is from https://github.com/YifanXu74/Evo-ViT
def merge_tokens(x_drop, score):
    # x_drop => B x N_drop
    # score => B x N_drop
    weight = score / torch.sum(score, dim=1, keepdim=True)
    x_drop = weight.unsqueeze(-1) * x_drop
    return torch.sum(x_drop, dim=1, keepdim=True)


class Mlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class Attention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., trade_off=1):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        # updating weight for the global score
        self.trade_off = trade_off

    def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)

        # update global score
        global global_attn
        tradeoff = self.trade_off
        if isinstance(global_attn, int):
            global_attn = torch.mean(attn[:, :, 0, 1:], dim=1)
        elif global_attn.shape[1] == N - 1:
            # no additional token and no pruning, update all global scores
            cls_attn = torch.mean(attn[:, :, 0, 1:], dim=1)
            global_attn = (1 - tradeoff) * global_attn + tradeoff * cls_attn
        else:
            # only update the informative tokens
            # the first one is the class token
            # the last one is the representative token
            cls_attn = torch.mean(attn[:, :, 0, 1:-1], dim=1)
            if self.training:
                temp_attn = (1 - tradeoff) * global_attn[:, :(N - 2)] + tradeoff * cls_attn
                global_attn = torch.cat((temp_attn, global_attn[:, (N - 2):]), dim=1)
            else:
                # avoid torch.cat() for fast inference
                global_attn[:, :(N - 2)] = (1 - tradeoff) * global_attn[:, :(N - 2)] + tradeoff * cls_attn

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class CMlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = conv_1x1x1(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = conv_1x1x1(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class CBlock(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.pos_embed = conv_3x3x3(dim, dim, groups=dim)
        self.norm1 = bn_3d(dim)
        self.conv1 = conv_1x1x1(dim, dim, 1)
        self.conv2 = conv_1x1x1(dim, dim, 1)
        self.attn = conv_5x5x5(dim, dim, groups=dim)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = bn_3d(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        x = x + self.pos_embed(x)
        x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x)))))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x


class EvoSABlock(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, prune_ratio=1,
                 trade_off=0, downsample=False):
        super().__init__()
        self.pos_embed = conv_3x3x3(dim, dim, groups=dim)
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop, trade_off=trade_off)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        self.prune_ratio = prune_ratio
        self.downsample = downsample
        if downsample:
            self.avgpool = nn.AvgPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))

    def forward(self, cls_token, x):
        x = x + self.pos_embed(x)
        B, C, T, H, W = x.shape
        x = x.flatten(2).transpose(1, 2)

        if self.prune_ratio == 1:
            x = torch.cat([cls_token, x], dim=1)
            x = x + self.drop_path(self.attn(self.norm1(x)))
            x = x + self.drop_path(self.mlp(self.norm2(x)))
            cls_token, x = x[:, :1], x[:, 1:]
            x = x.transpose(1, 2).reshape(B, C, T, H, W)
            return cls_token, x
        else:
            global global_attn, token_indices
            # calculate the number of informative tokens
            N = x.shape[1]
            N_ = int(N * self.prune_ratio)
            # sort global attention
            indices = torch.argsort(global_attn, dim=1, descending=True)

            # concatenate x, global attention and token indices => x_ga_ti
            # rearrange the tensor according to the new indices
            x_ga_ti = torch.cat((x, global_attn.unsqueeze(-1), token_indices.unsqueeze(-1)), dim=-1)
            x_ga_ti = easy_gather(x_ga_ti, indices)
            x_sorted, global_attn, token_indices = x_ga_ti[:, :, :-2], x_ga_ti[:, :, -2], x_ga_ti[:, :, -1]

            # informative tokens
            x_info = x_sorted[:, :N_]
            # merge dropped tokens
            x_drop = x_sorted[:, N_:]
            score = global_attn[:, N_:]
            # B x N_drop x C => B x 1 x C
            rep_token = merge_tokens(x_drop, score)
            # concatenate new tokens
            x = torch.cat((cls_token, x_info, rep_token), dim=1)

            # slow update
            fast_update = 0
            tmp_x = self.attn(self.norm1(x))
            fast_update = fast_update + tmp_x[:, -1:]
            x = x + self.drop_path(tmp_x)
            tmp_x = self.mlp(self.norm2(x))
            fast_update = fast_update + tmp_x[:, -1:]
            x = x + self.drop_path(tmp_x)
            # fast update
            x_drop = x_drop + fast_update.expand(-1, N - N_, -1)

            cls_token, x = x[:, :1, :], x[:, 1:-1, :]
            if self.training:
                x_sorted = torch.cat((x, x_drop), dim=1)
            else:
                x_sorted[:, N_:] = x_drop
                x_sorted[:, :N_] = x

            # recover tokens
            # scale for normalization
            old_global_scale = torch.sum(global_attn, dim=1, keepdim=True)
            # recover order
            indices = torch.argsort(token_indices, dim=1)
            x_ga_ti = torch.cat((x_sorted, global_attn.unsqueeze(-1), token_indices.unsqueeze(-1)), dim=-1)
            x_ga_ti = easy_gather(x_ga_ti, indices)
            x_patch, global_attn, token_indices = x_ga_ti[:, :, :-2], x_ga_ti[:, :, -2], x_ga_ti[:, :, -1]
            x_patch = x_patch.transpose(1, 2).reshape(B, C, T, H, W)

            if self.downsample:
                # downsample global attention
                global_attn = global_attn.reshape(B, 1, T, H, W)
                global_attn = self.avgpool(global_attn).view(B, -1)
                # normalize global attention
                new_global_scale = torch.sum(global_attn, dim=1, keepdim=True)
                scale = old_global_scale / new_global_scale
                global_attn = global_attn * scale

            return cls_token, x_patch


class SABlock(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.pos_embed = conv_3x3x3(dim, dim, groups=dim)
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        x = x + self.pos_embed(x)
        B, C, T, H, W = x.shape
        x = x.flatten(2).transpose(1, 2)
        x = x + self.drop_path(self.attn(self.norm1(x)))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        x = x.transpose(1, 2).reshape(B, C, T, H, W)
        return x


class SplitSABlock(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.pos_embed = conv_3x3x3(dim, dim, groups=dim)
        self.t_norm = norm_layer(dim)
        self.t_attn = Attention(
            dim,
            num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop)
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        x = x + self.pos_embed(x)
        B, C, T, H, W = x.shape
        attn = x.view(B, C, T, H * W).permute(0, 3, 2, 1).contiguous()
        attn = attn.view(B * H * W, T, C)
        attn = attn + self.drop_path(self.t_attn(self.t_norm(attn)))
        attn = attn.view(B, H * W, T, C).permute(0, 2, 1, 3).contiguous()
        attn = attn.view(B * T, H * W, C)
        residual = x.view(B, C, T, H * W).permute(0, 2, 3, 1).contiguous()
        residual = residual.view(B * T, H * W, C)
        attn = residual + self.drop_path(self.attn(self.norm1(attn)))
        attn = attn.view(B, T * H * W, C)
        out = attn + self.drop_path(self.mlp(self.norm2(attn)))
        out = out.transpose(1, 2).reshape(B, C, T, H, W)
        return out


class SpeicalPatchEmbed(nn.Module):
    """ Image to Patch Embedding
    """
    def __init__(self, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        patch_size = to_2tuple(patch_size)
        self.patch_size = patch_size

        self.proj = nn.Sequential(
            nn.Conv3d(in_chans, embed_dim // 2, kernel_size=(3, 3, 3), stride=(1, 2, 2), padding=(1, 1, 1)),
            nn.BatchNorm3d(embed_dim // 2),
            nn.GELU(),
            nn.Conv3d(embed_dim // 2, embed_dim, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1)),
            nn.BatchNorm3d(embed_dim),
        )

    def forward(self, x):
        B, C, T, H, W = x.shape
        # FIXME look at relaxing size constraints
        # assert H == self.img_size[0] and W == self.img_size[1], \
        #     f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        B, C, T, H, W = x.shape
        x = x.flatten(2).transpose(1, 2)
        x = x.reshape(B, T, H, W, -1).permute(0, 4, 1, 2, 3).contiguous()
        return x


class PatchEmbed(nn.Module):
    """ Image to Patch Embedding
    """
    def __init__(self, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        patch_size = to_2tuple(patch_size)
        self.patch_size = patch_size
        self.norm = nn.LayerNorm(embed_dim)
        self.proj = conv_1xnxn(in_chans, embed_dim, kernel_size=patch_size[0], stride=patch_size[0])

    def forward(self, x):
        B, C, T, H, W = x.shape
        # FIXME look at relaxing size constraints
        # assert H == self.img_size[0] and W == self.img_size[1], \
        #     f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        B, C, T, H, W = x.shape
        x = x.flatten(2).transpose(1, 2)
        x = self.norm(x)
        x = x.reshape(B, T, H, W, -1).permute(0, 4, 1, 2, 3).contiguous()
        return x


class Uniformer_light(nn.Module):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
        https://arxiv.org/abs/2010.11929
    """
    def __init__(self, depth=[3, 4, 8, 3], in_chans=3, num_classes=400, embed_dim=[64, 128, 320, 512],
                 head_dim=64, mlp_ratio=[4., 4., 4., 4.], qkv_bias=True, qk_scale=None, representation_size=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None,
                 prune_ratio=[[], [], [1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5]],
                 trade_off=[[], [], [1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]
                 ):
        super().__init__()

        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        norm_layer = partial(nn.LayerNorm, eps=1e-6)

        self.patch_embed1 = SpeicalPatchEmbed(
            patch_size=4, in_chans=in_chans, embed_dim=embed_dim[0])
        self.patch_embed2 = PatchEmbed(
            patch_size=2, in_chans=embed_dim[0], embed_dim=embed_dim[1])
        self.patch_embed3 = PatchEmbed(
            patch_size=2, in_chans=embed_dim[1], embed_dim=embed_dim[2])
        self.patch_embed4 = PatchEmbed(
            patch_size=2, in_chans=embed_dim[2], embed_dim=embed_dim[3])

        # class token
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim[2]))
        self.cls_upsample = nn.Linear(embed_dim[2], embed_dim[3])

        self.pos_drop = nn.Dropout(p=drop_rate)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depth))]  # stochastic depth decay rule
        num_heads = [dim // head_dim for dim in embed_dim]
        self.blocks1 = nn.ModuleList([
            CBlock(
                dim=embed_dim[0], num_heads=num_heads[0], mlp_ratio=mlp_ratio[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
            for i in range(depth[0])])
        self.blocks2 = nn.ModuleList([
            CBlock(
                dim=embed_dim[1], num_heads=num_heads[1], mlp_ratio=mlp_ratio[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+depth[0]], norm_layer=norm_layer)
            for i in range(depth[1])])
        self.blocks3 = nn.ModuleList([
            EvoSABlock(
                dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+depth[0]+depth[1]], norm_layer=norm_layer,
                prune_ratio=prune_ratio[2][i], trade_off=trade_off[2][i],
                downsample=True if i == depth[2] - 1 else False)
            for i in range(depth[2])])
        self.blocks4 = nn.ModuleList([
            EvoSABlock(
                dim=embed_dim[3], num_heads=num_heads[3], mlp_ratio=mlp_ratio[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+depth[0]+depth[1]+depth[2]], norm_layer=norm_layer,
                prune_ratio=prune_ratio[3][i], trade_off=trade_off[3][i])
            for i in range(depth[3])])
        self.norm = bn_3d(embed_dim[-1])
        self.norm_cls = nn.LayerNorm(embed_dim[-1])

        # Representation layer
        if representation_size:
            self.num_features = representation_size
            self.pre_logits = nn.Sequential(OrderedDict([
                ('fc', nn.Linear(embed_dim[-1], representation_size)),
                ('act', nn.Tanh())
            ]))
        else:
            self.pre_logits = nn.Identity()

        # Classifier head
        self.head = nn.Linear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity()
        self.head_cls = nn.Linear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity()

        self.apply(self._init_weights)

        for name, p in self.named_parameters():
            # fill proj weight with 1 here to improve training dynamics. Otherwise temporal attention inputs
            # are multiplied by 0*0, which is hard for the model to move out of.
            if 't_attn.qkv.weight' in name:
                nn.init.constant_(p, 0)
            if 't_attn.qkv.bias' in name:
                nn.init.constant_(p, 0)
            if 't_attn.proj.weight' in name:
                nn.init.constant_(p, 1)
            if 't_attn.proj.bias' in name:
                nn.init.constant_(p, 0)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token'}

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity()

    def inflate_weight(self, weight_2d, time_dim, center=False):
        if center:
            weight_3d = torch.zeros(*weight_2d.shape)
            weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
            middle_idx = time_dim // 2
            weight_3d[:, :, middle_idx, :, :] = weight_2d
        else:
            weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1)
            weight_3d = weight_3d / time_dim
        return weight_3d

    def forward_features(self, x):
        x = self.patch_embed1(x)
        x = self.pos_drop(x)
        for blk in self.blocks1:
            x = blk(x)
        x = self.patch_embed2(x)
        for blk in self.blocks2:
            x = blk(x)
        x = self.patch_embed3(x)
        # add cls_token in stage 3
        cls_token = self.cls_token.expand(x.shape[0], -1, -1)
        global global_attn, token_indices
        global_attn = 0
        token_indices = torch.arange(x.shape[2] * x.shape[3] * x.shape[4], dtype=torch.long, device=x.device).unsqueeze(0)
        token_indices = token_indices.expand(x.shape[0], -1)
        for blk in self.blocks3:
            cls_token, x = blk(cls_token, x)
        # upsample cls_token before stage 4
        cls_token = self.cls_upsample(cls_token)
        x = self.patch_embed4(x)
        # whether to reset global attention? for now it is simply average-pooled
        token_indices = torch.arange(x.shape[2] * x.shape[3] * x.shape[4], dtype=torch.long, device=x.device).unsqueeze(0)
        token_indices = token_indices.expand(x.shape[0], -1)
        for blk in self.blocks4:
            cls_token, x = blk(cls_token, x)
        if self.training:
            # layer normalization for cls_token
            cls_token = self.norm_cls(cls_token)
        x = self.norm(x)
        x = self.pre_logits(x)
        return cls_token, x

    def forward(self, x):
        cls_token, x = self.forward_features(x)
        x = x.flatten(2).mean(-1)
        if self.training:
            x = self.head(x), self.head_cls(cls_token.squeeze(1))
        else:
            x = self.head(x)
        return x


def uniformer_xxs_video(**kwargs):
    model = Uniformer_light(
        depth=[2, 5, 8, 2],
        prune_ratio=[[], [], [1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], [0.5, 0.5]],
        trade_off=[[], [], [1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], [0.5, 0.5]],
        embed_dim=[56, 112, 224, 448], head_dim=28, mlp_ratio=[3, 3, 3, 3], qkv_bias=True,
        **kwargs)
    model.default_cfg = _cfg()
    return model


def uniformer_xs_video(**kwargs):
    model = Uniformer_light(
        depth=[3, 5, 9, 3],
        prune_ratio=[[], [], [1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5]],
        trade_off=[[], [], [1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5]],
        embed_dim=[64, 128, 256, 512], head_dim=32, mlp_ratio=[3, 3, 3, 3], qkv_bias=True,
        **kwargs)
    model.default_cfg = _cfg()
    return model


if __name__ == '__main__':
    import time
    from fvcore.nn import FlopCountAnalysis
    from fvcore.nn import flop_count_table
    import numpy as np

    seed = 4217
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    num_frames = 16

    model = uniformer_xxs_video()
    # print(model)

    flops = FlopCountAnalysis(model, torch.rand(1, 3, num_frames, 160, 160))
    s = time.time()
    print(flop_count_table(flops, max_depth=1))
    print(time.time() - s)
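A minimal smoke test of the lightweight video model above, mirroring the file's own __main__ block (the 1x3x16x160x160 input shape is taken from there; the head defaults to 400 classes):

    import torch

    model = uniformer_xxs_video().eval()
    with torch.no_grad():
        logits = model(torch.rand(1, 3, 16, 160, 160))  # (B, C, T, H, W)
    print(logits.shape)  # torch.Size([1, 400])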
spaces/Andyrasika/Andyrasika-lora_diffusion/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Andyrasika-lora Diffusion
emoji: 🏢
colorFrom: red
colorTo: indigo
sdk: gradio
sdk_version: 3.37.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/metadata_gguf.py
DELETED
@@ -1,91 +0,0 @@
import struct
from enum import IntEnum


class GGUFValueType(IntEnum):
    UINT8 = 0
    INT8 = 1
    UINT16 = 2
    INT16 = 3
    UINT32 = 4
    INT32 = 5
    FLOAT32 = 6
    BOOL = 7
    STRING = 8
    ARRAY = 9
    UINT64 = 10
    INT64 = 11
    FLOAT64 = 12


# struct format string for each fixed-size value type (little-endian)
_simple_value_packing = {
    GGUFValueType.UINT8: "<B",
    GGUFValueType.INT8: "<b",
    GGUFValueType.UINT16: "<H",
    GGUFValueType.INT16: "<h",
    GGUFValueType.UINT32: "<I",
    GGUFValueType.INT32: "<i",
    GGUFValueType.FLOAT32: "<f",
    GGUFValueType.UINT64: "<Q",
    GGUFValueType.INT64: "<q",
    GGUFValueType.FLOAT64: "<d",
    GGUFValueType.BOOL: "?",
}

# size in bytes of each fixed-size value type
value_type_info = {
    GGUFValueType.UINT8: 1,
    GGUFValueType.INT8: 1,
    GGUFValueType.UINT16: 2,
    GGUFValueType.INT16: 2,
    GGUFValueType.UINT32: 4,
    GGUFValueType.INT32: 4,
    GGUFValueType.FLOAT32: 4,
    GGUFValueType.UINT64: 8,
    GGUFValueType.INT64: 8,
    GGUFValueType.FLOAT64: 8,
    GGUFValueType.BOOL: 1,
}


def get_single(value_type, file):
    if value_type == GGUFValueType.STRING:
        # strings are length-prefixed with a uint64
        value_length = struct.unpack("<Q", file.read(8))[0]
        value = file.read(value_length)
        try:
            value = value.decode('utf-8')
        except UnicodeDecodeError:
            pass  # keep the raw bytes if the string is not valid UTF-8
    else:
        type_str = _simple_value_packing.get(value_type)
        bytes_length = value_type_info.get(value_type)
        value = struct.unpack(type_str, file.read(bytes_length))[0]

    return value


def load_metadata(fname):
    metadata = {}
    with open(fname, 'rb') as file:
        GGUF_MAGIC = struct.unpack("<I", file.read(4))[0]
        GGUF_VERSION = struct.unpack("<I", file.read(4))[0]
        ti_data_count = struct.unpack("<Q", file.read(8))[0]
        kv_data_count = struct.unpack("<Q", file.read(8))[0]

        if GGUF_VERSION == 1:
            raise Exception('You are using an outdated GGUF, please download a new one.')

        for i in range(kv_data_count):
            key_length = struct.unpack("<Q", file.read(8))[0]
            key = file.read(key_length)

            value_type = GGUFValueType(struct.unpack("<I", file.read(4))[0])
            if value_type == GGUFValueType.ARRAY:
                # arrays are skipped: read the elements only to advance the file position
                ltype = GGUFValueType(struct.unpack("<I", file.read(4))[0])
                length = struct.unpack("<Q", file.read(8))[0]
                for j in range(length):
                    _ = get_single(ltype, file)
            else:
                value = get_single(value_type, file)
                metadata[key.decode()] = value

    return metadata
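A minimal usage sketch for the loader above; 'model.gguf' is a placeholder path, not a file in this repository:

    metadata = load_metadata('model.gguf')
    for key, value in metadata.items():
        print(f'{key}: {value}')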
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/ui_chat.py
DELETED
@@ -1,352 +0,0 @@
|
|
1 |
-
import json
|
2 |
-
from functools import partial
|
3 |
-
from pathlib import Path
|
4 |
-
|
5 |
-
import gradio as gr
|
6 |
-
from PIL import Image
|
7 |
-
|
8 |
-
from modules import chat, prompts, shared, ui, utils
|
9 |
-
from modules.html_generator import chat_html_wrapper
|
10 |
-
from modules.text_generation import stop_everything_event
|
11 |
-
from modules.utils import gradio
|
12 |
-
|
13 |
-
inputs = ('Chat input', 'interface_state')
|
14 |
-
reload_arr = ('history', 'name1', 'name2', 'mode', 'chat_style')
|
15 |
-
clear_arr = ('delete_chat-confirm', 'delete_chat', 'delete_chat-cancel')
|
16 |
-
|
17 |
-
|
18 |
-
def create_ui():
|
19 |
-
mu = shared.args.multi_user
|
20 |
-
|
21 |
-
shared.gradio['Chat input'] = gr.State()
|
22 |
-
shared.gradio['dummy'] = gr.State()
|
23 |
-
shared.gradio['history'] = gr.State({'internal': [], 'visible': []})
|
24 |
-
|
25 |
-
with gr.Tab('Chat', elem_id='chat-tab', elem_classes=("old-ui" if shared.args.chat_buttons else None)):
|
26 |
-
with gr.Row():
|
27 |
-
with gr.Column(elem_id='chat-col'):
|
28 |
-
shared.gradio['display'] = gr.HTML(value=chat_html_wrapper({'internal': [], 'visible': []}, '', '', 'chat', 'cai-chat'))
|
29 |
-
|
30 |
-
with gr.Row(elem_id="chat-input-row"):
|
31 |
-
with gr.Column(scale=1, elem_id='gr-hover-container'):
|
32 |
-
gr.HTML(value='<div class="hover-element" onclick="void(0)"><span style="width: 100px; display: block" id="hover-element-button">☰</span><div class="hover-menu" id="hover-menu"></div>', elem_id='gr-hover')
|
33 |
-
|
34 |
-
with gr.Column(scale=10, elem_id='chat-input-container'):
|
35 |
-
shared.gradio['textbox'] = gr.Textbox(label='', placeholder='Send a message', elem_id='chat-input', elem_classes=['add_scrollbar'])
|
36 |
-
shared.gradio['show_controls'] = gr.Checkbox(value=shared.settings['show_controls'], label='Show controls (Ctrl+S)', elem_id='show-controls')
|
37 |
-
shared.gradio['typing-dots'] = gr.HTML(value='<div class="typing"><span></span><span class="dot1"></span><span class="dot2"></span></div>', label='typing', elem_id='typing-container')
|
38 |
-
|
39 |
-
with gr.Column(scale=1, elem_id='generate-stop-container'):
|
40 |
-
with gr.Row():
|
41 |
-
shared.gradio['Stop'] = gr.Button('Stop', elem_id='stop', visible=False)
|
42 |
-
shared.gradio['Generate'] = gr.Button('Generate', elem_id='Generate', variant='primary')
|
43 |
-
|
44 |
-
# Hover menu buttons
|
45 |
-
with gr.Column(elem_id='chat-buttons'):
|
46 |
-
with gr.Row():
|
47 |
-
shared.gradio['Regenerate'] = gr.Button('Regenerate (Ctrl + Enter)', elem_id='Regenerate')
|
48 |
-
shared.gradio['Continue'] = gr.Button('Continue (Alt + Enter)', elem_id='Continue')
|
49 |
-
shared.gradio['Remove last'] = gr.Button('Remove last reply (Ctrl + Shift + Backspace)', elem_id='Remove-last')
|
50 |
-
|
51 |
-
with gr.Row():
|
52 |
-
shared.gradio['Replace last reply'] = gr.Button('Replace last reply (Ctrl + Shift + L)', elem_id='Replace-last')
|
53 |
-
shared.gradio['Copy last reply'] = gr.Button('Copy last reply (Ctrl + Shift + K)', elem_id='Copy-last')
|
54 |
-
shared.gradio['Impersonate'] = gr.Button('Impersonate (Ctrl + Shift + M)', elem_id='Impersonate')
|
55 |
-
|
56 |
-
with gr.Row():
|
57 |
-
shared.gradio['Send dummy message'] = gr.Button('Send dummy message')
|
58 |
-
shared.gradio['Send dummy reply'] = gr.Button('Send dummy reply')
|
59 |
-
|
60 |
-
with gr.Row():
|
61 |
-
shared.gradio['Start new chat'] = gr.Button('Start new chat')
|
62 |
-
|
63 |
-
with gr.Row():
|
64 |
-
shared.gradio['send-chat-to-default'] = gr.Button('Send to default')
|
65 |
-
shared.gradio['send-chat-to-notebook'] = gr.Button('Send to notebook')
|
66 |
-
|
67 |
-
with gr.Row(elem_id='past-chats-row'):
|
68 |
-
shared.gradio['unique_id'] = gr.Dropdown(label='Past chats', elem_classes=['slim-dropdown'], interactive=not mu)
|
69 |
-
shared.gradio['rename_chat'] = gr.Button('Rename', elem_classes='refresh-button', interactive=not mu)
|
70 |
-
shared.gradio['delete_chat'] = gr.Button('🗑️', elem_classes='refresh-button', interactive=not mu)
|
71 |
-
shared.gradio['delete_chat-cancel'] = gr.Button('Cancel', visible=False, elem_classes='refresh-button')
|
72 |
-
shared.gradio['delete_chat-confirm'] = gr.Button('Confirm', variant='stop', visible=False, elem_classes='refresh-button')
|
73 |
-
|
74 |
-
with gr.Row(elem_id='rename-row'):
|
75 |
-
shared.gradio['rename_to'] = gr.Textbox(label='Rename to:', placeholder='New name', visible=False, elem_classes=['no-background'])
|
76 |
-
shared.gradio['rename_to-cancel'] = gr.Button('Cancel', visible=False, elem_classes='refresh-button')
|
77 |
-
shared.gradio['rename_to-confirm'] = gr.Button('Confirm', visible=False, elem_classes='refresh-button')
|
78 |
-
|
79 |
-
with gr.Row():
|
80 |
-
shared.gradio['start_with'] = gr.Textbox(label='Start reply with', placeholder='Sure thing!', value=shared.settings['start_with'])
|
81 |
-
|
82 |
-
with gr.Row():
|
83 |
-
shared.gradio['mode'] = gr.Radio(choices=['chat', 'chat-instruct', 'instruct'], value='chat', label='Mode', info='Defines how the chat prompt is generated. In instruct and chat-instruct modes, the instruction template selected under Parameters > Instruction template must match the current model.', elem_id='chat-mode')
|
84 |
-
shared.gradio['chat_style'] = gr.Dropdown(choices=utils.get_available_chat_styles(), label='Chat style', value=shared.settings['chat_style'], visible=shared.settings['mode'] != 'instruct')
|
85 |
-
|
86 |
-
|
87 |
-
def create_chat_settings_ui():
|
88 |
-
mu = shared.args.multi_user
|
89 |
-
with gr.Tab('Character'):
|
90 |
-
with gr.Row():
|
91 |
-
with gr.Column(scale=8):
|
92 |
-
with gr.Row():
|
93 |
-
shared.gradio['character_menu'] = gr.Dropdown(value='', choices=utils.get_available_characters(), label='Character', elem_id='character-menu', info='Used in chat and chat-instruct modes.', elem_classes='slim-dropdown')
|
94 |
-
ui.create_refresh_button(shared.gradio['character_menu'], lambda: None, lambda: {'choices': utils.get_available_characters()}, 'refresh-button', interactive=not mu)
|
95 |
-
shared.gradio['save_character'] = gr.Button('💾', elem_classes='refresh-button', interactive=not mu)
|
96 |
-
shared.gradio['delete_character'] = gr.Button('🗑️', elem_classes='refresh-button', interactive=not mu)
|
97 |
-
|
98 |
-
shared.gradio['name1'] = gr.Textbox(value=shared.settings['name1'], lines=1, label='Your name')
|
99 |
-
shared.gradio['name2'] = gr.Textbox(value='', lines=1, label='Character\'s name')
|
100 |
-
shared.gradio['context'] = gr.Textbox(value='', lines=10, label='Context', elem_classes=['add_scrollbar'])
|
101 |
-
shared.gradio['greeting'] = gr.Textbox(value='', lines=5, label='Greeting', elem_classes=['add_scrollbar'])
|
102 |
-
|
103 |
-
with gr.Column(scale=1):
|
104 |
-
shared.gradio['character_picture'] = gr.Image(label='Character picture', type='pil', interactive=not mu)
|
105 |
-
shared.gradio['your_picture'] = gr.Image(label='Your picture', type='pil', value=Image.open(Path('cache/pfp_me.png')) if Path('cache/pfp_me.png').exists() else None, interactive=not mu)
|
106 |
-
|
107 |
-
with gr.Tab('Instruction template'):
|
108 |
-
with gr.Row():
|
109 |
-
with gr.Row():
|
110 |
-
shared.gradio['instruction_template'] = gr.Dropdown(choices=utils.get_available_instruction_templates(), label='Instruction template', value='None', info='Change this according to the model/LoRA that you are using. Used in instruct and chat-instruct modes.', elem_classes='slim-dropdown')
|
111 |
-
ui.create_refresh_button(shared.gradio['instruction_template'], lambda: None, lambda: {'choices': utils.get_available_instruction_templates()}, 'refresh-button', interactive=not mu)
|
112 |
-
shared.gradio['save_template'] = gr.Button('💾', elem_classes='refresh-button', interactive=not mu)
|
113 |
-
shared.gradio['delete_template'] = gr.Button('🗑️ ', elem_classes='refresh-button', interactive=not mu)
|
114 |
-
|
115 |
-
shared.gradio['name1_instruct'] = gr.Textbox(value='', lines=2, label='User string')
|
116 |
-
shared.gradio['name2_instruct'] = gr.Textbox(value='', lines=1, label='Bot string')
|
117 |
-
shared.gradio['context_instruct'] = gr.Textbox(value='', lines=4, label='Context', elem_classes=['add_scrollbar'])
|
118 |
-
shared.gradio['turn_template'] = gr.Textbox(value='', lines=1, label='Turn template', info='Used to precisely define the placement of spaces and new line characters in instruction prompts.', elem_classes=['add_scrollbar'])
|
119 |
-
with gr.Row():
|
120 |
-
shared.gradio['send_instruction_to_default'] = gr.Button('Send to default', elem_classes=['small-button'])
|
121 |
-
shared.gradio['send_instruction_to_notebook'] = gr.Button('Send to notebook', elem_classes=['small-button'])
|
122 |
-
shared.gradio['send_instruction_to_negative_prompt'] = gr.Button('Send to negative prompt', elem_classes=['small-button'])
|
123 |
-
|
124 |
-
with gr.Row():
|
125 |
-
shared.gradio['chat-instruct_command'] = gr.Textbox(value=shared.settings['chat-instruct_command'], lines=4, label='Command for chat-instruct mode', info='<|character|> gets replaced by the bot name, and <|prompt|> gets replaced by the regular chat prompt.', elem_classes=['add_scrollbar'])
|
126 |
-
|
127 |
-
with gr.Tab('Chat history'):
|
128 |
-
with gr.Row():
|
129 |
-
with gr.Column():
|
130 |
-
shared.gradio['save_chat_history'] = gr.Button(value='Save history')
|
131 |
-
|
132 |
-
with gr.Column():
|
133 |
-
shared.gradio['load_chat_history'] = gr.File(type='binary', file_types=['.json', '.txt'], label='Upload History JSON')
|
134 |
-
|
135 |
-
with gr.Tab('Upload character'):
|
136 |
-
with gr.Tab('YAML or JSON'):
|
137 |
-
with gr.Row():
|
138 |
-
shared.gradio['upload_json'] = gr.File(type='binary', file_types=['.json', '.yaml'], label='JSON or YAML File', interactive=not mu)
|
139 |
-
shared.gradio['upload_img_bot'] = gr.Image(type='pil', label='Profile Picture (optional)', interactive=not mu)
|
140 |
-
|
141 |
-
shared.gradio['Submit character'] = gr.Button(value='Submit', interactive=False)
|
142 |
-
|
143 |
-
with gr.Tab('TavernAI PNG'):
|
144 |
-
with gr.Row():
|
145 |
-
with gr.Column():
|
146 |
-
shared.gradio['upload_img_tavern'] = gr.Image(type='pil', label='TavernAI PNG File', elem_id='upload_img_tavern', interactive=not mu)
|
147 |
-
shared.gradio['tavern_json'] = gr.State()
|
148 |
-
with gr.Column():
|
149 |
-
shared.gradio['tavern_name'] = gr.Textbox(value='', lines=1, label='Name', interactive=False)
|
150 |
-
shared.gradio['tavern_desc'] = gr.Textbox(value='', lines=4, max_lines=4, label='Description', interactive=False)
|
151 |
-
|
152 |
-
shared.gradio['Submit tavern character'] = gr.Button(value='Submit', interactive=False)
|
153 |
-
|
154 |
-
|
155 |
-
def create_event_handlers():
|
156 |
-
|
157 |
-
# Obsolete variables, kept for compatibility with old extensions
|
158 |
-
shared.input_params = gradio(inputs)
|
159 |
-
shared.reload_inputs = gradio(reload_arr)
|
160 |
-
|
161 |
-
shared.gradio['Generate'].click(
|
162 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
163 |
-
lambda x: (x, ''), gradio('textbox'), gradio('Chat input', 'textbox'), show_progress=False).then(
|
164 |
-
chat.generate_chat_reply_wrapper, gradio(inputs), gradio('display', 'history'), show_progress=False).then(
|
165 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
166 |
-
chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(
|
167 |
-
lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
|
168 |
-
|
169 |
-
shared.gradio['textbox'].submit(
|
170 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
171 |
-
lambda x: (x, ''), gradio('textbox'), gradio('Chat input', 'textbox'), show_progress=False).then(
|
172 |
-
chat.generate_chat_reply_wrapper, gradio(inputs), gradio('display', 'history'), show_progress=False).then(
|
173 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
174 |
-
chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(
|
175 |
-
lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
|
176 |
-
|
177 |
-
shared.gradio['Regenerate'].click(
|
178 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
179 |
-
partial(chat.generate_chat_reply_wrapper, regenerate=True), gradio(inputs), gradio('display', 'history'), show_progress=False).then(
|
180 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
181 |
-
chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(
|
182 |
-
lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
|
183 |
-
|
184 |
-
shared.gradio['Continue'].click(
|
185 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
186 |
-
partial(chat.generate_chat_reply_wrapper, _continue=True), gradio(inputs), gradio('display', 'history'), show_progress=False).then(
|
187 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
188 |
-
chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(
|
189 |
-
lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
|
190 |
-
|
191 |
-
shared.gradio['Impersonate'].click(
|
192 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
193 |
-
lambda x: x, gradio('textbox'), gradio('Chat input'), show_progress=False).then(
|
194 |
-
chat.impersonate_wrapper, gradio(inputs), gradio('textbox', 'display'), show_progress=False).then(
|
195 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
196 |
-
lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
|
197 |
-
|
198 |
-
shared.gradio['Replace last reply'].click(
|
199 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
200 |
-
chat.replace_last_reply, gradio('textbox', 'interface_state'), gradio('history')).then(
|
201 |
-
lambda: '', None, gradio('textbox'), show_progress=False).then(
|
202 |
-
chat.redraw_html, gradio(reload_arr), gradio('display')).then(
|
203 |
-
chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None)
|
204 |
-
|
205 |
-
shared.gradio['Send dummy message'].click(
|
206 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
207 |
-
chat.send_dummy_message, gradio('textbox', 'interface_state'), gradio('history')).then(
|
208 |
-
lambda: '', None, gradio('textbox'), show_progress=False).then(
|
209 |
-
chat.redraw_html, gradio(reload_arr), gradio('display')).then(
|
210 |
-
chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None)
|
211 |
-
|
212 |
-
shared.gradio['Send dummy reply'].click(
|
213 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
214 |
-
chat.send_dummy_reply, gradio('textbox', 'interface_state'), gradio('history')).then(
|
215 |
-
lambda: '', None, gradio('textbox'), show_progress=False).then(
|
216 |
-
chat.redraw_html, gradio(reload_arr), gradio('display')).then(
|
217 |
-
chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None)
|
218 |
-
|
219 |
-
shared.gradio['Remove last'].click(
|
220 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
221 |
-
chat.remove_last_message, gradio('history'), gradio('textbox', 'history'), show_progress=False).then(
|
222 |
-
chat.redraw_html, gradio(reload_arr), gradio('display')).then(
|
223 |
-
chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None)
|
224 |
-
|
225 |
-
shared.gradio['Stop'].click(
|
226 |
-
stop_everything_event, None, None, queue=False).then(
|
227 |
-
chat.redraw_html, gradio(reload_arr), gradio('display'))
|
228 |
-
|
229 |
-
if not shared.args.multi_user:
|
230 |
-
shared.gradio['unique_id'].select(
|
231 |
-
chat.load_history, gradio('unique_id', 'character_menu', 'mode'), gradio('history')).then(
|
232 |
-
chat.redraw_html, gradio(reload_arr), gradio('display'))
|
233 |
-
|
234 |
-
shared.gradio['Start new chat'].click(
|
235 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
236 |
-
chat.start_new_chat, gradio('interface_state'), gradio('history')).then(
|
237 |
-
chat.redraw_html, gradio(reload_arr), gradio('display')).then(
|
238 |
-
lambda x: gr.update(choices=(histories := chat.find_all_histories(x)), value=histories[0]), gradio('interface_state'), gradio('unique_id'))
|
239 |
-
|
240 |
-
shared.gradio['delete_chat'].click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, gradio(clear_arr))
|
241 |
-
shared.gradio['delete_chat-cancel'].click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, gradio(clear_arr))
|
242 |
-
shared.gradio['delete_chat-confirm'].click(
|
243 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
244 |
-
chat.delete_history, gradio('unique_id', 'character_menu', 'mode'), None).then(
|
245 |
-
chat.load_latest_history, gradio('interface_state'), gradio('history')).then(
|
246 |
-
chat.redraw_html, gradio(reload_arr), gradio('display')).then(
|
247 |
-
lambda x: gr.update(choices=(histories := chat.find_all_histories(x)), value=histories[0]), gradio('interface_state'), gradio('unique_id')).then(
|
248 |
-
lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, gradio(clear_arr))
|
249 |
-
|
250 |
-
shared.gradio['rename_chat'].click(
|
251 |
-
lambda x: x, gradio('unique_id'), gradio('rename_to')).then(
|
252 |
-
lambda: [gr.update(visible=True)] * 3, None, gradio('rename_to', 'rename_to-confirm', 'rename_to-cancel'), show_progress=False)
|
253 |
-
|
254 |
-
shared.gradio['rename_to-cancel'].click(
|
255 |
-
lambda: [gr.update(visible=False)] * 3, None, gradio('rename_to', 'rename_to-confirm', 'rename_to-cancel'), show_progress=False)
|
256 |
-
|
257 |
-
shared.gradio['rename_to-confirm'].click(
|
258 |
-
chat.rename_history, gradio('unique_id', 'rename_to', 'character_menu', 'mode'), None).then(
|
259 |
-
lambda: [gr.update(visible=False)] * 3, None, gradio('rename_to', 'rename_to-confirm', 'rename_to-cancel'), show_progress=False).then(
|
260 |
-
lambda x, y: gr.update(choices=chat.find_all_histories(x), value=y), gradio('interface_state', 'rename_to'), gradio('unique_id'))
|
261 |
-
|
262 |
-
shared.gradio['load_chat_history'].upload(
|
263 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
264 |
-
chat.start_new_chat, gradio('interface_state'), gradio('history')).then(
|
265 |
-
chat.load_history_json, gradio('load_chat_history', 'history'), gradio('history')).then(
|
266 |
-
chat.redraw_html, gradio(reload_arr), gradio('display')).then(
|
267 |
-
lambda x: gr.update(choices=(histories := chat.find_all_histories(x)), value=histories[0]), gradio('interface_state'), gradio('unique_id')).then(
|
268 |
-
chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(
|
269 |
-
lambda: None, None, None, _js=f'() => {{{ui.switch_tabs_js}; switch_to_chat()}}')
|
270 |
-
|
271 |
-
shared.gradio['character_menu'].change(
|
272 |
-
partial(chat.load_character, instruct=False), gradio('character_menu', 'name1', 'name2'), gradio('name1', 'name2', 'character_picture', 'greeting', 'context', 'dummy')).success(
|
273 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
274 |
-
chat.load_latest_history, gradio('interface_state'), gradio('history')).then(
|
275 |
-
chat.redraw_html, gradio(reload_arr), gradio('display')).then(
|
276 |
-
lambda x: gr.update(choices=(histories := chat.find_all_histories(x)), value=histories[0]), gradio('interface_state'), gradio('unique_id'))
|
277 |
-
|
278 |
-
shared.gradio['mode'].change(
|
279 |
-
lambda x: gr.update(visible=x != 'instruct'), gradio('mode'), gradio('chat_style'), show_progress=False).then(
|
280 |
-
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
|
281 |
-
partial(chat.character_is_loaded, raise_exception=True), gradio('interface_state'), None).success(
|
282 |
-
chat.load_latest_history, gradio('interface_state'), gradio('history')).then(
|
283 |
-
chat.redraw_html, gradio(reload_arr), gradio('display')).then(
|
284 |
-
lambda x: gr.update(choices=(histories := chat.find_all_histories(x)), value=histories[0]), gradio('interface_state'), gradio('unique_id'))
|
285 |
-
|
286 |
-
shared.gradio['chat_style'].change(chat.redraw_html, gradio(reload_arr), gradio('display'))
|
287 |
-
shared.gradio['instruction_template'].change(
|
288 |
-
partial(chat.load_character, instruct=True), gradio('instruction_template', 'name1_instruct', 'name2_instruct'), gradio('name1_instruct', 'name2_instruct', 'dummy', 'dummy', 'context_instruct', 'turn_template'))
|
289 |
-
|
290 |
-
shared.gradio['Copy last reply'].click(chat.send_last_reply_to_input, gradio('history'), gradio('textbox'), show_progress=False)
|
291 |
-
|
292 |
-
# Save/delete a character
|
293 |
-
shared.gradio['save_character'].click(
|
294 |
-
lambda x: x, gradio('name2'), gradio('save_character_filename')).then(
|
295 |
-
lambda: gr.update(visible=True), None, gradio('character_saver'))
|
296 |
-
|
297 |
-
shared.gradio['delete_character'].click(lambda: gr.update(visible=True), None, gradio('character_deleter'))
|
298 |
-
|
299 |
-
shared.gradio['save_template'].click(
|
300 |
-
lambda: 'My Template.yaml', None, gradio('save_filename')).then(
|
301 |
-
lambda: 'instruction-templates/', None, gradio('save_root')).then(
|
302 |
-
-        chat.generate_instruction_template_yaml, gradio('name1_instruct', 'name2_instruct', 'context_instruct', 'turn_template'), gradio('save_contents')).then(
-        lambda: gr.update(visible=True), None, gradio('file_saver'))
-
-    shared.gradio['delete_template'].click(
-        lambda x: f'{x}.yaml', gradio('instruction_template'), gradio('delete_filename')).then(
-        lambda: 'instruction-templates/', None, gradio('delete_root')).then(
-        lambda: gr.update(visible=True), None, gradio('file_deleter'))
-
-    shared.gradio['save_chat_history'].click(
-        lambda x: json.dumps(x, indent=4), gradio('history'), gradio('temporary_text')).then(
-        None, gradio('temporary_text', 'character_menu', 'mode'), None, _js=f'(hist, char, mode) => {{{ui.save_files_js}; saveHistory(hist, char, mode)}}')
-
-    shared.gradio['Submit character'].click(
-        chat.upload_character, gradio('upload_json', 'upload_img_bot'), gradio('character_menu')).then(
-        lambda: None, None, None, _js=f'() => {{{ui.switch_tabs_js}; switch_to_character()}}')
-
-    shared.gradio['Submit tavern character'].click(
-        chat.upload_tavern_character, gradio('upload_img_tavern', 'tavern_json'), gradio('character_menu')).then(
-        lambda: None, None, None, _js=f'() => {{{ui.switch_tabs_js}; switch_to_character()}}')
-
-    shared.gradio['upload_json'].upload(lambda: gr.update(interactive=True), None, gradio('Submit character'))
-    shared.gradio['upload_json'].clear(lambda: gr.update(interactive=False), None, gradio('Submit character'))
-    shared.gradio['upload_img_tavern'].upload(chat.check_tavern_character, gradio('upload_img_tavern'), gradio('tavern_name', 'tavern_desc', 'tavern_json', 'Submit tavern character'), show_progress=False)
-    shared.gradio['upload_img_tavern'].clear(lambda: (None, None, None, gr.update(interactive=False)), None, gradio('tavern_name', 'tavern_desc', 'tavern_json', 'Submit tavern character'), show_progress=False)
-    shared.gradio['your_picture'].change(
-        chat.upload_your_profile_picture, gradio('your_picture'), None).then(
-        partial(chat.redraw_html, reset_cache=True), gradio(reload_arr), gradio('display'))
-
-    shared.gradio['send_instruction_to_default'].click(
-        prompts.load_instruction_prompt_simple, gradio('instruction_template'), gradio('textbox-default')).then(
-        lambda: None, None, None, _js=f'() => {{{ui.switch_tabs_js}; switch_to_default()}}')
-
-    shared.gradio['send_instruction_to_notebook'].click(
-        prompts.load_instruction_prompt_simple, gradio('instruction_template'), gradio('textbox-notebook')).then(
-        lambda: None, None, None, _js=f'() => {{{ui.switch_tabs_js}; switch_to_notebook()}}')
-
-    shared.gradio['send_instruction_to_negative_prompt'].click(
-        prompts.load_instruction_prompt_simple, gradio('instruction_template'), gradio('negative_prompt')).then(
-        lambda: None, None, None, _js=f'() => {{{ui.switch_tabs_js}; switch_to_generation_parameters()}}')
-
-    shared.gradio['send-chat-to-default'].click(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        partial(chat.generate_chat_prompt, '', _continue=True), gradio('interface_state'), gradio('textbox-default')).then(
-        lambda: None, None, None, _js=f'() => {{{ui.switch_tabs_js}; switch_to_default()}}')
-
-    shared.gradio['send-chat-to-notebook'].click(
-        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        partial(chat.generate_chat_prompt, '', _continue=True), gradio('interface_state'), gradio('textbox-notebook')).then(
-        lambda: None, None, None, _js=f'() => {{{ui.switch_tabs_js}; switch_to_notebook()}}')
-
-    shared.gradio['show_controls'].change(None, gradio('show_controls'), None, _js=f'(x) => {{{ui.show_controls_js}; toggle_controls(x)}}')
spaces/AsakuraMizu/moe-tts/transforms.py
DELETED
@@ -1,193 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(inputs,
-                                           unnormalized_widths,
-                                           unnormalized_heights,
-                                           unnormalized_derivatives,
-                                           inverse=False,
-                                           tails=None,
-                                           tail_bound=1.,
-                                           min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                                           min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                                           min_derivative=DEFAULT_MIN_DERIVATIVE):
-
-    if tails is None:
-        spline_fn = rational_quadratic_spline
-        spline_kwargs = {}
-    else:
-        spline_fn = unconstrained_rational_quadratic_spline
-        spline_kwargs = {
-            'tails': tails,
-            'tail_bound': tail_bound
-        }
-
-    outputs, logabsdet = spline_fn(
-        inputs=inputs,
-        unnormalized_widths=unnormalized_widths,
-        unnormalized_heights=unnormalized_heights,
-        unnormalized_derivatives=unnormalized_derivatives,
-        inverse=inverse,
-        min_bin_width=min_bin_width,
-        min_bin_height=min_bin_height,
-        min_derivative=min_derivative,
-        **spline_kwargs
-    )
-    return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
-    bin_locations[..., -1] += eps
-    return torch.sum(
-        inputs[..., None] >= bin_locations,
-        dim=-1
-    ) - 1
-
-
-def unconstrained_rational_quadratic_spline(inputs,
-                                            unnormalized_widths,
-                                            unnormalized_heights,
-                                            unnormalized_derivatives,
-                                            inverse=False,
-                                            tails='linear',
-                                            tail_bound=1.,
-                                            min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                                            min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                                            min_derivative=DEFAULT_MIN_DERIVATIVE):
-    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
-    outside_interval_mask = ~inside_interval_mask
-
-    outputs = torch.zeros_like(inputs)
-    logabsdet = torch.zeros_like(inputs)
-
-    if tails == 'linear':
-        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
-        constant = np.log(np.exp(1 - min_derivative) - 1)
-        unnormalized_derivatives[..., 0] = constant
-        unnormalized_derivatives[..., -1] = constant
-
-        outputs[outside_interval_mask] = inputs[outside_interval_mask]
-        logabsdet[outside_interval_mask] = 0
-    else:
-        raise RuntimeError('{} tails are not implemented.'.format(tails))
-
-    outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
-        inputs=inputs[inside_interval_mask],
-        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
-        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
-        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
-        inverse=inverse,
-        left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
-        min_bin_width=min_bin_width,
-        min_bin_height=min_bin_height,
-        min_derivative=min_derivative
-    )
-
-    return outputs, logabsdet
-
-def rational_quadratic_spline(inputs,
-                              unnormalized_widths,
-                              unnormalized_heights,
-                              unnormalized_derivatives,
-                              inverse=False,
-                              left=0., right=1., bottom=0., top=1.,
-                              min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                              min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                              min_derivative=DEFAULT_MIN_DERIVATIVE):
-    if torch.min(inputs) < left or torch.max(inputs) > right:
-        raise ValueError('Input to a transform is not within its domain')
-
-    num_bins = unnormalized_widths.shape[-1]
-
-    if min_bin_width * num_bins > 1.0:
-        raise ValueError('Minimal bin width too large for the number of bins')
-    if min_bin_height * num_bins > 1.0:
-        raise ValueError('Minimal bin height too large for the number of bins')
-
-    widths = F.softmax(unnormalized_widths, dim=-1)
-    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
-    cumwidths = torch.cumsum(widths, dim=-1)
-    cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
-    cumwidths = (right - left) * cumwidths + left
-    cumwidths[..., 0] = left
-    cumwidths[..., -1] = right
-    widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
-    derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
-    heights = F.softmax(unnormalized_heights, dim=-1)
-    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
-    cumheights = torch.cumsum(heights, dim=-1)
-    cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
-    cumheights = (top - bottom) * cumheights + bottom
-    cumheights[..., 0] = bottom
-    cumheights[..., -1] = top
-    heights = cumheights[..., 1:] - cumheights[..., :-1]
-
-    if inverse:
-        bin_idx = searchsorted(cumheights, inputs)[..., None]
-    else:
-        bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
-    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
-    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
-    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
-    delta = heights / widths
-    input_delta = delta.gather(-1, bin_idx)[..., 0]
-
-    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
-    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
-    input_heights = heights.gather(-1, bin_idx)[..., 0]
-
-    if inverse:
-        a = (((inputs - input_cumheights) * (input_derivatives
-                                             + input_derivatives_plus_one
-                                             - 2 * input_delta)
-              + input_heights * (input_delta - input_derivatives)))
-        b = (input_heights * input_derivatives
-             - (inputs - input_cumheights) * (input_derivatives
-                                              + input_derivatives_plus_one
-                                              - 2 * input_delta))
-        c = - input_delta * (inputs - input_cumheights)
-
-        discriminant = b.pow(2) - 4 * a * c
-        assert (discriminant >= 0).all()
-
-        root = (2 * c) / (-b - torch.sqrt(discriminant))
-        outputs = root * input_bin_widths + input_cumwidths
-
-        theta_one_minus_theta = root * (1 - root)
-        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
-                                     * theta_one_minus_theta)
-        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
-                                                     + 2 * input_delta * theta_one_minus_theta
-                                                     + input_derivatives * (1 - root).pow(2))
-        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
-        return outputs, -logabsdet
-    else:
-        theta = (inputs - input_cumwidths) / input_bin_widths
-        theta_one_minus_theta = theta * (1 - theta)
-
-        numerator = input_heights * (input_delta * theta.pow(2)
-                                     + input_derivatives * theta_one_minus_theta)
-        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
-                                     * theta_one_minus_theta)
-        outputs = input_cumheights + numerator / denominator
-
-        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
-                                                     + 2 * input_delta * theta_one_minus_theta
-                                                     + input_derivatives * (1 - theta).pow(2))
-        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
-        return outputs, logabsdet
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/serialize.py
DELETED
@@ -1,190 +0,0 @@
-# SPDX-FileCopyrightText: 2015 Eric Larson
-#
-# SPDX-License-Identifier: Apache-2.0
-
-import base64
-import io
-import json
-import zlib
-
-from pip._vendor import msgpack
-from pip._vendor.requests.structures import CaseInsensitiveDict
-
-from .compat import HTTPResponse, pickle, text_type
-
-
-def _b64_decode_bytes(b):
-    return base64.b64decode(b.encode("ascii"))
-
-
-def _b64_decode_str(s):
-    return _b64_decode_bytes(s).decode("utf8")
-
-
-_default_body_read = object()
-
-
-class Serializer(object):
-    def dumps(self, request, response, body=None):
-        response_headers = CaseInsensitiveDict(response.headers)
-
-        if body is None:
-            # When a body isn't passed in, we'll read the response. We
-            # also update the response with a new file handler to be
-            # sure it acts as though it was never read.
-            body = response.read(decode_content=False)
-            response._fp = io.BytesIO(body)
-
-        # NOTE: This is all a bit weird, but it's really important that on
-        #       Python 2.x these objects are unicode and not str, even when
-        #       they contain only ascii. The problem here is that msgpack
-        #       understands the difference between unicode and bytes and we
-        #       have it set to differentiate between them, however Python 2
-        #       doesn't know the difference. Forcing these to unicode will be
-        #       enough to have msgpack know the difference.
-        data = {
-            u"response": {
-                u"body": body,  # Empty bytestring if body is stored separately
-                u"headers": dict(
-                    (text_type(k), text_type(v)) for k, v in response.headers.items()
-                ),
-                u"status": response.status,
-                u"version": response.version,
-                u"reason": text_type(response.reason),
-                u"strict": response.strict,
-                u"decode_content": response.decode_content,
-            }
-        }
-
-        # Construct our vary headers
-        data[u"vary"] = {}
-        if u"vary" in response_headers:
-            varied_headers = response_headers[u"vary"].split(",")
-            for header in varied_headers:
-                header = text_type(header).strip()
-                header_value = request.headers.get(header, None)
-                if header_value is not None:
-                    header_value = text_type(header_value)
-                data[u"vary"][header] = header_value
-
-        return b",".join([b"cc=4", msgpack.dumps(data, use_bin_type=True)])
-
-    def loads(self, request, data, body_file=None):
-        # Short circuit if we've been given an empty set of data
-        if not data:
-            return
-
-        # Determine what version of the serializer the data was serialized
-        # with
-        try:
-            ver, data = data.split(b",", 1)
-        except ValueError:
-            ver = b"cc=0"
-
-        # Make sure that our "ver" is actually a version and isn't a false
-        # positive from a , being in the data stream.
-        if ver[:3] != b"cc=":
-            data = ver + data
-            ver = b"cc=0"
-
-        # Get the version number out of the cc=N
-        ver = ver.split(b"=", 1)[-1].decode("ascii")
-
-        # Dispatch to the actual load method for the given version
-        try:
-            return getattr(self, "_loads_v{}".format(ver))(request, data, body_file)
-
-        except AttributeError:
-            # This is a version we don't have a loads function for, so we'll
-            # just treat it as a miss and return None
-            return
-
-    def prepare_response(self, request, cached, body_file=None):
-        """Verify our vary headers match and construct a real urllib3
-        HTTPResponse object.
-        """
-        # Special case the '*' Vary value as it means we cannot actually
-        # determine if the cached response is suitable for this request.
-        # This case is also handled in the controller code when creating
-        # a cache entry, but is left here for backwards compatibility.
-        if "*" in cached.get("vary", {}):
-            return
-
-        # Ensure that the Vary headers for the cached response match our
-        # request
-        for header, value in cached.get("vary", {}).items():
-            if request.headers.get(header, None) != value:
-                return
-
-        body_raw = cached["response"].pop("body")
-
-        headers = CaseInsensitiveDict(data=cached["response"]["headers"])
-        if headers.get("transfer-encoding", "") == "chunked":
-            headers.pop("transfer-encoding")
-
-        cached["response"]["headers"] = headers
-
-        try:
-            if body_file is None:
-                body = io.BytesIO(body_raw)
-            else:
-                body = body_file
-        except TypeError:
-            # This can happen if cachecontrol serialized to v1 format (pickle)
-            # using Python 2. A Python 2 str(byte string) will be unpickled as
-            # a Python 3 str (unicode string), which will cause the above to
-            # fail with:
-            #
-            #     TypeError: 'str' does not support the buffer interface
-            body = io.BytesIO(body_raw.encode("utf8"))
-
-        return HTTPResponse(body=body, preload_content=False, **cached["response"])
-
-    def _loads_v0(self, request, data, body_file=None):
-        # The original legacy cache data. This doesn't contain enough
-        # information to construct everything we need, so we'll treat this as
-        # a miss.
-        return
-
-    def _loads_v1(self, request, data, body_file=None):
-        try:
-            cached = pickle.loads(data)
-        except ValueError:
-            return
-
-        return self.prepare_response(request, cached, body_file)
-
-    def _loads_v2(self, request, data, body_file=None):
-        assert body_file is None
-        try:
-            cached = json.loads(zlib.decompress(data).decode("utf8"))
-        except (ValueError, zlib.error):
-            return
-
-        # We need to decode the items that we've base64 encoded
-        cached["response"]["body"] = _b64_decode_bytes(cached["response"]["body"])
-        cached["response"]["headers"] = dict(
-            (_b64_decode_str(k), _b64_decode_str(v))
-            for k, v in cached["response"]["headers"].items()
-        )
-        cached["response"]["reason"] = _b64_decode_str(cached["response"]["reason"])
-        cached["vary"] = dict(
-            (_b64_decode_str(k), _b64_decode_str(v) if v is not None else v)
-            for k, v in cached["vary"].items()
-        )
-
-        return self.prepare_response(request, cached, body_file)
-
-    def _loads_v3(self, request, data, body_file):
-        # Due to Python 2 encoding issues, it's impossible to know for sure
-        # exactly how to load v3 entries, thus we'll treat these as a miss so
-        # that they get rewritten out as v4 entries.
-        return
-
-    def _loads_v4(self, request, data, body_file=None):
-        try:
-            cached = msgpack.loads(data, raw=False)
-        except ValueError:
-            return
-
-        return self.prepare_response(request, cached, body_file)
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/webencodings/tests.py
DELETED
@@ -1,153 +0,0 @@
-# coding: utf-8
-"""
-
-    webencodings.tests
-    ~~~~~~~~~~~~~~~~~~
-
-    A basic test suite for Encoding.
-
-    :copyright: Copyright 2012 by Simon Sapin
-    :license: BSD, see LICENSE for details.
-
-"""
-
-from __future__ import unicode_literals
-
-from . import (lookup, LABELS, decode, encode, iter_decode, iter_encode,
-               IncrementalDecoder, IncrementalEncoder, UTF8)
-
-
-def assert_raises(exception, function, *args, **kwargs):
-    try:
-        function(*args, **kwargs)
-    except exception:
-        return
-    else:  # pragma: no cover
-        raise AssertionError('Did not raise %s.' % exception)
-
-
-def test_labels():
-    assert lookup('utf-8').name == 'utf-8'
-    assert lookup('Utf-8').name == 'utf-8'
-    assert lookup('UTF-8').name == 'utf-8'
-    assert lookup('utf8').name == 'utf-8'
-    assert lookup('utf8').name == 'utf-8'
-    assert lookup('utf8 ').name == 'utf-8'
-    assert lookup(' \r\nutf8\t').name == 'utf-8'
-    assert lookup('u8') is None  # Python label.
-    assert lookup('utf-8 ') is None  # Non-ASCII white space.
-
-    assert lookup('US-ASCII').name == 'windows-1252'
-    assert lookup('iso-8859-1').name == 'windows-1252'
-    assert lookup('latin1').name == 'windows-1252'
-    assert lookup('LATIN1').name == 'windows-1252'
-    assert lookup('latin-1') is None
-    assert lookup('LATİN1') is None  # ASCII-only case insensitivity.
-
-
-def test_all_labels():
-    for label in LABELS:
-        assert decode(b'', label) == ('', lookup(label))
-        assert encode('', label) == b''
-        for repeat in [0, 1, 12]:
-            output, _ = iter_decode([b''] * repeat, label)
-            assert list(output) == []
-            assert list(iter_encode([''] * repeat, label)) == []
-        decoder = IncrementalDecoder(label)
-        assert decoder.decode(b'') == ''
-        assert decoder.decode(b'', final=True) == ''
-        encoder = IncrementalEncoder(label)
-        assert encoder.encode('') == b''
-        assert encoder.encode('', final=True) == b''
-    # All encoding names are valid labels too:
-    for name in set(LABELS.values()):
-        assert lookup(name).name == name
-
-
-def test_invalid_label():
-    assert_raises(LookupError, decode, b'\xEF\xBB\xBF\xc3\xa9', 'invalid')
-    assert_raises(LookupError, encode, 'é', 'invalid')
-    assert_raises(LookupError, iter_decode, [], 'invalid')
-    assert_raises(LookupError, iter_encode, [], 'invalid')
-    assert_raises(LookupError, IncrementalDecoder, 'invalid')
-    assert_raises(LookupError, IncrementalEncoder, 'invalid')
-
-
-def test_decode():
-    assert decode(b'\x80', 'latin1') == ('€', lookup('latin1'))
-    assert decode(b'\x80', lookup('latin1')) == ('€', lookup('latin1'))
-    assert decode(b'\xc3\xa9', 'utf8') == ('é', lookup('utf8'))
-    assert decode(b'\xc3\xa9', UTF8) == ('é', lookup('utf8'))
-    assert decode(b'\xc3\xa9', 'ascii') == ('é', lookup('ascii'))
-    assert decode(b'\xEF\xBB\xBF\xc3\xa9', 'ascii') == ('é', lookup('utf8'))  # UTF-8 with BOM
-
-    assert decode(b'\xFE\xFF\x00\xe9', 'ascii') == ('é', lookup('utf-16be'))  # UTF-16-BE with BOM
-    assert decode(b'\xFF\xFE\xe9\x00', 'ascii') == ('é', lookup('utf-16le'))  # UTF-16-LE with BOM
-    assert decode(b'\xFE\xFF\xe9\x00', 'ascii') == ('\ue900', lookup('utf-16be'))
-    assert decode(b'\xFF\xFE\x00\xe9', 'ascii') == ('\ue900', lookup('utf-16le'))
-
-    assert decode(b'\x00\xe9', 'UTF-16BE') == ('é', lookup('utf-16be'))
-    assert decode(b'\xe9\x00', 'UTF-16LE') == ('é', lookup('utf-16le'))
-    assert decode(b'\xe9\x00', 'UTF-16') == ('é', lookup('utf-16le'))
-
-    assert decode(b'\xe9\x00', 'UTF-16BE') == ('\ue900', lookup('utf-16be'))
-    assert decode(b'\x00\xe9', 'UTF-16LE') == ('\ue900', lookup('utf-16le'))
-    assert decode(b'\x00\xe9', 'UTF-16') == ('\ue900', lookup('utf-16le'))
-
-
-def test_encode():
-    assert encode('é', 'latin1') == b'\xe9'
-    assert encode('é', 'utf8') == b'\xc3\xa9'
-    assert encode('é', 'utf8') == b'\xc3\xa9'
-    assert encode('é', 'utf-16') == b'\xe9\x00'
-    assert encode('é', 'utf-16le') == b'\xe9\x00'
-    assert encode('é', 'utf-16be') == b'\x00\xe9'
-
-
-def test_iter_decode():
-    def iter_decode_to_string(input, fallback_encoding):
-        output, _encoding = iter_decode(input, fallback_encoding)
-        return ''.join(output)
-    assert iter_decode_to_string([], 'latin1') == ''
-    assert iter_decode_to_string([b''], 'latin1') == ''
-    assert iter_decode_to_string([b'\xe9'], 'latin1') == 'é'
-    assert iter_decode_to_string([b'hello'], 'latin1') == 'hello'
-    assert iter_decode_to_string([b'he', b'llo'], 'latin1') == 'hello'
-    assert iter_decode_to_string([b'hell', b'o'], 'latin1') == 'hello'
-    assert iter_decode_to_string([b'\xc3\xa9'], 'latin1') == 'é'
-    assert iter_decode_to_string([b'\xEF\xBB\xBF\xc3\xa9'], 'latin1') == 'é'
-    assert iter_decode_to_string([
-        b'\xEF\xBB\xBF', b'\xc3', b'\xa9'], 'latin1') == 'é'
-    assert iter_decode_to_string([
-        b'\xEF\xBB\xBF', b'a', b'\xc3'], 'latin1') == 'a\uFFFD'
-    assert iter_decode_to_string([
-        b'', b'\xEF', b'', b'', b'\xBB\xBF\xc3', b'\xa9'], 'latin1') == 'é'
-    assert iter_decode_to_string([b'\xEF\xBB\xBF'], 'latin1') == ''
-    assert iter_decode_to_string([b'\xEF\xBB'], 'latin1') == 'ï»'
-    assert iter_decode_to_string([b'\xFE\xFF\x00\xe9'], 'latin1') == 'é'
-    assert iter_decode_to_string([b'\xFF\xFE\xe9\x00'], 'latin1') == 'é'
-    assert iter_decode_to_string([
-        b'', b'\xFF', b'', b'', b'\xFE\xe9', b'\x00'], 'latin1') == 'é'
-    assert iter_decode_to_string([
-        b'', b'h\xe9', b'llo'], 'x-user-defined') == 'h\uF7E9llo'
-
-
-def test_iter_encode():
-    assert b''.join(iter_encode([], 'latin1')) == b''
-    assert b''.join(iter_encode([''], 'latin1')) == b''
-    assert b''.join(iter_encode(['é'], 'latin1')) == b'\xe9'
-    assert b''.join(iter_encode(['', 'é', '', ''], 'latin1')) == b'\xe9'
-    assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16')) == b'\xe9\x00'
-    assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16le')) == b'\xe9\x00'
-    assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16be')) == b'\x00\xe9'
-    assert b''.join(iter_encode([
-        '', 'h\uF7E9', '', 'llo'], 'x-user-defined')) == b'h\xe9llo'
-
-
-def test_x_user_defined():
-    encoded = b'2,\x0c\x0b\x1aO\xd9#\xcb\x0f\xc9\xbbt\xcf\xa8\xca'
-    decoded = '2,\x0c\x0b\x1aO\uf7d9#\uf7cb\x0f\uf7c9\uf7bbt\uf7cf\uf7a8\uf7ca'
-    encoded = b'aa'
-    decoded = 'aa'
-    assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined'))
-    assert encode(decoded, 'x-user-defined') == encoded
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/unixccompiler.py
DELETED
@@ -1,401 +0,0 @@
-"""distutils.unixccompiler
-
-Contains the UnixCCompiler class, a subclass of CCompiler that handles
-the "typical" Unix-style command-line C compiler:
-  * macros defined with -Dname[=value]
-  * macros undefined with -Uname
-  * include search directories specified with -Idir
-  * libraries specified with -lllib
-  * library search directories specified with -Ldir
-  * compile handled by 'cc' (or similar) executable with -c option:
-    compiles .c to .o
-  * link static library handled by 'ar' command (possibly with 'ranlib')
-  * link shared library handled by 'cc -shared'
-"""
-
-import os
-import sys
-import re
-import shlex
-import itertools
-
-from distutils import sysconfig
-from distutils.dep_util import newer
-from distutils.ccompiler import CCompiler, gen_preprocess_options, gen_lib_options
-from distutils.errors import DistutilsExecError, CompileError, LibError, LinkError
-from distutils import log
-from ._macos_compat import compiler_fixup
-
-# XXX Things not currently handled:
-#   * optimization/debug/warning flags; we just use whatever's in Python's
-#     Makefile and live with it.  Is this adequate?  If not, we might
-#     have to have a bunch of subclasses GNUCCompiler, SGICCompiler,
-#     SunCCompiler, and I suspect down that road lies madness.
-#   * even if we don't know a warning flag from an optimization flag,
-#     we need some way for outsiders to feed preprocessor/compiler/linker
-#     flags in to us -- eg. a sysadmin might want to mandate certain flags
-#     via a site config file, or a user might want to set something for
-#     compiling this module distribution only via the setup.py command
-#     line, whatever.  As long as these options come from something on the
-#     current system, they can be as system-dependent as they like, and we
-#     should just happily stuff them into the preprocessor/compiler/linker
-#     options and carry on.
-
-
-def _split_env(cmd):
-    """
-    For macOS, split command into 'env' portion (if any)
-    and the rest of the linker command.
-
-    >>> _split_env(['a', 'b', 'c'])
-    ([], ['a', 'b', 'c'])
-    >>> _split_env(['/usr/bin/env', 'A=3', 'gcc'])
-    (['/usr/bin/env', 'A=3'], ['gcc'])
-    """
-    pivot = 0
-    if os.path.basename(cmd[0]) == "env":
-        pivot = 1
-        while '=' in cmd[pivot]:
-            pivot += 1
-    return cmd[:pivot], cmd[pivot:]
-
-
-def _split_aix(cmd):
-    """
-    AIX platforms prefix the compiler with the ld_so_aix
-    script, so split that from the linker command.
-
-    >>> _split_aix(['a', 'b', 'c'])
-    ([], ['a', 'b', 'c'])
-    >>> _split_aix(['/bin/foo/ld_so_aix', 'gcc'])
-    (['/bin/foo/ld_so_aix'], ['gcc'])
-    """
-    pivot = os.path.basename(cmd[0]) == 'ld_so_aix'
-    return cmd[:pivot], cmd[pivot:]
-
-
-def _linker_params(linker_cmd, compiler_cmd):
-    """
-    The linker command usually begins with the compiler
-    command (possibly multiple elements), followed by zero or more
-    params for shared library building.
-
-    If the LDSHARED env variable overrides the linker command,
-    however, the commands may not match.
-
-    Return the best guess of the linker parameters by stripping
-    the linker command. If the compiler command does not
-    match the linker command, assume the linker command is
-    just the first element.
-
-    >>> _linker_params('gcc foo bar'.split(), ['gcc'])
-    ['foo', 'bar']
-    >>> _linker_params('gcc foo bar'.split(), ['other'])
-    ['foo', 'bar']
-    >>> _linker_params('ccache gcc foo bar'.split(), 'ccache gcc'.split())
-    ['foo', 'bar']
-    >>> _linker_params(['gcc'], ['gcc'])
-    []
-    """
-    c_len = len(compiler_cmd)
-    pivot = c_len if linker_cmd[:c_len] == compiler_cmd else 1
-    return linker_cmd[pivot:]
-
-
-class UnixCCompiler(CCompiler):
-
-    compiler_type = 'unix'
-
-    # These are used by CCompiler in two places: the constructor sets
-    # instance attributes 'preprocessor', 'compiler', etc. from them, and
-    # 'set_executable()' allows any of these to be set.  The defaults here
-    # are pretty generic; they will probably have to be set by an outsider
-    # (eg. using information discovered by the sysconfig about building
-    # Python extensions).
-    executables = {
-        'preprocessor': None,
-        'compiler': ["cc"],
-        'compiler_so': ["cc"],
-        'compiler_cxx': ["cc"],
-        'linker_so': ["cc", "-shared"],
-        'linker_exe': ["cc"],
-        'archiver': ["ar", "-cr"],
-        'ranlib': None,
-    }
-
-    if sys.platform[:6] == "darwin":
-        executables['ranlib'] = ["ranlib"]
-
-    # Needed for the filename generation methods provided by the base
-    # class, CCompiler.  NB. whoever instantiates/uses a particular
-    # UnixCCompiler instance should set 'shared_lib_ext' -- we set a
-    # reasonable common default here, but it's not necessarily used on all
-    # Unices!
-
-    src_extensions = [".c", ".C", ".cc", ".cxx", ".cpp", ".m"]
-    obj_extension = ".o"
-    static_lib_extension = ".a"
-    shared_lib_extension = ".so"
-    dylib_lib_extension = ".dylib"
-    xcode_stub_lib_extension = ".tbd"
-    static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s"
-    xcode_stub_lib_format = dylib_lib_format
-    if sys.platform == "cygwin":
-        exe_extension = ".exe"
-
-    def preprocess(
-        self,
-        source,
-        output_file=None,
-        macros=None,
-        include_dirs=None,
-        extra_preargs=None,
-        extra_postargs=None,
-    ):
-        fixed_args = self._fix_compile_args(None, macros, include_dirs)
-        ignore, macros, include_dirs = fixed_args
-        pp_opts = gen_preprocess_options(macros, include_dirs)
-        pp_args = self.preprocessor + pp_opts
-        if output_file:
-            pp_args.extend(['-o', output_file])
-        if extra_preargs:
-            pp_args[:0] = extra_preargs
-        if extra_postargs:
-            pp_args.extend(extra_postargs)
-        pp_args.append(source)
-
-        # reasons to preprocess:
-        # - force is indicated
-        # - output is directed to stdout
-        # - source file is newer than the target
-        preprocess = self.force or output_file is None or newer(source, output_file)
-        if not preprocess:
-            return
-
-        if output_file:
-            self.mkpath(os.path.dirname(output_file))
-
-        try:
-            self.spawn(pp_args)
-        except DistutilsExecError as msg:
-            raise CompileError(msg)
-
-    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
-        compiler_so = compiler_fixup(self.compiler_so, cc_args + extra_postargs)
-        try:
-            self.spawn(compiler_so + cc_args + [src, '-o', obj] + extra_postargs)
-        except DistutilsExecError as msg:
-            raise CompileError(msg)
-
-    def create_static_lib(
-        self, objects, output_libname, output_dir=None, debug=0, target_lang=None
-    ):
-        objects, output_dir = self._fix_object_args(objects, output_dir)
-
-        output_filename = self.library_filename(output_libname, output_dir=output_dir)
-
-        if self._need_link(objects, output_filename):
-            self.mkpath(os.path.dirname(output_filename))
-            self.spawn(self.archiver + [output_filename] + objects + self.objects)
-
-            # Not many Unices required ranlib anymore -- SunOS 4.x is, I
-            # think the only major Unix that does.  Maybe we need some
-            # platform intelligence here to skip ranlib if it's not
-            # needed -- or maybe Python's configure script took care of
-            # it for us, hence the check for leading colon.
-            if self.ranlib:
-                try:
-                    self.spawn(self.ranlib + [output_filename])
-                except DistutilsExecError as msg:
-                    raise LibError(msg)
-        else:
-            log.debug("skipping %s (up-to-date)", output_filename)
-
-    def link(
-        self,
-        target_desc,
-        objects,
-        output_filename,
-        output_dir=None,
-        libraries=None,
-        library_dirs=None,
-        runtime_library_dirs=None,
-        export_symbols=None,
-        debug=0,
-        extra_preargs=None,
-        extra_postargs=None,
-        build_temp=None,
-        target_lang=None,
-    ):
-        objects, output_dir = self._fix_object_args(objects, output_dir)
-        fixed_args = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
-        libraries, library_dirs, runtime_library_dirs = fixed_args
-
-        lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries)
-        if not isinstance(output_dir, (str, type(None))):
-            raise TypeError("'output_dir' must be a string or None")
-        if output_dir is not None:
-            output_filename = os.path.join(output_dir, output_filename)
-
-        if self._need_link(objects, output_filename):
-            ld_args = objects + self.objects + lib_opts + ['-o', output_filename]
-            if debug:
-                ld_args[:0] = ['-g']
-            if extra_preargs:
-                ld_args[:0] = extra_preargs
-            if extra_postargs:
-                ld_args.extend(extra_postargs)
-            self.mkpath(os.path.dirname(output_filename))
-            try:
-                # Select a linker based on context: linker_exe when
-                # building an executable or linker_so (with shared options)
-                # when building a shared library.
-                building_exe = target_desc == CCompiler.EXECUTABLE
-                linker = (self.linker_exe if building_exe else self.linker_so)[:]
-
-                if target_lang == "c++" and self.compiler_cxx:
-                    env, linker_ne = _split_env(linker)
-                    aix, linker_na = _split_aix(linker_ne)
-                    _, compiler_cxx_ne = _split_env(self.compiler_cxx)
-                    _, linker_exe_ne = _split_env(self.linker_exe)
-
-                    params = _linker_params(linker_na, linker_exe_ne)
-                    linker = env + aix + compiler_cxx_ne + params
-
-                linker = compiler_fixup(linker, ld_args)
-
-                self.spawn(linker + ld_args)
-            except DistutilsExecError as msg:
-                raise LinkError(msg)
-        else:
-            log.debug("skipping %s (up-to-date)", output_filename)
-
-    # -- Miscellaneous methods -----------------------------------------
-    # These are all used by the 'gen_lib_options() function, in
-    # ccompiler.py.
-
-    def library_dir_option(self, dir):
-        return "-L" + dir
-
-    def _is_gcc(self):
-        cc_var = sysconfig.get_config_var("CC")
-        compiler = os.path.basename(shlex.split(cc_var)[0])
-        return "gcc" in compiler or "g++" in compiler
-
-    def runtime_library_dir_option(self, dir):
-        # XXX Hackish, at the very least.  See Python bug #445902:
-        # http://sourceforge.net/tracker/index.php
-        #   ?func=detail&aid=445902&group_id=5470&atid=105470
-        # Linkers on different platforms need different options to
-        # specify that directories need to be added to the list of
-        # directories searched for dependencies when a dynamic library
-        # is sought.  GCC on GNU systems (Linux, FreeBSD, ...) has to
-        # be told to pass the -R option through to the linker, whereas
-        # other compilers and gcc on other systems just know this.
-        # Other compilers may need something slightly different.  At
-        # this time, there's no way to determine this information from
-        # the configuration data stored in the Python installation, so
-        # we use this hack.
-        if sys.platform[:6] == "darwin":
-            from distutils.util import get_macosx_target_ver, split_version
-
-            macosx_target_ver = get_macosx_target_ver()
-            if macosx_target_ver and split_version(macosx_target_ver) >= [10, 5]:
-                return "-Wl,-rpath," + dir
-            else:  # no support for -rpath on earlier macOS versions
-                return "-L" + dir
-        elif sys.platform[:7] == "freebsd":
-            return "-Wl,-rpath=" + dir
-        elif sys.platform[:5] == "hp-ux":
-            return [
-                "-Wl,+s" if self._is_gcc() else "+s",
-                "-L" + dir,
-            ]
-
-        # For all compilers, `-Wl` is the presumed way to
-        # pass a compiler option to the linker and `-R` is
-        # the way to pass an RPATH.
-        if sysconfig.get_config_var("GNULD") == "yes":
-            # GNU ld needs an extra option to get a RUNPATH
-            # instead of just an RPATH.
-            return "-Wl,--enable-new-dtags,-R" + dir
-        else:
-            return "-Wl,-R" + dir
-
-    def library_option(self, lib):
-        return "-l" + lib
-
-    @staticmethod
-    def _library_root(dir):
-        """
-        macOS users can specify an alternate SDK using '-isysroot'.
-        Calculate the SDK root if it is specified.
-
-        Note that, as of Xcode 7, Apple SDKs may contain textual stub
-        libraries with .tbd extensions rather than the normal .dylib
-        shared libraries installed in /.  The Apple compiler tool
-        chain handles this transparently but it can cause problems
-        for programs that are being built with an SDK and searching
-        for specific libraries.  Callers of find_library_file need to
-        keep in mind that the base filename of the returned SDK library
-        file might have a different extension from that of the library
-        file installed on the running system, for example:
-          /Applications/Xcode.app/Contents/Developer/Platforms/
-              MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/
-              usr/lib/libedit.tbd
-        vs
-          /usr/lib/libedit.dylib
-        """
-        cflags = sysconfig.get_config_var('CFLAGS')
-        match = re.search(r'-isysroot\s*(\S+)', cflags)
-
-        apply_root = (
-            sys.platform == 'darwin'
-            and match
-            and (
-                dir.startswith('/System/')
-                or (dir.startswith('/usr/') and not dir.startswith('/usr/local/'))
-            )
-        )
-
-        return os.path.join(match.group(1), dir[1:]) if apply_root else dir
-
-    def find_library_file(self, dirs, lib, debug=0):
-        r"""
-        Second-guess the linker with not much hard
-        data to go on: GCC seems to prefer the shared library, so
-        assume that *all* Unix C compilers do,
-        ignoring even GCC's "-static" option.
-
-        >>> compiler = UnixCCompiler()
-        >>> compiler._library_root = lambda dir: dir
-        >>> monkeypatch = getfixture('monkeypatch')
-        >>> monkeypatch.setattr(os.path, 'exists', lambda d: 'existing' in d)
-        >>> dirs = ('/foo/bar/missing', '/foo/bar/existing')
-        >>> compiler.find_library_file(dirs, 'abc').replace('\\', '/')
-        '/foo/bar/existing/libabc.dylib'
-        >>> compiler.find_library_file(reversed(dirs), 'abc').replace('\\', '/')
-        '/foo/bar/existing/libabc.dylib'
-        >>> monkeypatch.setattr(os.path, 'exists',
-        ...     lambda d: 'existing' in d and '.a' in d)
-        >>> compiler.find_library_file(dirs, 'abc').replace('\\', '/')
-        '/foo/bar/existing/libabc.a'
-        >>> compiler.find_library_file(reversed(dirs), 'abc').replace('\\', '/')
-        '/foo/bar/existing/libabc.a'
-        """
-        lib_names = (
-            self.library_filename(lib, lib_type=type)
-            for type in 'dylib xcode_stub shared static'.split()
-        )
-
-        roots = map(self._library_root, dirs)
-
-        searched = (
-            os.path.join(root, lib_name)
-            for root, lib_name in itertools.product(roots, lib_names)
-        )
-
-        found = filter(os.path.exists, searched)
-
-        # Return None if it could not be found in any dir.
-        return next(found, None)
spaces/AutoLLM/ArxivDigest/download_new_papers.py
DELETED
@@ -1,64 +0,0 @@
-# encoding: utf-8
-import os
-import tqdm
-from bs4 import BeautifulSoup as bs
-import urllib.request
-import json
-import datetime
-import pytz
-
-
-def _download_new_papers(field_abbr):
-    NEW_SUB_URL = f'https://arxiv.org/list/{field_abbr}/new'  # https://arxiv.org/list/cs/new
-    page = urllib.request.urlopen(NEW_SUB_URL)
-    soup = bs(page)
-    content = soup.body.find("div", {'id': 'content'})
-
-    # find the first h3 element in content
-    h3 = content.find("h3").text  # e.g: New submissions for Wed, 10 May 23
-    date = h3.replace("New submissions for", "").strip()
-
-    dt_list = content.dl.find_all("dt")
-    dd_list = content.dl.find_all("dd")
-    arxiv_base = "https://arxiv.org/abs/"
-
-    assert len(dt_list) == len(dd_list)
-    new_paper_list = []
-    for i in tqdm.tqdm(range(len(dt_list))):
-        paper = {}
-        paper_number = dt_list[i].text.strip().split(" ")[2].split(":")[-1]
-        paper['main_page'] = arxiv_base + paper_number
-        paper['pdf'] = arxiv_base.replace('abs', 'pdf') + paper_number
-
-        paper['title'] = dd_list[i].find("div", {"class": "list-title mathjax"}).text.replace("Title: ", "").strip()
-        paper['authors'] = dd_list[i].find("div", {"class": "list-authors"}).text \
-            .replace("Authors:\n", "").replace("\n", "").strip()
-        paper['subjects'] = dd_list[i].find("div", {"class": "list-subjects"}).text.replace("Subjects: ", "").strip()
-        paper['abstract'] = dd_list[i].find("p", {"class": "mathjax"}).text.replace("\n", " ").strip()
-        new_paper_list.append(paper)
-
-
-    # check if ./data exist, if not, create it
-    if not os.path.exists("./data"):
-        os.makedirs("./data")
-
-    # save new_paper_list to a jsonl file, with each line as the element of a dictionary
-    date = datetime.date.fromtimestamp(datetime.datetime.now(tz=pytz.timezone("America/New_York")).timestamp())
-    date = date.strftime("%a, %d %b %y")
-    with open(f"./data/{field_abbr}_{date}.jsonl", "w") as f:
-        for paper in new_paper_list:
-            f.write(json.dumps(paper) + "\n")
-
-
-def get_papers(field_abbr, limit=None):
-    date = datetime.date.fromtimestamp(datetime.datetime.now(tz=pytz.timezone("America/New_York")).timestamp())
-    date = date.strftime("%a, %d %b %y")
-    if not os.path.exists(f"./data/{field_abbr}_{date}.jsonl"):
-        _download_new_papers(field_abbr)
-    results = []
-    with open(f"./data/{field_abbr}_{date}.jsonl", "r") as f:
-        for i, line in enumerate(f.readlines()):
-            if limit and i == limit:
-                return results
-            results.append(json.loads(line))
-    return results
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/nms.py
DELETED
@@ -1,139 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import torch
-from torchvision.ops import boxes as box_ops
-from torchvision.ops import nms  # noqa . for compatibility
-
-
-def batched_nms(
-    boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float
-):
-    """
-    Same as torchvision.ops.boxes.batched_nms, but with float().
-    """
-    assert boxes.shape[-1] == 4
-    # Note: Torchvision already has a strategy (https://github.com/pytorch/vision/issues/1311)
-    # to decide whether to use coordinate trick or for loop to implement batched_nms. So we
-    # just call it directly.
-    # Fp16 does not have enough range for batched NMS, so adding float().
-    return box_ops.batched_nms(boxes.float(), scores, idxs, iou_threshold)
-
-
-# Note: this function (nms_rotated) might be moved into
-# torchvision/ops/boxes.py in the future
-def nms_rotated(boxes, scores, iou_threshold):
-    """
-    Performs non-maximum suppression (NMS) on the rotated boxes according
-    to their intersection-over-union (IoU).
-
-    Rotated NMS iteratively removes lower scoring rotated boxes which have an
-    IoU greater than iou_threshold with another (higher scoring) rotated box.
-
-    Note that RotatedBox (5, 3, 4, 2, -90) covers exactly the same region as
-    RotatedBox (5, 3, 4, 2, 90) does, and their IoU will be 1. However, they
-    can be representing completely different objects in certain tasks, e.g., OCR.
-
-    As for the question of whether rotated-NMS should treat them as faraway boxes
-    even though their IOU is 1, it depends on the application and/or ground truth annotation.
-
-    As an extreme example, consider a single character v and the square box around it.
-
-    If the angle is 0 degree, the object (text) would be read as 'v';
-
-    If the angle is 90 degrees, the object (text) would become '>';
-
-    If the angle is 180 degrees, the object (text) would become '^';
-
-    If the angle is 270/-90 degrees, the object (text) would become '<'
-
-    All of these cases have IoU of 1 to each other, and rotated NMS that only
-    uses IoU as criterion would only keep one of them with the highest score -
-    which, practically, still makes sense in most cases because typically
-    only one of theses orientations is the correct one. Also, it does not matter
-    as much if the box is only used to classify the object (instead of transcribing
-    them with a sequential OCR recognition model) later.
-
-    On the other hand, when we use IoU to filter proposals that are close to the
-    ground truth during training, we should definitely take the angle into account if
-    we know the ground truth is labeled with the strictly correct orientation (as in,
-    upside-down words are annotated with -180 degrees even though they can be covered
-    with a 0/90/-90 degree box, etc.)
-
-    The way the original dataset is annotated also matters. For example, if the dataset
-    is a 4-point polygon dataset that does not enforce ordering of vertices/orientation,
-    we can estimate a minimum rotated bounding box to this polygon, but there's no way
-    we can tell the correct angle with 100% confidence (as shown above, there could be 4 different
-    rotated boxes, with angles differed by 90 degrees to each other, covering the exactly
-    same region). In that case we have to just use IoU to determine the box
-    proximity (as many detection benchmarks (even for text) do) unless there're other
-    assumptions we can make (like width is always larger than height, or the object is not
-    rotated by more than 90 degrees CCW/CW, etc.)
-
-    In summary, not considering angles in rotated NMS seems to be a good option for now,
-    but we should be aware of its implications.
-
-    Args:
-        boxes (Tensor[N, 5]): Rotated boxes to perform NMS on. They are expected to be in
-           (x_center, y_center, width, height, angle_degrees) format.
-        scores (Tensor[N]): Scores for each one of the rotated boxes
-        iou_threshold (float): Discards all overlapping rotated boxes with IoU < iou_threshold
-
-    Returns:
-        keep (Tensor): int64 tensor with the indices of the elements that have been kept
-        by Rotated NMS, sorted in decreasing order of scores
-    """
-    return torch.ops.detectron2.nms_rotated(boxes, scores, iou_threshold)
-
-
-# Note: this function (batched_nms_rotated) might be moved into
-# torchvision/ops/boxes.py in the future
-def batched_nms_rotated(boxes, scores, idxs, iou_threshold):
-    """
-    Performs non-maximum suppression in a batched fashion.
-
-    Each index value correspond to a category, and NMS
-    will not be applied between elements of different categories.
-
-    Args:
-        boxes (Tensor[N, 5]):
-           boxes where NMS will be performed. They
-           are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format
-        scores (Tensor[N]):
-           scores for each one of the boxes
-        idxs (Tensor[N]):
-           indices of the categories for each one of the boxes.
-        iou_threshold (float):
-           discards all overlapping boxes
-           with IoU < iou_threshold
-
-    Returns:
-        Tensor:
-            int64 tensor with the indices of the elements that have been kept
-            by NMS, sorted in decreasing order of scores
-    """
-    assert boxes.shape[-1] == 5
-
-    if boxes.numel() == 0:
-        return torch.empty((0,), dtype=torch.int64, device=boxes.device)
-    boxes = boxes.float()  # fp16 does not have enough range for batched NMS
-    # Strategy: in order to perform NMS independently per class,
-    # we add an offset to all the boxes. The offset is dependent
-    # only on the class idx, and is large enough so that boxes
-    # from different classes do not overlap
-
-    # Note that batched_nms in torchvision/ops/boxes.py only uses max_coordinate,
-    # which won't handle negative coordinates correctly.
-    # Here by using min_coordinate we can make sure the negative coordinates are
-    # correctly handled.
-    max_coordinate = (
-        torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2
-    ).max()
-    min_coordinate = (
-        torch.min(boxes[:, 0], boxes[:, 1]) - torch.max(boxes[:, 2], boxes[:, 3]) / 2
-    ).min()
-    offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1)
-    boxes_for_nms = boxes.clone()  # avoid modifying the original values in boxes
-    boxes_for_nms[:, :2] += offsets[:, None]
-    keep = nms_rotated(boxes_for_nms, scores, iou_threshold)
-    return keep
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/packaging/build_all_wheels.sh
DELETED
@@ -1,65 +0,0 @@
-#!/bin/bash -e
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-[[ -d "dev/packaging" ]] || {
-  echo "Please run this script at detectron2 root!"
-  exit 1
-}
-
-build_one() {
-  cu=$1
-  pytorch_ver=$2
-
-  case "$cu" in
-    cu*)
-      container_name=manylinux-cuda${cu/cu/}
-      ;;
-    cpu)
-      container_name=manylinux-cuda101
-      ;;
-    *)
-      echo "Unrecognized cu=$cu"
-      exit 1
-      ;;
-  esac
-
-  echo "Launching container $container_name ..."
-  container_id="$container_name"_"$cu"_"$pytorch_ver"
-
-  py_versions=(3.6 3.7 3.8 3.9)
-
-  for py in "${py_versions[@]}"; do
-    docker run -itd \
-      --name "$container_id" \
-      --mount type=bind,source="$(pwd)",target=/detectron2 \
-      pytorch/$container_name
-
-    cat <<EOF | docker exec -i $container_id sh
-export CU_VERSION=$cu D2_VERSION_SUFFIX=+$cu PYTHON_VERSION=$py
-export PYTORCH_VERSION=$pytorch_ver
-cd /detectron2 && ./dev/packaging/build_wheel.sh
-EOF
-
-    docker container stop $container_id
-    docker container rm $container_id
-  done
-}
-
-
-if [[ -n "$1" ]] && [[ -n "$2" ]]; then
-  build_one "$1" "$2"
-else
-  build_one cu113 1.10
-  build_one cu111 1.10
-  build_one cu102 1.10
-  build_one cpu 1.10
-
-  build_one cu111 1.9
-  build_one cu102 1.9
-  build_one cpu 1.9
-
-  build_one cu111 1.8
-  build_one cu102 1.8
-  build_one cu101 1.8
-  build_one cpu 1.8
-fi
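As a usage note (inferred from the argument handling in the script above, not from separate documentation): the script is meant to be run from the detectron2 repository root, either with no arguments to build the whole CUDA/PyTorch matrix listed in the else branch, or with an explicit pair to build a single combination.

cd detectron2   # repository root, so that dev/packaging exists
./dev/packaging/build_all_wheels.sh              # full matrix
./dev/packaging/build_all_wheels.sh cu102 1.10   # one CUDA/PyTorch combination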
spaces/Bajr/softly/greeting.md
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
key is tbh
|
spaces/Benson/text-generation/Examples/Aparcamiento De Coches Multijugador Apk En Son Srm.md
DELETED
@@ -1,65 +0,0 @@
-<br />
-<h1>Car Parking Multiplayer APK En Son Sürüm: What You Need to Know</h1>
-<p>Do you love driving games? Do you want a realistic, immersive car simulation experience? Do you want to explore a huge open world with other players? If you answered yes to any of these questions, then you should download Car Parking Multiplayer APK En Son Sürüm right now.</p>
-<h2>car parking multiplayer apk en son sürüm</h2><br /><p><b><b>Download File</b> ↔ <a href="https://bltlly.com/2v6MxO">https://bltlly.com/2v6MxO</a></b></p><br /><br />
-<p>Car Parking Multiplayer is one of the most popular and most downloaded car simulation games on Android. It has over 50 million downloads on the Google Play Store and a rating of 4.4 out of 5 stars. It is developed by olzhass, a team of talented game developers who are passionate about creating high-quality games for mobile platforms.</p>
-<p>In this article, we will tell you everything you need to know about Car Parking Multiplayer APK En Son Sürüm: its features, how to download and install it, its pros and cons, and some frequently asked questions. By the end, you will be ready to join the fun and excitement of Car Parking Multiplayer.</p>
-<h2>Features of Car Parking Multiplayer APK En Son Sürüm</h2>
-<p>Car Parking Multiplayer APK En Son Sürüm is not just a simple parking game. It is a full-fledged car simulation game that offers a variety of features to keep you entertained for hours. Here are some of the features you can enjoy:</p>
-<h3>Open-world multiplayer mode with real players</h3>
-<p>One of the main attractions of Car Parking Multiplayer APK En Son Sürüm is its open-world multiplayer mode. In this mode, you can explore a huge map with different locations and environments alongside other real players. You can drive through the city, the countryside, the airport, the desert, and more. You can also interact with other players by honking, flashing your lights, or using voice chat. You can even join or create a gang and compete with other gangs for territory and reputation.</p>
-<p></p>
-
-<p>Another feature that makes Car Parking Multiplayer APK En Son Sürüm stand out from other car simulation games is its free walking and driving system. You can get out of your car and walk around the map as a pedestrian. You can also get into other cars and drive them as you please. You can even steal cars from other players or NPCs if you are feeling adventurous. The game also has realistic physics and damage effects that make the driving experience more authentic and challenging.</p>
-<h3>Customizable cars and houses</h3>
-<p>If you like customizing your cars and houses, you will love Car Parking Multiplayer APK En Son Sürüm. The game offers a wide range of options for personalizing your vehicles and properties. You can change the color, stickers, wheels, spoilers, exhaust pipes, and more on your cars. You can also buy or rent houses and decorate them with furniture, appliances, paintings, and more. You can even invite other players to visit your house and show off your style.</p>
-<h3>Various game modes and challenges</h3>
-<p>Car Parking Multiplayer APK En Son Sürüm is not just about driving and parking. It also has various game modes and challenges that will test your skills and knowledge. You can try modes such as racing, drifting, parking, delivery, taxi, and police chase. You can also complete daily tasks and achievements to earn money and rewards. Use the money to buy new cars, houses, or items, and use the rewards to unlock new features and upgrades.</p>
-<h3>Online voice chat and chat rooms</h3>
-
-<h2>How to download and install Car Parking Multiplayer APK En Son Sürüm</h2>
-<p>If you are interested in playing Car Parking Multiplayer APK En Son Sürüm, you need to download and install it on your Android device. Here are the steps to follow:</p>
-<h3>Step 1: Go to the official website or the Google Play Store</h3>
-<p>The first step is to go to the official Car Parking Multiplayer website or the Google Play Store, where you can find the latest version of the app. You can use any browser on your device to access these sites.</p>
-<h3>Step 2: Choose the latest version of the app and click the download button</h3>
-<p>The next step is to choose the latest version of the app that is compatible with your device. You can check the app's details, such as size, version number, update date, rating, and reviews, before downloading it. Once you have chosen the app, click the download button to start downloading it.</p>
-<h3>Step 3: Allow unknown sources in your device settings</h3>
-<p>If you are downloading the app from the official website, you may need to allow unknown sources in your device settings, because some devices block installing apps from sources other than the Google Play Store for security reasons. To allow unknown sources, go to your device settings > security > unknown sources > enable.</p>
-<h3>Step 4: Open the downloaded file and follow the installation instructions</h3>
-<p>After downloading the app, open the downloaded file and follow the installation instructions that appear on screen. The installation may take a few minutes depending on your device speed and internet connection.</p>
-<h3>Step 5: Launch the app and enjoy the game</h3>
-
-<h2>Pros and cons of Car Parking Multiplayer APK En Son Sürüm</h2>
-<p>Car Parking Multiplayer APK En Son Sürüm is a great game with many advantages and benefits. However, it also has some drawbacks and limitations you should be aware of. Here are some of its pros and cons:</p>
-<h3>Pros</h3>
-<ul>
-<li>Fun and addictive gameplay: Car Parking Multiplayer APK En Son Sürüm offers fun and addictive gameplay that will keep you hooked for hours. You can enjoy driving, parking, racing, drifting, and more in a realistic, immersive environment.</li>
-<li>Realistic graphics and sounds: the game's realistic graphics and sounds enhance the experience. You can admire the details of the cars, buildings, roads, and scenery, and hear the engine, horn, and tire sounds.</li>
-<li>Large and diverse map: the game has a large and diverse map to explore with other players. You can visit locations such as the city, countryside, airport, and desert, and find gas stations, car services, shops, houses, and other points of interest.</li>
-<li>Many customization and interaction options: you can change the color, stickers, wheels, spoilers, exhaust pipes, and more on your cars; buy or rent houses and decorate them with furniture, appliances, and paintings; and invite other players to visit your house and show off your style.</li>
-
-</ul>
-<h3>Cons</h3>
-<ul>
-<li>High battery consumption: Car Parking Multiplayer APK En Son Sürüm drains a lot of battery because of its high-quality graphics and sounds. You may need to charge your device frequently or use a power bank for long play sessions.</li>
-<li>Some bugs and glitches: the game may have bugs and glitches that affect gameplay or cause crashes. Some are minor and can be fixed by updating the app or restarting your device; others may be major and require contacting the developers or waiting for a patch.</li>
-<li>May require a stable internet connection: multiplayer mode and other online features need a stable internet connection. If your connection is slow or unstable, you may experience lag, freezing, or disconnections, and you may miss updates or events that are only available online.</li>
-<li>May contain ads and in-app purchases: ads may pop up randomly or frequently while you play, and some in-app purchases offer extra features or items that can give buyers an advantage over other players.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Car Parking Multiplayer APK En Son Sürüm is an impressive car simulation game that offers a realistic, immersive driving experience. It has many features that will keep you entertained for hours: open-world multiplayer with real players, free walking and driving with realistic physics, customizable cars and houses, various game modes and challenges, online voice chat and chat rooms, and more. It also has some drawbacks, such as high battery consumption, occasional bugs, the need for a stable internet connection, and possible ads and in-app purchases.</p>
-
-<h2>Frequently asked questions</h2>
-<ul>
-<li><b>Q1: Is Car Parking Multiplayer APK En Son Sürüm safe to download?</b></li>
-<li>A1: Yes, it is safe as long as you download it from a trusted source such as the official website or the Google Play Store.</li><li><b>Q2: How much space does Car Parking Multiplayer APK En Son Sürüm take up on my device?</b></li>
-<li>A2: The app size varies by device, but it is around 300 MB. You may also need extra space for updates and data.</li>
-<li><b>Q3: Can I play Car Parking Multiplayer APK En Son Sürüm offline?</b></li>
-<li>A3: Yes, you can play some game modes offline, but you will need an internet connection for multiplayer mode and other online features.</li>
-<li><b>Q4: How can I contact the developers of Car Parking Multiplayer APK En Son Sürüm?</b></li>
-<li>A4: You can contact them by email at olzhass@gmail.com or through their social media accounts on Facebook, Instagram, YouTube, and Discord.</li>
-<li><b>Q5: What are some tips and tricks for playing Car Parking Multiplayer APK En Son Sürüm?</b></li>
-<li>A5: A few tips and tricks: use the map to find gas stations, car services, shops, houses, and other points of interest; customize your car with different colors, stickers, wheels, and spoilers; join or create a chat room to communicate with other players and make friends; try different game modes such as racing, drifting, parking, delivery, taxi, and police chase; and complete daily tasks and achievements to earn money and rewards.</li>
-</ul></p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Arca Supervivencia Evolucionado Descargar Pc Juegos picos.md
DELETED
@@ -1,49 +0,0 @@
-
-<h1>Ark: Survival Evolved - How to Download and Play It on PC with Epic Games</h1>
-<p>Have you ever dreamed of living in a world full of dinosaurs and other prehistoric creatures? If so, you should check out <strong>Ark: Survival Evolved</strong>, a survival game that lets you explore, craft, build, and tame hundreds of different species. And the best part is that you can get it for free on the <strong>Epic Games Store</strong> until September 29, 2022!</p>
-<p>In this article, we will show you how to download and install Ark: Survival Evolved on your PC, how to start playing it, and how to enjoy it. Whether you are a fan of survival games, dinosaurs, or both, you will find something to love in this game.</p>
-<h2>ark survival evolved download pc epic games</h2><br /><p><b><b>DOWNLOAD</b> 🌟 <a href="https://bltlly.com/2v6Mab">https://bltlly.com/2v6Mab</a></b></p><br /><br />
-<h2>How to download and install Ark: Survival Evolved on PC</h2>
-<p>Downloading and installing Ark: Survival Evolved on your PC is very easy. All you need is an Epic Games account and the Epic Games launcher. If you don't have them yet, you can create an account and download the launcher from <a href="( 1 )">the official website</a>.</p>
-<p>Once you have the launcher installed, open it and go to the Store tab. You should see Ark: Survival Evolved as one of the free games of the week. Click it and then click Get. You will be asked to confirm your order (don't worry, it's free) and the game will be added to your library.</p>
-<p>To install the game, go to your library and click Ark: Survival Evolved. You will see a button that says Install. Click it and choose where you want to install the game. The download size is about 60 GB, so make sure you have enough space on your hard drive. The installation may take some time depending on your internet speed and system performance.</p>
-<p>Once the installation is complete, you can launch the game from your library or from the desktop shortcut. You are now ready to play Ark: Survival Evolved on your PC!</p>
-<h2>How to start playing Ark: Survival Evolved on PC</h2>
-
-<ul>
-<li>The first thing you need to do is choose a mode and a map. There are four modes available: Single Player, Host/Local, Official Servers, and Unofficial Servers. Single Player lets you play offline by yourself or with friends using split screen. Host/Local lets you host your own server or join someone else's over a LAN or online connection. Official Servers are online servers hosted by the game developers, where you can join thousands of other players. Unofficial Servers are online servers hosted by other players or communities, where you can find different settings and mods.</li>
-<li>Next, choose a map. There are six maps available: The Island, The Center, Scorched Earth, Ragnarok, Aberration, and Extinction. Each map has its own features, biomes, creatures, and challenges. You can also download custom maps from the Steam Workshop or the Epic Games Mod Hub.</li>
-<li>After choosing a mode and a map, you will spawn at a random location. You will have nothing but your bare hands and an implant in your arm. This implant is called the ARK, and it is your interface to the game. It shows your health, stamina, hunger, thirst, weight, level, engrams, inventory, map, and more.</li>
-<li>Your first priority is to survive. You need food, water, shelter, and clothing. You can gather resources from the environment, such as berries, wood, stone, fiber, and flint, and craft items using your engrams, which are blueprints you unlock as you level up. You can make tools, weapons, armor, structures, and more.</li>
-<li>Your second priority is to tame creatures. You can find hundreds of different species in the game, from dinosaurs to dragons. Some are friendly, some are hostile, and some are neutral. You can tame them by knocking them out and feeding them their preferred food. Once tamed, you can ride them, or use them for combat, transport, harvesting, or breeding.</li>
-
-</ul>
-<h2>How to enjoy Ark: Survival Evolved on PC</h2>
-<p>Ark: Survival Evolved offers endless possibilities and fun. There is no right or wrong way to play it; you can set your own adventure and goals. Here are some of the features and aspects that make the game enjoyable and engaging.</p>
-<ul>
-<li>The game has stunning graphics and sound effects that immerse you in the world of the ARKs. You can admire the beauty and diversity of the landscapes and creatures, and adjust the graphics settings to suit your preferences and system performance.</li>
-<li>The game has a dynamic day-night cycle and weather system that affect gameplay and the environment. You can experience rain, fog, snow, heat waves, cold snaps, storms, and more, and witness celestial events such as eclipses, meteor showers, and comets.</li>
-<li>The game has rich and varied content that keeps you entertained and challenged. You can find new items, creatures, biomes, events, quests, and more with every update and expansion, and access user-generated content such as mods, maps, and skins from the Steam Workshop or the Epic Games Mod Hub.</li>
-<li>The game has a creative sandbox mode that lets you unleash your imagination. You can build whatever you want, from simple huts to massive castles, from farms to factories, from zoos to museums. You can also use cheats and commands to change game settings and spawn items and creatures.</li>
-<li>The game has a multiplayer and social aspect that lets you share your adventure with other players. You can join or create a tribe with friends or strangers and work together or compete with other tribes. You can also chat, trade, ally, or fight with other players, and join official or unofficial servers with different rules and communities.</li>
-</ul>
-<h2>Conclusion</h2>
-
-<p>If you are interested in trying this game, you can download it for free from the Epic Games Store until September 29, 2022. Don't miss this opportunity to get one of the best survival games ever made. You won't regret it!</p>
-<p>Click <a href=">here</a> to download Ark: Survival Evolved from the Epic Games Store now!</p>
-<h3>Frequently asked questions</h3>
-<ul>
-<li><strong>Q: Is Ark: Survival Evolved free to play?</strong></li>
-<li>A: Ark: Survival Evolved is not normally free to play, but it is free to download and keep from the Epic Games Store until September 29, 2022.</li>
-<li><strong>Q: What are the system requirements for Ark: Survival Evolved on PC?</strong></li>
-<li>A: The minimum system requirements are: - OS: Windows 7/8.1/10 (64-bit versions) - Processor: Intel Core i5-2400/AMD FX-8320 or better - Memory: 8 GB RAM - Graphics: NVIDIA GTX 670 2GB/AMD Radeon HD 7870 2GB or better - DirectX: Version 10 - Storage: 60 GB available space. The recommended system requirements are: - OS: Windows 10 (64-bit versions) - Processor: Intel Core i7-4770/AMD Ryzen 5 1500X or better - Memory: 16 GB RAM - Graphics: NVIDIA GTX 1050 Ti 4GB/AMD Radeon RX 470 4GB or better - DirectX: Version 11 - Storage: 60 GB available space</li>
-<li><strong>Q: How many players can play Ark: Survival Evolved on PC?</strong></li>
-<li>A: Ark: Survival Evolved supports up to four players in split-screen mode on PC. It also supports up to eight players in co-op mode on Host/Local servers, and up to 70 players on Official and Unofficial servers.</li>
-<li><strong>Q: How many maps are there in Ark: Survival Evolved on PC?</strong></li>
-
-<li><strong>Q: How do I tame creatures in Ark: Survival Evolved on PC?</strong></li>
-<li>A: There are two main taming methods in Ark: Survival Evolved on PC: - Passive taming: feed the creature its preferred food without aggravating it. Approach it carefully and press E to feed it when prompted, repeating until the taming bar is full. This works for some herbivores and omnivores, such as Dodos, Parasaurs, Lystrosaurs, etc. - Knockout taming: knock the creature out using weapons or items that inflict torpor damage, such as slingshots, tranquilizer arrows, or narcotic traps. Hit the creature until it falls unconscious, then feed it its preferred food or kibble while keeping it asleep with narcotics or narcoberries, waiting until the taming bar is full. This works for most carnivores and some herbivores and omnivores, such as Raptors, Rexes, Mammoths, etc.</li>
-</ul></p>
-<p></p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Defender 3 Apk Mod.md
DELETED
@@ -1,48 +0,0 @@
-<br />
-<h1>Defender 3 APK Mod: A Tower Defense Game with Epic Battles</h1>
-<p>If you are a fan of tower defense games, you might want to check out Defender 3, a popular game that combines strategy, action, and fantasy. In this game, you have to defend your base from an invasion of dragons and monsters, using various towers and weapons. But what if you want to enjoy the game without limitations or restrictions? That's where Defender 3 APK Mod comes in handy. In this article, we will tell you everything you need to know about this modified version of the game, including its features, benefits, and how to download and install it on your device.</p>
-<h2>Introduction</h2>
-<h3>What is Defender 3?</h3>
-<p>Defender 3 is a tower defense game developed by DroidHen, a Chinese game studio. It is the sequel to the popular Defender series, which has over 10 million downloads on Google Play. The game is set in a fantasy world where dragons and monsters are trying to destroy your kingdom. You have to build and upgrade your towers, equip your weapons, and use your skills to stop them. The game has four modes: Campaign, Endless, Expedition, and Boss Fight. You can also play with other players online and compete for the highest score on the leaderboards.</p>
-<h2>defender 3 apk mod</h2><br /><p><b><b>Download</b> ❤❤❤ <a href="https://bltlly.com/2v6KnY">https://bltlly.com/2v6KnY</a></b></p><br /><br />
-<h3>Why should you play Defender 3 APK Mod?</h3>
-<p>Defender 3 is a fun and addictive game that will keep you entertained for hours. However, it also has some drawbacks that might affect your gaming experience. For example, you need to spend real money to buy more gems and coins, which are used to unlock new towers, weapons, and upgrades. You also have to watch ads to get free rewards or speed up your progress. In addition, some levels and enemies are very difficult to beat, especially if you don't have enough resources or skills.</p>
-
-<h2>Features of Defender 3 APK Mod</h2>
-<h3>Unlimited money and gems</h3>
-<p>The most obvious benefit of playing Defender 3 APK Mod is that you get unlimited money and gems in the game. Money is used to buy and upgrade your towers, while gems are used to buy and improve your weapons. With unlimited resources, you don't have to worry about running out of them or spending real money to get more. You can also use them to buy boosters, potions, scrolls, and other items that can help you in the game.</p>
-<h3>Various towers and weapons to choose from</h3>
-<p>Another feature of Defender 3 APK Mod is that it unlocks all the towers and weapons in the game. There are over 20 types of towers with different functions and abilities, such as fire, ice, lightning, and poison. You can also equip your hero with over 50 types of weapons with different effects and damage, such as bows, crossbows, guns, and cannons. You can mix and match different combinations of towers and weapons to suit your strategy and preferences.</p>
-<h3>Stunning graphics and sound effects</h3>
-<p>Defender 3 APK Mod also enhances the game's graphics and sound effects. The game has beautiful 2D graphics that show the details of the game world and characters. The sound effects are realistic and immersive, making you feel like you are in the middle of a battle. You can also adjust the graphics and sound settings according to your device's performance and your preferences.</p>
-<h3>Challenging levels and enemies</h3>
-
-<h3>Multiplayer mode and leaderboards</h3>
-<p>Defender 3 APK Mod also lets you play with other players online and compete for the highest score on the leaderboards. You can join or create a room and invite your friends or random players to join you, and chat with them to share tips and strategies. You can view your ranking and achievements on the global and regional leaderboards, and earn rewards and trophies for completing certain tasks and challenges.</p>
-<h2>How to download and install Defender 3 APK Mod</h2>
-<h3>Step 1: Download the APK file from a trusted source</h3>
-<p>The first step is to find a reliable source that provides the APK file. You can search online for websites that offer this mod, but make sure to check their reviews and ratings before downloading anything. You can also use this link to download the APK file directly.</p>
-<p></p>
-<h3>Step 2: Enable unknown sources on your device</h3>
-<p>The next step is to enable unknown sources on your device, which lets you install apps that are not from the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on. You may see a warning message; just acknowledge it and proceed.</p>
-<h3>Step 3: Install the APK file and launch the game</h3>
-<p>The final step is to install the APK file and launch the game. Locate the APK file you downloaded in your device storage, tap it, and follow the instructions. Once the installation is finished, you can open the game and enjoy it.</p>
-<h2>Conclusion</h2>
-
-<p>If you are interested in playing Defender 3 APK Mod, just follow the steps above to download and install it on your device. It is easy and safe to do, as long as you use a trusted source. Once it is on your device, you can start playing right away.</p>
-<h4>Frequently asked questions</h4>
-<p>Here are some frequently asked questions about Defender 3 APK Mod:</p>
-<ul>
-<li><b>Is Defender 3 APK Mod free?</b></li>
-<p>Yes, Defender 3 APK Mod is free to download and play. You don't have to pay anything to enjoy its features.</p>
-<li><b>Is Defender 3 APK Mod safe?</b></li>
-<p>Yes, Defender 3 APK Mod is safe to use as long as you download it from a trusted source. It does not contain viruses or malware that could harm your device or data.</p>
-<li><b>Is Defender 3 APK Mod compatible with my device?</b></li>
-<p>Defender 3 APK Mod is compatible with most Android devices running Android 4.0 or higher. However, some devices may have issues with the game's graphics or performance.</p>
-<li><b>Can I play Defender 3 APK Mod offline?</b></li>
-<p>Yes, you can play Defender 3 APK Mod without an internet connection. However, some features such as multiplayer mode and leaderboards will not be available.</p>
-<li><b>Can I update Defender 3 APK Mod?</b></li>
-<p>No, you cannot update Defender 3 APK Mod through the Google Play Store or any other source. If you want the latest version of the game, you have to download and install it again from the same source or a different one.</p>
-<p>We hope this article has helped you learn more about Defender 3 APK Mod and how to download and install it on your device. If you have any questions or comments, feel free to leave a comment below. Thanks for reading, and happy gaming!</p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Descarga De Fiebre De Oficina Juego.md
DELETED
@@ -1,128 +0,0 @@
-<br />
-<h1>Office Fever Game Download: How to Play and Enjoy This Fun Game</h1>
-<p>Do you dream of turning your startup into a money empire? Do you love idle games that let you manage your own business and earn money while you relax? If you answered yes, then you will love Office Fever Game, a fun and addictive simulation game that lets you run your own office and become a tycoon!</p>
-<h2>office fever game download</h2><br /><p><b><b>Download</b> ——— <a href="https://bltlly.com/2v6JCf">https://bltlly.com/2v6JCf</a></b></p><br /><br />
-<p>In this article, we will tell you everything you need to know about Office Fever Game: what it is, how to download and install it on your device, how to play it, and some tips and tricks to help you succeed. Let's get started!</p>
-<h2>What is Office Fever Game?</h2>
-<h3>A brief introduction to the game and its features</h3>
-<p>Office Fever Game is a simulation game developed by Rollic Games, a popular developer of casual games for Android and iOS devices. The game was released in June 2022 and has received over 10 million downloads and 4.5 stars on the Google Play Store. The game is also available on BestGames.com and on BlueStacks, an emulator that lets you play Android games on your PC or Mac.</p>
-<p>Office Fever Game is an idle game that lets you run your own office and earn money by processing paperwork. You start by hiring workers and bringing them papers to complete. As they finish tasks, you collect cash and grow your business. You can unlock new office areas, hire more workers, upgrade their skills and devices, and discover new ways to make money. The game is easy to play but hard to master: you have to balance your productivity, speed, and cash flow while preventing workers from slacking off and falling asleep. The game has a colorful, cartoonish art style, a catchy soundtrack, and a humorous tone. It is suitable for all ages and can be played offline.</p>
-<p></p>
-<h3>How to download and install Office Fever Game on your device</h3>
-
-<ol>
-<li>Open the Google Play Store or the App Store on your device.</li>
-<li>Search for "Office Fever" in the search bar.</li>
-<li>Tap the game icon that appears in the results.</li>
-<li>Tap the "Install" or "Get" button to start the download.</li>
-<li>Wait for the download to finish, then tap the "Open" or "Play" button to launch the game.</li>
-</ol>
-<p>If you want to play Office Fever Game on your PC or Mac, you can use BlueStacks, an emulator that lets you run Android apps on your computer. Just follow these steps:</p>
-<ol>
-<li>Download BlueStacks from its official website.</li>
-<li>Install BlueStacks on your PC or Mac by following the instructions.</li>
-<li>Launch BlueStacks and sign in with your Google account.</li>
-<li>Search for "Office Fever" in the search bar.</li>
-<li>Click the game icon that appears in the results.</li>
-<li>Click the "Install" button to start the download.</li>
-<li>Wait for the download to finish, then click the "Play" button to launch the game.</li>
-</ol>
-<h2>How to play Office Fever Game</h2>
-<h3>The basics of the gameplay and controls</h3>
-<p>The gameplay of Office Fever Game is simple and intuitive. You run your own office and earn money by processing paperwork. You can see your office area on the screen, with your workers, desks, paperwork, and cash, and your cash balance, gems, and level at the top of the screen.</p>
-<p>The controls are easy and responsive. You interact with the game by tapping, dragging, and swiping on the screen. Here are some of the basic actions you can perform:</p>
-<ul>
-<li>Tap a worker to hire them or to wake them up if they are sleeping.</li>
-<li>Drag a paper job from the pile to a worker's desk to assign them a task.</li>
-<li>Swipe left or right on the screen to move between office areas.</li>
-<li>Tap the cash icon to collect your earnings.</li>
-
-</ul>
-<h3>How to hire workers and process paperwork</h3>
-<p>The main way to earn money in Office Fever Game is by hiring workers and processing paperwork. You start with one worker and one desk. You can hire more workers by tapping the empty desks or chairs in your office area. Each worker costs a certain amount of cash, which increases as you hire more; the cost is shown above their desk or chair.</p>
-<p>Once you have hired a worker, you can assign them a paper job by dragging it from the pile to their desk. Each paper job has a color and a number, which indicate its type and difficulty. The paper's color matches the worker's shirt color, meaning they specialize in that type of task. The number shows how long the worker will take to complete it: the higher the number, the longer it takes.</p>
-<p>You can see each worker's task progress in the bar above their head. When the bar is full, they have finished their task and you can collect their cash by tapping the cash icon above their head. How much you earn depends on the type and difficulty of the paper job. You can also earn gems, a premium currency used to buy special items and boosters.</p>
-<h3>How to unlock new office areas and earn more money</h3>
-<p>As you progress, you can unlock new office areas that let you hire more workers and process more paperwork. You unlock them by reaching a certain level or by spending gems; the requirements for each area are shown when you tap the lock icon on the door or window.</p>
-
-<p>You can move between office areas by swiping left or right on the screen, and see an overview of all your areas by tapping the map icon in the bottom left corner. The overview shows how many workers you have in each area, how much money they earn per second, and how much cash you have collected in total.</p>
-<h3>How to upgrade your skills and devices</h3>
-<p>Another way to improve your performance and income is by upgrading your skills and devices. You can access them by tapping the menu icon in the bottom right corner of the screen and then tapping the skills or devices icons.</p>
-<p>Your skills are abilities that help you run your office more efficiently and effectively. You can upgrade them by spending cash or gems. Some of the skills you can upgrade are:</p>
-<ul>
-<li>Speed: increases how quickly your workers complete their tasks.</li>
-<li>Productivity: increases the amount of money you earn from each task.</li>
-<li>Awake: increases how long your workers stay awake before falling asleep.</li>
-<li>Motivation: increases how quickly your workers wake up after falling asleep.</li>
-<li>Cash Flow: increases the amount of cash you collect automatically every second.</li>
-</ul>
-<p>Your devices are gadgets that improve your office environment and boost your workers' performance. You can buy new devices or upgrade existing ones by spending cash or gems. Some of the devices you can buy or upgrade are:</p>
-<ul>
-<li>Coffee machine: provides coffee for your workers, keeping them awake longer.</li>
-<li>Fan: provides fresh air for your workers, preventing them from slacking off.</li>
-<li>Printer: prints more paper for your workers, increasing their productivity.</li>
-<li>Laptop: gives your workers a faster, more efficient device, reducing their task time.</li>
-
-</ul>
-<h2>Tips and tricks for Office Fever Game</h2>
-<h3>How to boost your productivity and speed</h3>
-<p>If you want to earn more money and level up faster in Office Fever Game, you need to boost your productivity and speed. Here are some tips and tricks to help you do that:</p>
-<ul>
-<li>Assign the right paper job to the right worker. Matching the paper's color to the worker's shirt increases their productivity and speed.</li>
-<li>Upgrade your skills and devices regularly. Investing in them improves your performance and income in the long run.</li>
-<li>Use boosters wisely. Boosters are special items that give you a temporary advantage. You can buy them with gems or watch ads to get them for free. Some of the boosters you can use are:</li>
-<ul>
-<li>Cash Booster: doubles your cash earnings for a limited time.</li>
-<li>Gem Booster: doubles your gem earnings for a limited time.</li>
-<li>Speed Booster: increases your workers' speed by 50% for a limited time.</li>
-<li>Productivity Booster: increases your workers' productivity by 50% for a limited time.</li>
-</ul>
-</ul>
-<h3>How to prevent workers from slacking off and sleeping</h3>
-<p>One of the challenges in Office Fever Game is preventing workers from slacking off and sleeping. Slacking workers stop working and start doing something else, such as playing games, reading books, or chatting on their phones. Sleeping workers fall asleep at their desks after working too long. Both reduce your productivity and speed, and cost you money.</p>
-<p>Here are some tips and tricks to help you prevent slacking and sleeping workers:</p>
-<ul>
-<li>Tap slacking workers to make them stop what they are doing and get back to work.</li>
-<li>Tap sleeping workers to wake them up and make them resume their tasks.</li>
-
-<li>Buy a coffee machine and a fan to provide coffee and fresh air for your workers, which keeps them awake longer and stops them from slacking off.</li>
-<li>Buy a TV to provide entertainment for your workers, which motivates them to work harder and avoid slacking.</li>
-</ul>
-<h3>How to use managers and office boys effectively</h3>
-<p>Another feature of Office Fever Game is managers and office boys. Managers are special workers who can manage other workers in an office area; office boys are special workers who can collect cash from other workers in an office area. Both save you time and effort, and increase your efficiency and income.</p>
-<p>Here are some tips and tricks to help you use managers and office boys effectively:</p>
-<ul>
-<li>You can hire managers and office boys by tapping the manager or office boy icons at the top of each office area. Each one costs a certain amount of cash or gems, which increases as you hire more.</li>
-<li>You can assign a manager or an office boy to an office area by dragging them from the icon onto the area. The number assigned to each area is shown on the icon.</li>
-<li>A manager can handle up to 10 workers in an office area. A manager automatically assigns paper jobs to workers, collects cash from them, wakes them up when they fall asleep, and stops them from slacking off. A manager also increases the productivity and speed of the workers in their area by 10%.</li>
-<li>An office boy can collect cash from up to 10 workers in an office area, so you don't have to tap the cash icon above their heads. An office boy also increases the cash earnings of the workers in their area by 10%.</li>
-</ul>
-<h3>How to earn more cash and gems</h3>
-
-<p>Here are some tips and tricks to help you earn more cash and gems:</p>
-<ul>
-<li>Process more paperwork. The more paperwork you process, the more cash and gems you earn. Assign the right paper job to the right worker, and upgrade your skills and devices to boost your productivity and speed.</li>
-<li>Collect your cash regularly. Don't let cash pile up on your workers' desks, as it slows them down and reduces your earnings. Tap the cash icon above their heads or use an office boy to collect it automatically.</li>
-<li>Watch ads. You can watch ads to get free cash, gems, or boosters, to double your offline earnings, or to skip the waiting time for unlocking new office areas.</li>
-<li>Complete achievements. Achievements earn you extra cash and gems. View them by tapping the trophy icon in the bottom right corner of the screen, check the requirements and rewards for each one, and claim them when you complete them.</li>
-<li>Spin the wheel. You can spin the wheel once a day for a random reward, such as cash, gems, boosters, or devices. Access it by tapping the wheel icon in the bottom left corner of the screen.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Office Fever Game is a fun and addictive simulation game that lets you run your own office and become a tycoon. You can hire workers, process paperwork, unlock new office areas, upgrade your skills and devices, and discover new ways to make money. The game is easy to play but hard to master: you have to balance your productivity, speed, and cash flow while preventing workers from slacking off and sleeping. The game has a colorful, cartoonish art style, a catchy soundtrack, and a humorous tone. It is suitable for all ages and can be played offline.</p>
-
-<h3>Frequently asked questions</h3>
-<p>Here are some of the most frequently asked questions about Office Fever Game:</p>
-<ol>
-<li><b>How many office areas are there in Office Fever Game?</b></li>
-<p>There are 10 office areas in Office Fever Game, each with a different theme and design: Basement, Garage, Attic, Skyscraper, Space Station, Castle, Pyramid, Island, Volcano, and Sky.</p>
-<li><b>How many workers are there in Office Fever Game?</b></li>
-<p>There are 10 types of workers in Office Fever Game, each with a different color and specialization: Red (Accounting), Orange (Marketing), Yellow (Sales), Green (IT), Blue (Legal), Purple (Design), Pink (HR), Brown (Security), Black (Hacker), and White (CEO).</p>
-<li><b>How many paper jobs are there in Office Fever Game?</b></li>
-<p>There are 10 types of paper jobs in Office Fever Game, each with a different color and difficulty: Red (Invoice), Orange (Flyer), Yellow (Contract), Green (Code), Blue (Lawsuit), Purple (Logo), Pink (Resume), Brown (Report), Black (Encryption), and White (Strategy).</p>
-<li><b>How many skills are there in Office Fever Game?</b></li>
-<p>There are 5 skills in Office Fever Game that you can upgrade with cash or gems: Speed, Productivity, Awake, Motivation, and Cash Flow.</p>
-<li><b>How many devices are there in Office Fever Game?</b></li>
-<p>There are 5 devices in Office Fever Game that you can buy or upgrade with cash or gems: Coffee Machine, Fan, Printer, Laptop, and TV.</p>
-</ol></p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/index/package_finder.py
DELETED
@@ -1,1029 +0,0 @@
-"""Routines related to PyPI, indexes"""
-
-import enum
-import functools
-import itertools
-import logging
-import re
-from typing import TYPE_CHECKING, FrozenSet, Iterable, List, Optional, Set, Tuple, Union
-
-from pip._vendor.packaging import specifiers
-from pip._vendor.packaging.tags import Tag
-from pip._vendor.packaging.utils import canonicalize_name
-from pip._vendor.packaging.version import _BaseVersion
-from pip._vendor.packaging.version import parse as parse_version
-
-from pip._internal.exceptions import (
-    BestVersionAlreadyInstalled,
-    DistributionNotFound,
-    InvalidWheelFilename,
-    UnsupportedWheel,
-)
-from pip._internal.index.collector import LinkCollector, parse_links
-from pip._internal.models.candidate import InstallationCandidate
-from pip._internal.models.format_control import FormatControl
-from pip._internal.models.link import Link
-from pip._internal.models.search_scope import SearchScope
-from pip._internal.models.selection_prefs import SelectionPreferences
-from pip._internal.models.target_python import TargetPython
-from pip._internal.models.wheel import Wheel
-from pip._internal.req import InstallRequirement
-from pip._internal.utils._log import getLogger
-from pip._internal.utils.filetypes import WHEEL_EXTENSION
-from pip._internal.utils.hashes import Hashes
-from pip._internal.utils.logging import indent_log
-from pip._internal.utils.misc import build_netloc
-from pip._internal.utils.packaging import check_requires_python
-from pip._internal.utils.unpacking import SUPPORTED_EXTENSIONS
-
-if TYPE_CHECKING:
-    from pip._vendor.typing_extensions import TypeGuard
-
-__all__ = ["FormatControl", "BestCandidateResult", "PackageFinder"]
-
-
-logger = getLogger(__name__)
-
-BuildTag = Union[Tuple[()], Tuple[int, str]]
-CandidateSortingKey = Tuple[int, int, int, _BaseVersion, Optional[int], BuildTag]
-
-
-def _check_link_requires_python(
-    link: Link,
-    version_info: Tuple[int, int, int],
-    ignore_requires_python: bool = False,
-) -> bool:
-    """
-    Return whether the given Python version is compatible with a link's
-    "Requires-Python" value.
-
-    :param version_info: A 3-tuple of ints representing the Python
-        major-minor-micro version to check.
-    :param ignore_requires_python: Whether to ignore the "Requires-Python"
-        value if the given Python version isn't compatible.
-    """
-    try:
-        is_compatible = check_requires_python(
-            link.requires_python,
-            version_info=version_info,
-        )
-    except specifiers.InvalidSpecifier:
-        logger.debug(
-            "Ignoring invalid Requires-Python (%r) for link: %s",
-            link.requires_python,
-            link,
-        )
-    else:
-        if not is_compatible:
-            version = ".".join(map(str, version_info))
-            if not ignore_requires_python:
-                logger.verbose(
-                    "Link requires a different Python (%s not in: %r): %s",
-                    version,
-                    link.requires_python,
-                    link,
-                )
-                return False
-
-            logger.debug(
-                "Ignoring failed Requires-Python check (%s not in: %r) for link: %s",
-                version,
-                link.requires_python,
-                link,
-            )
-
-    return True
-
-
-class LinkType(enum.Enum):
-    candidate = enum.auto()
-    different_project = enum.auto()
-    yanked = enum.auto()
-    format_unsupported = enum.auto()
-    format_invalid = enum.auto()
-    platform_mismatch = enum.auto()
-    requires_python_mismatch = enum.auto()
-
-
-class LinkEvaluator:
-
-    """
-    Responsible for evaluating links for a particular project.
-    """
-
-    _py_version_re = re.compile(r"-py([123]\.?[0-9]?)$")
-
-    # Don't include an allow_yanked default value to make sure each call
-    # site considers whether yanked releases are allowed. This also causes
-    # that decision to be made explicit in the calling code, which helps
-    # people when reading the code.
-    def __init__(
-        self,
-        project_name: str,
-        canonical_name: str,
-        formats: FrozenSet[str],
-        target_python: TargetPython,
-        allow_yanked: bool,
-        ignore_requires_python: Optional[bool] = None,
-    ) -> None:
-        """
-        :param project_name: The user supplied package name.
-        :param canonical_name: The canonical package name.
-        :param formats: The formats allowed for this package. Should be a set
-            with 'binary' or 'source' or both in it.
-        :param target_python: The target Python interpreter to use when
-            evaluating link compatibility. This is used, for example, to
-            check wheel compatibility, as well as when checking the Python
-            version, e.g. the Python version embedded in a link filename
-            (or egg fragment) and against an HTML link's optional PEP 503
-            "data-requires-python" attribute.
-        :param allow_yanked: Whether files marked as yanked (in the sense
-            of PEP 592) are permitted to be candidates for install.
-        :param ignore_requires_python: Whether to ignore incompatible
-            PEP 503 "data-requires-python" values in HTML links. Defaults
-            to False.
-        """
-        if ignore_requires_python is None:
-            ignore_requires_python = False
-
-        self._allow_yanked = allow_yanked
-        self._canonical_name = canonical_name
-        self._ignore_requires_python = ignore_requires_python
-        self._formats = formats
-        self._target_python = target_python
-
-        self.project_name = project_name
-
-    def evaluate_link(self, link: Link) -> Tuple[LinkType, str]:
-        """
-        Determine whether a link is a candidate for installation.
-
-        :return: A tuple (result, detail), where *result* is an enum
-            representing whether the evaluation found a candidate, or the reason
-            why one is not found. If a candidate is found, *detail* will be the
-            candidate's version string; if one is not found, it contains the
-            reason the link fails to qualify.
-        """
-        version = None
-        if link.is_yanked and not self._allow_yanked:
-            reason = link.yanked_reason or "<none given>"
-            return (LinkType.yanked, f"yanked for reason: {reason}")
-
-        if link.egg_fragment:
-            egg_info = link.egg_fragment
-            ext = link.ext
-        else:
-            egg_info, ext = link.splitext()
-            if not ext:
-                return (LinkType.format_unsupported, "not a file")
-            if ext not in SUPPORTED_EXTENSIONS:
-                return (
-                    LinkType.format_unsupported,
-                    f"unsupported archive format: {ext}",
-                )
-            if "binary" not in self._formats and ext == WHEEL_EXTENSION:
-                reason = f"No binaries permitted for {self.project_name}"
-                return (LinkType.format_unsupported, reason)
-            if "macosx10" in link.path and ext == ".zip":
-                return (LinkType.format_unsupported, "macosx10 one")
-            if ext == WHEEL_EXTENSION:
-                try:
-                    wheel = Wheel(link.filename)
-                except InvalidWheelFilename:
-                    return (
-                        LinkType.format_invalid,
-                        "invalid wheel filename",
-                    )
-                if canonicalize_name(wheel.name) != self._canonical_name:
-                    reason = f"wrong project name (not {self.project_name})"
-                    return (LinkType.different_project, reason)
-
-                supported_tags = self._target_python.get_tags()
-                if not wheel.supported(supported_tags):
|
203 |
-
# Include the wheel's tags in the reason string to
|
204 |
-
# simplify troubleshooting compatibility issues.
|
205 |
-
file_tags = ", ".join(wheel.get_formatted_file_tags())
|
206 |
-
reason = (
|
207 |
-
f"none of the wheel's tags ({file_tags}) are compatible "
|
208 |
-
f"(run pip debug --verbose to show compatible tags)"
|
209 |
-
)
|
210 |
-
return (LinkType.platform_mismatch, reason)
|
211 |
-
|
212 |
-
version = wheel.version
|
213 |
-
|
214 |
-
# This should be up by the self.ok_binary check, but see issue 2700.
|
215 |
-
if "source" not in self._formats and ext != WHEEL_EXTENSION:
|
216 |
-
reason = f"No sources permitted for {self.project_name}"
|
217 |
-
return (LinkType.format_unsupported, reason)
|
218 |
-
|
219 |
-
if not version:
|
220 |
-
version = _extract_version_from_fragment(
|
221 |
-
egg_info,
|
222 |
-
self._canonical_name,
|
223 |
-
)
|
224 |
-
if not version:
|
225 |
-
reason = f"Missing project version for {self.project_name}"
|
226 |
-
return (LinkType.format_invalid, reason)
|
227 |
-
|
228 |
-
match = self._py_version_re.search(version)
|
229 |
-
if match:
|
230 |
-
version = version[: match.start()]
|
231 |
-
py_version = match.group(1)
|
232 |
-
if py_version != self._target_python.py_version:
|
233 |
-
return (
|
234 |
-
LinkType.platform_mismatch,
|
235 |
-
"Python version is incorrect",
|
236 |
-
)
|
237 |
-
|
238 |
-
supports_python = _check_link_requires_python(
|
239 |
-
link,
|
240 |
-
version_info=self._target_python.py_version_info,
|
241 |
-
ignore_requires_python=self._ignore_requires_python,
|
242 |
-
)
|
243 |
-
if not supports_python:
|
244 |
-
reason = f"{version} Requires-Python {link.requires_python}"
|
245 |
-
return (LinkType.requires_python_mismatch, reason)
|
246 |
-
|
247 |
-
logger.debug("Found link %s, version: %s", link, version)
|
248 |
-
|
249 |
-
return (LinkType.candidate, version)
|
250 |
-
|
251 |
-
|
252 |
-
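
evaluate_link() above is pure candidate triage: given a Link, it returns a (LinkType, detail) pair. A minimal sketch of driving it by hand follows; the import paths are pip's private internals and the URL is hypothetical, so this should only be expected to work with pip releases whose package_finder module matches the code above.

from pip._internal.index.package_finder import LinkEvaluator, LinkType
from pip._internal.models.link import Link
from pip._internal.models.target_python import TargetPython

# Private pip APIs: layout assumed to match the module shown above.
evaluator = LinkEvaluator(
    project_name="requests",
    canonical_name="requests",
    formats=frozenset({"binary", "source"}),
    target_python=TargetPython(),  # describes the running interpreter
    allow_yanked=False,
)
# A pure-Python wheel is compatible everywhere; detail carries the version.
result, detail = evaluator.evaluate_link(
    Link("https://example.com/requests-2.28.0-py3-none-any.whl")  # hypothetical URL
)
print(result is LinkType.candidate, detail)  # True 2.28.0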
def filter_unallowed_hashes(
    candidates: List[InstallationCandidate],
    hashes: Optional[Hashes],
    project_name: str,
) -> List[InstallationCandidate]:
    """
    Filter out candidates whose hashes aren't allowed, and return a new
    list of candidates.

    If at least one candidate has an allowed hash, then all candidates with
    either an allowed hash or no hash specified are returned. Otherwise,
    the given candidates are returned.

    Including the candidates with no hash specified when there is a match
    allows a warning to be logged if there is a more preferred candidate
    with no hash specified. Returning all candidates in the case of no
    matches lets pip report the hash of the candidate that would otherwise
    have been installed (e.g. permitting the user to more easily update
    their requirements file with the desired hash).
    """
    if not hashes:
        logger.debug(
            "Given no hashes to check %s links for project %r: "
            "discarding no candidates",
            len(candidates),
            project_name,
        )
        # Make sure we're not returning back the given value.
        return list(candidates)

    matches_or_no_digest = []
    # Collect the non-matches for logging purposes.
    non_matches = []
    match_count = 0
    for candidate in candidates:
        link = candidate.link
        if not link.has_hash:
            pass
        elif link.is_hash_allowed(hashes=hashes):
            match_count += 1
        else:
            non_matches.append(candidate)
            continue

        matches_or_no_digest.append(candidate)

    if match_count:
        filtered = matches_or_no_digest
    else:
        # Make sure we're not returning back the given value.
        filtered = list(candidates)

    if len(filtered) == len(candidates):
        discard_message = "discarding no candidates"
    else:
        discard_message = "discarding {} non-matches:\n  {}".format(
            len(non_matches),
            "\n  ".join(str(candidate.link) for candidate in non_matches),
        )

    logger.debug(
        "Checked %s links for project %r against %s hashes "
        "(%s matches, %s no digest): %s",
        len(candidates),
        project_name,
        hashes.digest_count,
        match_count,
        len(matches_or_no_digest) - match_count,
        discard_message,
    )

    return filtered
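
The docstring above encodes a two-way rule: with at least one hash match, keep matches plus hashless candidates; with none, keep everything so pip can report what it would have installed. A standalone sketch of that partitioning follows (the Candidate tuple here is a stand-in for illustration, not pip's class):

from typing import List, Optional, Set, Tuple

Candidate = Tuple[str, Optional[str]]  # (version, sha256 digest or None)

def filter_by_hash(candidates: List[Candidate], allowed: Set[str]) -> List[Candidate]:
    matches_or_no_digest = []
    match_count = 0
    for version, digest in candidates:
        if digest is None:
            matches_or_no_digest.append((version, digest))  # keep hashless
        elif digest in allowed:
            match_count += 1
            matches_or_no_digest.append((version, digest))  # keep matches
    # No match at all: return the full list, as the original does.
    return matches_or_no_digest if match_count else list(candidates)

cands = [("1.0", "aaa"), ("1.1", None), ("1.2", "bbb")]
print(filter_by_hash(cands, {"aaa"}))  # [('1.0', 'aaa'), ('1.1', None)]
print(filter_by_hash(cands, {"zzz"}))  # all three candidates kept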
class CandidatePreferences:

    """
    Encapsulates some of the preferences for filtering and sorting
    InstallationCandidate objects.
    """

    def __init__(
        self,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
    ) -> None:
        """
        :param allow_all_prereleases: Whether to allow all pre-releases.
        """
        self.allow_all_prereleases = allow_all_prereleases
        self.prefer_binary = prefer_binary


class BestCandidateResult:
    """A collection of candidates, returned by `PackageFinder.find_best_candidate`.

    This class is only intended to be instantiated by CandidateEvaluator's
    `compute_best_candidate()` method.
    """

    def __init__(
        self,
        candidates: List[InstallationCandidate],
        applicable_candidates: List[InstallationCandidate],
        best_candidate: Optional[InstallationCandidate],
    ) -> None:
        """
        :param candidates: A sequence of all available candidates found.
        :param applicable_candidates: The applicable candidates.
        :param best_candidate: The most preferred candidate found, or None
            if no applicable candidates were found.
        """
        assert set(applicable_candidates) <= set(candidates)

        if best_candidate is None:
            assert not applicable_candidates
        else:
            assert best_candidate in applicable_candidates

        self._applicable_candidates = applicable_candidates
        self._candidates = candidates

        self.best_candidate = best_candidate

    def iter_all(self) -> Iterable[InstallationCandidate]:
        """Iterate through all candidates."""
        return iter(self._candidates)

    def iter_applicable(self) -> Iterable[InstallationCandidate]:
        """Iterate through the applicable candidates."""
        return iter(self._applicable_candidates)


class CandidateEvaluator:

    """
    Responsible for filtering and sorting candidates for installation based
    on what tags are valid.
    """

    @classmethod
    def create(
        cls,
        project_name: str,
        target_python: Optional[TargetPython] = None,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
        specifier: Optional[specifiers.BaseSpecifier] = None,
        hashes: Optional[Hashes] = None,
    ) -> "CandidateEvaluator":
        """Create a CandidateEvaluator object.

        :param target_python: The target Python interpreter to use when
            checking compatibility. If None (the default), a TargetPython
            object will be constructed from the running Python.
        :param specifier: An optional object implementing `filter`
            (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
            versions.
        :param hashes: An optional collection of allowed hashes.
        """
        if target_python is None:
            target_python = TargetPython()
        if specifier is None:
            specifier = specifiers.SpecifierSet()

        supported_tags = target_python.get_tags()

        return cls(
            project_name=project_name,
            supported_tags=supported_tags,
            specifier=specifier,
            prefer_binary=prefer_binary,
            allow_all_prereleases=allow_all_prereleases,
            hashes=hashes,
        )

    def __init__(
        self,
        project_name: str,
        supported_tags: List[Tag],
        specifier: specifiers.BaseSpecifier,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
        hashes: Optional[Hashes] = None,
    ) -> None:
        """
        :param supported_tags: The PEP 425 tags supported by the target
            Python in order of preference (most preferred first).
        """
        self._allow_all_prereleases = allow_all_prereleases
        self._hashes = hashes
        self._prefer_binary = prefer_binary
        self._project_name = project_name
        self._specifier = specifier
        self._supported_tags = supported_tags
        # Since the index of the tag in the _supported_tags list is used
        # as a priority, precompute a map from tag to index/priority to be
        # used in wheel.find_most_preferred_tag.
        self._wheel_tag_preferences = {
            tag: idx for idx, tag in enumerate(supported_tags)
        }

    def get_applicable_candidates(
        self,
        candidates: List[InstallationCandidate],
    ) -> List[InstallationCandidate]:
        """
        Return the applicable candidates from a list of candidates.
        """
        # Using None infers from the specifier instead.
        allow_prereleases = self._allow_all_prereleases or None
        specifier = self._specifier
        versions = {
            str(v)
            for v in specifier.filter(
                # We turn the version object into a str here because otherwise
                # when we're debundled but setuptools isn't, Python will see
                # packaging.version.Version and
                # pkg_resources._vendor.packaging.version.Version as different
                # types. This way we'll use a str as a common data interchange
                # format. If we stop using the pkg_resources provided specifier
                # and start using our own, we can drop the cast to str().
                (str(c.version) for c in candidates),
                prereleases=allow_prereleases,
            )
        }

        # Again, converting version to str to deal with debundling.
        applicable_candidates = [c for c in candidates if str(c.version) in versions]

        filtered_applicable_candidates = filter_unallowed_hashes(
            candidates=applicable_candidates,
            hashes=self._hashes,
            project_name=self._project_name,
        )

        return sorted(filtered_applicable_candidates, key=self._sort_key)

    def _sort_key(self, candidate: InstallationCandidate) -> CandidateSortingKey:
        """
        Function to pass as the `key` argument to a call to sorted() to sort
        InstallationCandidates by preference.

        Returns a tuple such that tuples sorting as greater using Python's
        default comparison operator are more preferred.

        The preference is as follows:

        First and foremost, candidates with allowed (matching) hashes are
        always preferred over candidates without matching hashes. This is
        because e.g. if the only candidate with an allowed hash is yanked,
        we still want to use that candidate.

        Second, excepting hash considerations, candidates that have been
        yanked (in the sense of PEP 592) are always less preferred than
        candidates that haven't been yanked. Then:

        If not finding wheels, they are sorted by version only.
        If finding wheels, then the sort order is by version, then:
          1. existing installs
          2. wheels ordered via Wheel.support_index_min(self._supported_tags)
          3. source archives
        If prefer_binary was set, then all wheels are sorted above sources.

        Note: it was considered to embed this logic into the Link
              comparison operators, but then different sdist links
              with the same version, would have to be considered equal
        """
        valid_tags = self._supported_tags
        support_num = len(valid_tags)
        build_tag: BuildTag = ()
        binary_preference = 0
        link = candidate.link
        if link.is_wheel:
            # can raise InvalidWheelFilename
            wheel = Wheel(link.filename)
            try:
                pri = -(
                    wheel.find_most_preferred_tag(
                        valid_tags, self._wheel_tag_preferences
                    )
                )
            except ValueError:
                raise UnsupportedWheel(
                    "{} is not a supported wheel for this platform. It "
                    "can't be sorted.".format(wheel.filename)
                )
            if self._prefer_binary:
                binary_preference = 1
            if wheel.build_tag is not None:
                match = re.match(r"^(\d+)(.*)$", wheel.build_tag)
                assert match is not None, "guaranteed by filename validation"
                build_tag_groups = match.groups()
                build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
        else:  # sdist
            pri = -(support_num)
        has_allowed_hash = int(link.is_hash_allowed(self._hashes))
        yank_value = -1 * int(link.is_yanked)  # -1 for yanked.
        return (
            has_allowed_hash,
            yank_value,
            binary_preference,
            candidate.version,
            pri,
            build_tag,
        )

    def sort_best_candidate(
        self,
        candidates: List[InstallationCandidate],
    ) -> Optional[InstallationCandidate]:
        """
        Return the best candidate per the instance's sort order, or None if
        no candidate is acceptable.
        """
        if not candidates:
            return None
        best_candidate = max(candidates, key=self._sort_key)
        return best_candidate

    def compute_best_candidate(
        self,
        candidates: List[InstallationCandidate],
    ) -> BestCandidateResult:
        """
        Compute and return a `BestCandidateResult` instance.
        """
        applicable_candidates = self.get_applicable_candidates(candidates)

        best_candidate = self.sort_best_candidate(applicable_candidates)

        return BestCandidateResult(
            candidates,
            applicable_candidates=applicable_candidates,
            best_candidate=best_candidate,
        )
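
_sort_key() relies entirely on Python's element-wise tuple comparison, so an allowed hash outranks yank status, which outranks binary preference, then version, tag priority, and build tag. A toy illustration with hand-written key tuples (the values are made up for illustration, not produced by pip):

from packaging.version import Version

keys = {
    "yanked wheel, hash ok": (1, -1, 0, Version("2.0"), -1, ()),
    "sdist, no allowed hash": (0, 0, 0, Version("2.0"), -30, ()),
    "wheel, hash ok": (1, 0, 0, Version("1.9"), -1, ()),
}
# max() over the key tuples mirrors sort_best_candidate() above.
print(max(keys, key=keys.get))  # wheel, hash ok: an allowed hash beats a newer sdist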
class PackageFinder:
    """This finds packages.

    This is meant to match easy_install's technique for looking for
    packages, by reading pages and looking for appropriate links.
    """

    def __init__(
        self,
        link_collector: LinkCollector,
        target_python: TargetPython,
        allow_yanked: bool,
        format_control: Optional[FormatControl] = None,
        candidate_prefs: Optional[CandidatePreferences] = None,
        ignore_requires_python: Optional[bool] = None,
    ) -> None:
        """
        This constructor is primarily meant to be used by the create() class
        method and from tests.

        :param format_control: A FormatControl object, used to control
            the selection of source packages / binary packages when consulting
            the index and links.
        :param candidate_prefs: Options to use when creating a
            CandidateEvaluator object.
        """
        if candidate_prefs is None:
            candidate_prefs = CandidatePreferences()

        format_control = format_control or FormatControl(set(), set())

        self._allow_yanked = allow_yanked
        self._candidate_prefs = candidate_prefs
        self._ignore_requires_python = ignore_requires_python
        self._link_collector = link_collector
        self._target_python = target_python

        self.format_control = format_control

        # These are boring links that have already been logged somehow.
        self._logged_links: Set[Tuple[Link, LinkType, str]] = set()

    # Don't include an allow_yanked default value to make sure each call
    # site considers whether yanked releases are allowed. This also causes
    # that decision to be made explicit in the calling code, which helps
    # people when reading the code.
    @classmethod
    def create(
        cls,
        link_collector: LinkCollector,
        selection_prefs: SelectionPreferences,
        target_python: Optional[TargetPython] = None,
    ) -> "PackageFinder":
        """Create a PackageFinder.

        :param selection_prefs: The candidate selection preferences, as a
            SelectionPreferences object.
        :param target_python: The target Python interpreter to use when
            checking compatibility. If None (the default), a TargetPython
            object will be constructed from the running Python.
        """
        if target_python is None:
            target_python = TargetPython()

        candidate_prefs = CandidatePreferences(
            prefer_binary=selection_prefs.prefer_binary,
            allow_all_prereleases=selection_prefs.allow_all_prereleases,
        )

        return cls(
            candidate_prefs=candidate_prefs,
            link_collector=link_collector,
            target_python=target_python,
            allow_yanked=selection_prefs.allow_yanked,
            format_control=selection_prefs.format_control,
            ignore_requires_python=selection_prefs.ignore_requires_python,
        )

    @property
    def target_python(self) -> TargetPython:
        return self._target_python

    @property
    def search_scope(self) -> SearchScope:
        return self._link_collector.search_scope

    @search_scope.setter
    def search_scope(self, search_scope: SearchScope) -> None:
        self._link_collector.search_scope = search_scope

    @property
    def find_links(self) -> List[str]:
        return self._link_collector.find_links

    @property
    def index_urls(self) -> List[str]:
        return self.search_scope.index_urls

    @property
    def trusted_hosts(self) -> Iterable[str]:
        for host_port in self._link_collector.session.pip_trusted_origins:
            yield build_netloc(*host_port)

    @property
    def allow_all_prereleases(self) -> bool:
        return self._candidate_prefs.allow_all_prereleases

    def set_allow_all_prereleases(self) -> None:
        self._candidate_prefs.allow_all_prereleases = True

    @property
    def prefer_binary(self) -> bool:
        return self._candidate_prefs.prefer_binary

    def set_prefer_binary(self) -> None:
        self._candidate_prefs.prefer_binary = True

    def requires_python_skipped_reasons(self) -> List[str]:
        reasons = {
            detail
            for _, result, detail in self._logged_links
            if result == LinkType.requires_python_mismatch
        }
        return sorted(reasons)

    def make_link_evaluator(self, project_name: str) -> LinkEvaluator:
        canonical_name = canonicalize_name(project_name)
        formats = self.format_control.get_allowed_formats(canonical_name)

        return LinkEvaluator(
            project_name=project_name,
            canonical_name=canonical_name,
            formats=formats,
            target_python=self._target_python,
            allow_yanked=self._allow_yanked,
            ignore_requires_python=self._ignore_requires_python,
        )

    def _sort_links(self, links: Iterable[Link]) -> List[Link]:
        """
        Returns elements of links in order, non-egg links first, egg links
        second, while eliminating duplicates
        """
        eggs, no_eggs = [], []
        seen: Set[Link] = set()
        for link in links:
            if link not in seen:
                seen.add(link)
                if link.egg_fragment:
                    eggs.append(link)
                else:
                    no_eggs.append(link)
        return no_eggs + eggs

    def _log_skipped_link(self, link: Link, result: LinkType, detail: str) -> None:
        entry = (link, result, detail)
        if entry not in self._logged_links:
            # Put the link at the end so the reason is more visible and because
            # the link string is usually very long.
            logger.debug("Skipping link: %s: %s", detail, link)
            self._logged_links.add(entry)

    def get_install_candidate(
        self, link_evaluator: LinkEvaluator, link: Link
    ) -> Optional[InstallationCandidate]:
        """
        If the link is a candidate for install, convert it to an
        InstallationCandidate and return it. Otherwise, return None.
        """
        result, detail = link_evaluator.evaluate_link(link)
        if result != LinkType.candidate:
            self._log_skipped_link(link, result, detail)
            return None

        return InstallationCandidate(
            name=link_evaluator.project_name,
            link=link,
            version=detail,
        )

    def evaluate_links(
        self, link_evaluator: LinkEvaluator, links: Iterable[Link]
    ) -> List[InstallationCandidate]:
        """
        Convert links that are candidates to InstallationCandidate objects.
        """
        candidates = []
        for link in self._sort_links(links):
            candidate = self.get_install_candidate(link_evaluator, link)
            if candidate is not None:
                candidates.append(candidate)

        return candidates

    def process_project_url(
        self, project_url: Link, link_evaluator: LinkEvaluator
    ) -> List[InstallationCandidate]:
        logger.debug(
            "Fetching project page and analyzing links: %s",
            project_url,
        )
        index_response = self._link_collector.fetch_response(project_url)
        if index_response is None:
            return []

        page_links = list(parse_links(index_response))

        with indent_log():
            package_links = self.evaluate_links(
                link_evaluator,
                links=page_links,
            )

        return package_links

    @functools.lru_cache(maxsize=None)
    def find_all_candidates(self, project_name: str) -> List[InstallationCandidate]:
        """Find all available InstallationCandidate for project_name

        This checks index_urls and find_links.
        All versions found are returned as an InstallationCandidate list.

        See LinkEvaluator.evaluate_link() for details on which files
        are accepted.
        """
        link_evaluator = self.make_link_evaluator(project_name)

        collected_sources = self._link_collector.collect_sources(
            project_name=project_name,
            candidates_from_page=functools.partial(
                self.process_project_url,
                link_evaluator=link_evaluator,
            ),
        )

        page_candidates_it = itertools.chain.from_iterable(
            source.page_candidates()
            for sources in collected_sources
            for source in sources
            if source is not None
        )
        page_candidates = list(page_candidates_it)

        file_links_it = itertools.chain.from_iterable(
            source.file_links()
            for sources in collected_sources
            for source in sources
            if source is not None
        )
        file_candidates = self.evaluate_links(
            link_evaluator,
            sorted(file_links_it, reverse=True),
        )

        if logger.isEnabledFor(logging.DEBUG) and file_candidates:
            paths = []
            for candidate in file_candidates:
                assert candidate.link.url  # we need to have a URL
                try:
                    paths.append(candidate.link.file_path)
                except Exception:
                    paths.append(candidate.link.url)  # it's not a local file

            logger.debug("Local files found: %s", ", ".join(paths))

        # This is an intentional priority ordering
        return file_candidates + page_candidates

    def make_candidate_evaluator(
        self,
        project_name: str,
        specifier: Optional[specifiers.BaseSpecifier] = None,
        hashes: Optional[Hashes] = None,
    ) -> CandidateEvaluator:
        """Create a CandidateEvaluator object to use."""
        candidate_prefs = self._candidate_prefs
        return CandidateEvaluator.create(
            project_name=project_name,
            target_python=self._target_python,
            prefer_binary=candidate_prefs.prefer_binary,
            allow_all_prereleases=candidate_prefs.allow_all_prereleases,
            specifier=specifier,
            hashes=hashes,
        )

    @functools.lru_cache(maxsize=None)
    def find_best_candidate(
        self,
        project_name: str,
        specifier: Optional[specifiers.BaseSpecifier] = None,
        hashes: Optional[Hashes] = None,
    ) -> BestCandidateResult:
        """Find matches for the given project and specifier.

        :param specifier: An optional object implementing `filter`
            (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
            versions.

        :return: A `BestCandidateResult` instance.
        """
        candidates = self.find_all_candidates(project_name)
        candidate_evaluator = self.make_candidate_evaluator(
            project_name=project_name,
            specifier=specifier,
            hashes=hashes,
        )
        return candidate_evaluator.compute_best_candidate(candidates)

    def find_requirement(
        self, req: InstallRequirement, upgrade: bool
    ) -> Optional[InstallationCandidate]:
        """Try to find a Link matching req

        Expects req, an InstallRequirement and upgrade, a boolean
        Returns an InstallationCandidate if found,
        Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
        """
        hashes = req.hashes(trust_internet=False)
        best_candidate_result = self.find_best_candidate(
            req.name,
            specifier=req.specifier,
            hashes=hashes,
        )
        best_candidate = best_candidate_result.best_candidate

        installed_version: Optional[_BaseVersion] = None
        if req.satisfied_by is not None:
            installed_version = req.satisfied_by.version

        def _format_versions(cand_iter: Iterable[InstallationCandidate]) -> str:
            # This repeated parse_version and str() conversion is needed to
            # handle different vendoring sources from pip and pkg_resources.
            # If we stop using the pkg_resources provided specifier and start
            # using our own, we can drop the cast to str().
            return (
                ", ".join(
                    sorted(
                        {str(c.version) for c in cand_iter},
                        key=parse_version,
                    )
                )
                or "none"
            )

        if installed_version is None and best_candidate is None:
            logger.critical(
                "Could not find a version that satisfies the requirement %s "
                "(from versions: %s)",
                req,
                _format_versions(best_candidate_result.iter_all()),
            )

            raise DistributionNotFound(
                "No matching distribution found for {}".format(req)
            )

        def _should_install_candidate(
            candidate: Optional[InstallationCandidate],
        ) -> "TypeGuard[InstallationCandidate]":
            if installed_version is None:
                return True
            if best_candidate is None:
                return False
            return best_candidate.version > installed_version

        if not upgrade and installed_version is not None:
            if _should_install_candidate(best_candidate):
                logger.debug(
                    "Existing installed version (%s) satisfies requirement "
                    "(most up-to-date version is %s)",
                    installed_version,
                    best_candidate.version,
                )
            else:
                logger.debug(
                    "Existing installed version (%s) is most up-to-date and "
                    "satisfies requirement",
                    installed_version,
                )
            return None

        if _should_install_candidate(best_candidate):
            logger.debug(
                "Using version %s (newest of versions: %s)",
                best_candidate.version,
                _format_versions(best_candidate_result.iter_applicable()),
            )
            return best_candidate

        # We have an existing version, and it's the best version
        logger.debug(
            "Installed version (%s) is most up-to-date (past versions: %s)",
            installed_version,
            _format_versions(best_candidate_result.iter_applicable()),
        )
        raise BestVersionAlreadyInstalled


def _find_name_version_sep(fragment: str, canonical_name: str) -> int:
    """Find the separator's index based on the package's canonical name.

    :param fragment: A <package>+<version> filename "fragment" (stem) or
        egg fragment.
    :param canonical_name: The package's canonical name.

    This function is needed since the canonicalized name does not necessarily
    have the same length as the egg info's name part. An example::

    >>> fragment = 'foo__bar-1.0'
    >>> canonical_name = 'foo-bar'
    >>> _find_name_version_sep(fragment, canonical_name)
    8
    """
    # Project name and version must be separated by one single dash. Find all
    # occurrences of dashes; if the string in front of it matches the canonical
    # name, this is the one separating the name and version parts.
    for i, c in enumerate(fragment):
        if c != "-":
            continue
        if canonicalize_name(fragment[:i]) == canonical_name:
            return i
    raise ValueError(f"{fragment} does not match {canonical_name}")


def _extract_version_from_fragment(fragment: str, canonical_name: str) -> Optional[str]:
    """Parse the version string from a <package>+<version> filename
    "fragment" (stem) or egg fragment.

    :param fragment: The string to parse. E.g. foo-2.1
    :param canonical_name: The canonicalized name of the package this
        belongs to.
    """
    try:
        version_start = _find_name_version_sep(fragment, canonical_name) + 1
    except ValueError:
        return None
    version = fragment[version_start:]
    if not version:
        return None
    return version
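
The doctest in _find_name_version_sep pins the tricky case: the canonicalized name can be shorter than the name part of the fragment. A standalone restatement of the separator search using the public packaging.utils.canonicalize_name (the function name here is illustrative, not pip's private one):

from packaging.utils import canonicalize_name

def name_version_sep(fragment: str, canonical_name: str) -> int:
    # Try every dash; the one whose prefix canonicalizes to the project
    # name separates the name from the version.
    for i, c in enumerate(fragment):
        if c == "-" and canonicalize_name(fragment[:i]) == canonical_name:
            return i
    raise ValueError(f"{fragment} does not match {canonical_name}")

print(name_version_sep("foo__bar-1.0", "foo-bar"))  # 8
print("foo__bar-1.0"[name_version_sep("foo__bar-1.0", "foo-bar") + 1:])  # 1.0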
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
DELETED
@@ -1,39 +0,0 @@
# SPDX-FileCopyrightText: 2015 Eric Larson
#
# SPDX-License-Identifier: Apache-2.0

from __future__ import division

from datetime import datetime
from pip._vendor.cachecontrol.cache import BaseCache


class RedisCache(BaseCache):

    def __init__(self, conn):
        self.conn = conn

    def get(self, key):
        return self.conn.get(key)

    def set(self, key, value, expires=None):
        if not expires:
            self.conn.set(key, value)
        elif isinstance(expires, datetime):
            expires = expires - datetime.utcnow()
            self.conn.setex(key, int(expires.total_seconds()), value)
        else:
            self.conn.setex(key, expires, value)

    def delete(self, key):
        self.conn.delete(key)

    def clear(self):
        """Helper for clearing all the keys in a database. Use with
        caution!"""
        for key in self.conn.keys():
            self.conn.delete(key)

    def close(self):
        """Redis uses connection pooling, no need to close the connection."""
        pass
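
A minimal sketch of putting this cache behind an HTTP session, assuming the upstream cachecontrol and redis packages rather than pip's vendored copies (the localhost server is hypothetical):

import redis
import requests
from cachecontrol import CacheControl
from cachecontrol.caches import RedisCache

conn = redis.Redis(host="localhost", port=6379)  # hypothetical local Redis
session = CacheControl(requests.Session(), cache=RedisCache(conn))
response = session.get("https://example.com/")  # stored and served per HTTP caching headers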
spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/bandwidth.py
DELETED
@@ -1,439 +0,0 @@
|
|
1 |
-
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
4 |
-
# may not use this file except in compliance with the License. A copy of
|
5 |
-
# the License is located at
|
6 |
-
#
|
7 |
-
# http://aws.amazon.com/apache2.0/
|
8 |
-
#
|
9 |
-
# or in the "license" file accompanying this file. This file is
|
10 |
-
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
11 |
-
# ANY KIND, either express or implied. See the License for the specific
|
12 |
-
# language governing permissions and limitations under the License.
|
13 |
-
import threading
|
14 |
-
import time
|
15 |
-
|
16 |
-
|
17 |
-
class RequestExceededException(Exception):
|
18 |
-
def __init__(self, requested_amt, retry_time):
|
19 |
-
"""Error when requested amount exceeds what is allowed
|
20 |
-
|
21 |
-
The request that raised this error should be retried after waiting
|
22 |
-
the time specified by ``retry_time``.
|
23 |
-
|
24 |
-
:type requested_amt: int
|
25 |
-
:param requested_amt: The originally requested byte amount
|
26 |
-
|
27 |
-
:type retry_time: float
|
28 |
-
:param retry_time: The length in time to wait to retry for the
|
29 |
-
requested amount
|
30 |
-
"""
|
31 |
-
self.requested_amt = requested_amt
|
32 |
-
self.retry_time = retry_time
|
33 |
-
msg = 'Request amount {} exceeded the amount available. Retry in {}'.format(
|
34 |
-
requested_amt, retry_time
|
35 |
-
)
|
36 |
-
super().__init__(msg)
|
37 |
-
|
38 |
-
|
39 |
-
class RequestToken:
|
40 |
-
"""A token to pass as an identifier when consuming from the LeakyBucket"""
|
41 |
-
|
42 |
-
pass
|
43 |
-
|
44 |
-
|
45 |
-
class TimeUtils:
|
46 |
-
def time(self):
|
47 |
-
"""Get the current time back
|
48 |
-
|
49 |
-
:rtype: float
|
50 |
-
:returns: The current time in seconds
|
51 |
-
"""
|
52 |
-
return time.time()
|
53 |
-
|
54 |
-
def sleep(self, value):
|
55 |
-
"""Sleep for a designated time
|
56 |
-
|
57 |
-
:type value: float
|
58 |
-
:param value: The time to sleep for in seconds
|
59 |
-
"""
|
60 |
-
return time.sleep(value)
|
61 |
-
|
62 |
-
|
63 |
-
class BandwidthLimiter:
|
64 |
-
def __init__(self, leaky_bucket, time_utils=None):
|
65 |
-
"""Limits bandwidth for shared S3 transfers
|
66 |
-
|
67 |
-
:type leaky_bucket: LeakyBucket
|
68 |
-
:param leaky_bucket: The leaky bucket to use limit bandwidth
|
69 |
-
|
70 |
-
:type time_utils: TimeUtils
|
71 |
-
:param time_utils: Time utility to use for interacting with time.
|
72 |
-
"""
|
73 |
-
self._leaky_bucket = leaky_bucket
|
74 |
-
self._time_utils = time_utils
|
75 |
-
if time_utils is None:
|
76 |
-
self._time_utils = TimeUtils()
|
77 |
-
|
78 |
-
def get_bandwith_limited_stream(
|
79 |
-
self, fileobj, transfer_coordinator, enabled=True
|
80 |
-
):
|
81 |
-
"""Wraps a fileobj in a bandwidth limited stream wrapper
|
82 |
-
|
83 |
-
:type fileobj: file-like obj
|
84 |
-
:param fileobj: The file-like obj to wrap
|
85 |
-
|
86 |
-
:type transfer_coordinator: s3transfer.futures.TransferCoordinator
|
87 |
-
param transfer_coordinator: The coordinator for the general transfer
|
88 |
-
that the wrapped stream is a part of
|
89 |
-
|
90 |
-
:type enabled: boolean
|
91 |
-
:param enabled: Whether bandwidth limiting should be enabled to start
|
92 |
-
"""
|
93 |
-
stream = BandwidthLimitedStream(
|
94 |
-
fileobj, self._leaky_bucket, transfer_coordinator, self._time_utils
|
95 |
-
)
|
96 |
-
if not enabled:
|
97 |
-
stream.disable_bandwidth_limiting()
|
98 |
-
return stream
|
99 |
-
|
100 |
-
|
101 |
-
class BandwidthLimitedStream:
|
102 |
-
def __init__(
|
103 |
-
self,
|
104 |
-
fileobj,
|
105 |
-
leaky_bucket,
|
106 |
-
transfer_coordinator,
|
107 |
-
time_utils=None,
|
108 |
-
bytes_threshold=256 * 1024,
|
109 |
-
):
|
110 |
-
"""Limits bandwidth for reads on a wrapped stream
|
111 |
-
|
112 |
-
:type fileobj: file-like object
|
113 |
-
:param fileobj: The file like object to wrap
|
114 |
-
|
115 |
-
:type leaky_bucket: LeakyBucket
|
116 |
-
:param leaky_bucket: The leaky bucket to use to throttle reads on
|
117 |
-
the stream
|
118 |
-
|
119 |
-
:type transfer_coordinator: s3transfer.futures.TransferCoordinator
|
120 |
-
param transfer_coordinator: The coordinator for the general transfer
|
121 |
-
that the wrapped stream is a part of
|
122 |
-
|
123 |
-
:type time_utils: TimeUtils
|
124 |
-
:param time_utils: The time utility to use for interacting with time
|
125 |
-
"""
|
126 |
-
self._fileobj = fileobj
|
127 |
-
self._leaky_bucket = leaky_bucket
|
128 |
-
self._transfer_coordinator = transfer_coordinator
|
129 |
-
self._time_utils = time_utils
|
130 |
-
if time_utils is None:
|
131 |
-
self._time_utils = TimeUtils()
|
132 |
-
self._bandwidth_limiting_enabled = True
|
133 |
-
self._request_token = RequestToken()
|
134 |
-
self._bytes_seen = 0
|
135 |
-
self._bytes_threshold = bytes_threshold
|
136 |
-
|
137 |
-
def enable_bandwidth_limiting(self):
|
138 |
-
"""Enable bandwidth limiting on reads to the stream"""
|
139 |
-
self._bandwidth_limiting_enabled = True
|
140 |
-
|
141 |
-
def disable_bandwidth_limiting(self):
|
142 |
-
"""Disable bandwidth limiting on reads to the stream"""
|
143 |
-
self._bandwidth_limiting_enabled = False
|
144 |
-
|
145 |
-
def read(self, amount):
|
146 |
-
"""Read a specified amount
|
147 |
-
|
148 |
-
Reads will only be throttled if bandwidth limiting is enabled.
|
149 |
-
"""
|
150 |
-
if not self._bandwidth_limiting_enabled:
|
151 |
-
return self._fileobj.read(amount)
|
152 |
-
|
153 |
-
# We do not want to be calling consume on every read as the read
|
154 |
-
# amounts can be small causing the lock of the leaky bucket to
|
155 |
-
# introduce noticeable overhead. So instead we keep track of
|
156 |
-
# how many bytes we have seen and only call consume once we pass a
|
157 |
-
# certain threshold.
|
158 |
-
self._bytes_seen += amount
|
159 |
-
if self._bytes_seen < self._bytes_threshold:
|
160 |
-
return self._fileobj.read(amount)
|
161 |
-
|
162 |
-
self._consume_through_leaky_bucket()
|
163 |
-
return self._fileobj.read(amount)
|
164 |
-
|
165 |
-
def _consume_through_leaky_bucket(self):
|
166 |
-
# NOTE: If the read amount on the stream are high, it will result
|
167 |
-
# in large bursty behavior as there is not an interface for partial
|
168 |
-
# reads. However given the read's on this abstraction are at most 256KB
|
169 |
-
# (via downloads), it reduces the burstiness to be small KB bursts at
|
170 |
-
# worst.
|
171 |
-
while not self._transfer_coordinator.exception:
|
172 |
-
try:
|
173 |
-
self._leaky_bucket.consume(
|
174 |
-
self._bytes_seen, self._request_token
|
175 |
-
)
|
176 |
-
self._bytes_seen = 0
|
177 |
-
return
|
178 |
-
except RequestExceededException as e:
|
179 |
-
self._time_utils.sleep(e.retry_time)
|
180 |
-
else:
|
181 |
-
raise self._transfer_coordinator.exception
|
182 |
-
|
183 |
-
def signal_transferring(self):
|
184 |
-
"""Signal that data being read is being transferred to S3"""
|
185 |
-
self.enable_bandwidth_limiting()
|
186 |
-
|
187 |
-
def signal_not_transferring(self):
|
188 |
-
"""Signal that data being read is not being transferred to S3"""
|
189 |
-
self.disable_bandwidth_limiting()
|
190 |
-
|
191 |
-
def seek(self, where, whence=0):
|
192 |
-
self._fileobj.seek(where, whence)
|
193 |
-
|
194 |
-
def tell(self):
|
195 |
-
return self._fileobj.tell()
|
196 |
-
|
197 |
-
def close(self):
|
198 |
-
if self._bandwidth_limiting_enabled and self._bytes_seen:
|
199 |
-
# This handles the case where the file is small enough to never
|
200 |
-
# trigger the threshold and thus is never subjugated to the
|
201 |
-
# leaky bucket on read(). This specifically happens for small
|
202 |
-
# uploads. So instead to account for those bytes, have
|
203 |
-
# it go through the leaky bucket when the file gets closed.
|
204 |
-
self._consume_through_leaky_bucket()
|
205 |
-
self._fileobj.close()
|
206 |
-
|
207 |
-
def __enter__(self):
|
208 |
-
return self
|
209 |
-
|
210 |
-
def __exit__(self, *args, **kwargs):
|
211 |
-
self.close()
|
212 |
-
|
213 |
-
|
214 |
-
class LeakyBucket:
|
215 |
-
def __init__(
|
216 |
-
self,
|
217 |
-
max_rate,
|
218 |
-
time_utils=None,
|
219 |
-
rate_tracker=None,
|
220 |
-
consumption_scheduler=None,
|
221 |
-
):
|
222 |
-
"""A leaky bucket abstraction to limit bandwidth consumption
|
223 |
-
|
224 |
-
:type rate: int
|
225 |
-
:type rate: The maximum rate to allow. This rate is in terms of
|
226 |
-
bytes per second.
|
227 |
-
|
228 |
-
:type time_utils: TimeUtils
|
229 |
-
:param time_utils: The time utility to use for interacting with time
|
230 |
-
|
231 |
-
:type rate_tracker: BandwidthRateTracker
|
232 |
-
:param rate_tracker: Tracks bandwidth consumption
|
233 |
-
|
234 |
-
:type consumption_scheduler: ConsumptionScheduler
|
235 |
-
:param consumption_scheduler: Schedules consumption retries when
|
236 |
-
necessary
|
237 |
-
"""
|
238 |
-
self._max_rate = float(max_rate)
|
239 |
-
self._time_utils = time_utils
|
240 |
-
if time_utils is None:
|
241 |
-
self._time_utils = TimeUtils()
|
242 |
-
self._lock = threading.Lock()
|
243 |
-
self._rate_tracker = rate_tracker
|
244 |
-
if rate_tracker is None:
|
245 |
-
self._rate_tracker = BandwidthRateTracker()
|
246 |
-
self._consumption_scheduler = consumption_scheduler
|
247 |
-
if consumption_scheduler is None:
|
248 |
-
self._consumption_scheduler = ConsumptionScheduler()
|
249 |
-
|
250 |
-
def consume(self, amt, request_token):
|
251 |
-
"""Consume an a requested amount
|
252 |
-
|
253 |
-
:type amt: int
|
254 |
-
:param amt: The amount of bytes to request to consume
|
255 |
-
|
256 |
-
:type request_token: RequestToken
|
257 |
-
:param request_token: The token associated to the consumption
|
258 |
-
request that is used to identify the request. So if a
|
259 |
-
RequestExceededException is raised the token should be used
|
260 |
-
in subsequent retry consume() request.
|
261 |
-
|
262 |
-
:raises RequestExceededException: If the consumption amount would
|
263 |
-
exceed the maximum allocated bandwidth
|
264 |
-
|
265 |
-
:rtype: int
|
266 |
-
:returns: The amount consumed
|
267 |
-
"""
|
268 |
-
with self._lock:
|
269 |
-
time_now = self._time_utils.time()
|
270 |
-
if self._consumption_scheduler.is_scheduled(request_token):
|
271 |
-
return self._release_requested_amt_for_scheduled_request(
|
272 |
-
amt, request_token, time_now
|
273 |
-
)
|
274 |
-
elif self._projected_to_exceed_max_rate(amt, time_now):
|
275 |
-
self._raise_request_exceeded_exception(
|
276 |
-
amt, request_token, time_now
|
277 |
-
)
|
278 |
-
else:
|
279 |
-
return self._release_requested_amt(amt, time_now)
|
280 |
-
|
281 |
-
def _projected_to_exceed_max_rate(self, amt, time_now):
|
282 |
-
projected_rate = self._rate_tracker.get_projected_rate(amt, time_now)
|
283 |
-
return projected_rate > self._max_rate
|
284 |
-
|
285 |
-
def _release_requested_amt_for_scheduled_request(
|
286 |
-
self, amt, request_token, time_now
|
287 |
-
):
|
288 |
-
self._consumption_scheduler.process_scheduled_consumption(
|
289 |
-
request_token
|
290 |
-
)
|
291 |
-
return self._release_requested_amt(amt, time_now)
|
292 |
-
|
293 |
-
def _raise_request_exceeded_exception(self, amt, request_token, time_now):
|
294 |
-
allocated_time = amt / float(self._max_rate)
|
295 |
-
retry_time = self._consumption_scheduler.schedule_consumption(
|
296 |
-
amt, request_token, allocated_time
|
297 |
-
)
|
298 |
-
raise RequestExceededException(
|
299 |
-
requested_amt=amt, retry_time=retry_time
|
300 |
-
)
|
301 |
-
|
302 |
-
def _release_requested_amt(self, amt, time_now):
|
303 |
-
self._rate_tracker.record_consumption_rate(amt, time_now)
|
304 |
-
return amt
|
305 |
-
|
306 |
-
|
307 |
-
class ConsumptionScheduler:
|
308 |
-
def __init__(self):
|
309 |
-
"""Schedules when to consume a desired amount"""
|
310 |
-
self._tokens_to_scheduled_consumption = {}
|
311 |
-
self._total_wait = 0
|
312 |
-
|
313 |
-
def is_scheduled(self, token):
|
314 |
-
"""Indicates if a consumption request has been scheduled
|
315 |
-
|
316 |
-
:type token: RequestToken
|
317 |
-
:param token: The token associated to the consumption
|
318 |
-
request that is used to identify the request.
|
319 |
-
"""
|
320 |
-
return token in self._tokens_to_scheduled_consumption
|
-
-    def schedule_consumption(self, amt, token, time_to_consume):
-        """Schedules a wait time to be able to consume an amount
-
-        :type amt: int
-        :param amt: The amount of bytes scheduled to be consumed
-
-        :type token: RequestToken
-        :param token: The token associated to the consumption
-            request that is used to identify the request.
-
-        :type time_to_consume: float
-        :param time_to_consume: The desired time it should take for that
-            specific request amount to be consumed, regardless of previously
-            scheduled consumption requests
-
-        :rtype: float
-        :returns: The amount of time to wait for the specific request before
-            actually consuming the specified amount.
-        """
-        self._total_wait += time_to_consume
-        self._tokens_to_scheduled_consumption[token] = {
-            'wait_duration': self._total_wait,
-            'time_to_consume': time_to_consume,
-        }
-        return self._total_wait
-
-    def process_scheduled_consumption(self, token):
-        """Processes a scheduled consumption request that has completed
-
-        :type token: RequestToken
-        :param token: The token associated to the consumption
-            request that is used to identify the request.
-        """
-        scheduled_retry = self._tokens_to_scheduled_consumption.pop(token)
-        self._total_wait = max(
-            self._total_wait - scheduled_retry['time_to_consume'], 0
-        )
-
-
-class BandwidthRateTracker:
-    def __init__(self, alpha=0.8):
-        """Tracks the rate of bandwidth consumption
-
-        :type alpha: float
-        :param alpha: The constant to use in calculating the exponential
-            moving average of the bandwidth rate. Specifically it is used
-            in the following calculation:
-
-            current_rate = alpha * new_rate + (1 - alpha) * current_rate
-
-            The value of this constant should be between 0 and 1.
-        """
-        self._alpha = alpha
-        self._last_time = None
-        self._current_rate = None
-
-    @property
-    def current_rate(self):
-        """The current transfer rate
-
-        :rtype: float
-        :returns: The current tracked transfer rate
-        """
-        if self._last_time is None:
-            return 0.0
-        return self._current_rate
-
-    def get_projected_rate(self, amt, time_at_consumption):
-        """Get the projected rate using a provided amount and time
-
-        :type amt: int
-        :param amt: The proposed amount to consume
-
-        :type time_at_consumption: float
-        :param time_at_consumption: The proposed time to consume at
-
-        :rtype: float
-        :returns: The consumption rate if that amt and time were consumed
-        """
-        if self._last_time is None:
-            return 0.0
-        return self._calculate_exponential_moving_average_rate(
-            amt, time_at_consumption
-        )
-
-    def record_consumption_rate(self, amt, time_at_consumption):
-        """Record the consumption rate based off amount and time point
-
-        :type amt: int
-        :param amt: The amount that got consumed
-
-        :type time_at_consumption: float
-        :param time_at_consumption: The time at which the amount was consumed
-        """
-        if self._last_time is None:
-            self._last_time = time_at_consumption
-            self._current_rate = 0.0
-            return
-        self._current_rate = self._calculate_exponential_moving_average_rate(
-            amt, time_at_consumption
-        )
-        self._last_time = time_at_consumption
-
-    def _calculate_rate(self, amt, time_at_consumption):
-        time_delta = time_at_consumption - self._last_time
-        if time_delta <= 0:
-            # While it is really unlikely to see this in an actual transfer,
-            # we do not want to return a negative rate or divide the amount
-            # by zero, so return an infinite rate instead, as the time delta
-            # is infinitesimally small.
-            return float('inf')
-        return amt / time_delta
-
-    def _calculate_exponential_moving_average_rate(
-        self, amt, time_at_consumption
-    ):
-        new_rate = self._calculate_rate(amt, time_at_consumption)
-        return self._alpha * new_rate + (1 - self._alpha) * self._current_rate
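The BandwidthRateTracker removed above smooths instantaneous throughput with an exponential moving average. A minimal standalone sketch of that calculation; the sample amounts (bytes) and timestamps (seconds) are invented for illustration:

    # Sketch only; amounts and timestamps are made up.
    alpha = 0.8
    current_rate, last_time = 0.0, 0.0
    for amt, now in [(1024, 1.0), (1024, 2.0), (4096, 3.0)]:
        new_rate = amt / (now - last_time)  # instantaneous rate
        current_rate = alpha * new_rate + (1 - alpha) * current_rate
        last_time = now
    print(current_rate)  # smoothed bytes-per-second estimate (~3473.4)

A high alpha weights recent samples heavily, so the tracker reacts quickly to bursts while still damping noise.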
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/py37compat.py
DELETED
@@ -1,31 +0,0 @@
-import sys
-
-
-def _pythonlib_compat():
-    """
-    On Python 3.7 and earlier, distutils would include the Python
-    library. See pypa/distutils#9.
-    """
-    from distutils import sysconfig
-
-    if not sysconfig.get_config_var('Py_ENABLED_SHARED'):
-        return
-
-    yield 'python{}.{}{}'.format(
-        sys.hexversion >> 24,
-        (sys.hexversion >> 16) & 0xFF,
-        sysconfig.get_config_var('ABIFLAGS'),
-    )
-
-
-def compose(f1, f2):
-    return lambda *args, **kwargs: f1(f2(*args, **kwargs))
-
-
-pythonlib = (
-    compose(list, _pythonlib_compat)
-    if sys.version_info < (3, 8)
-    and sys.platform != 'darwin'
-    and sys.platform[:3] != 'aix'
-    else list
-)
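In the deleted module above, compose(f1, f2) pipes the output of f2 into f1, so compose(list, _pythonlib_compat) turns the generator function into one that returns a list. A quick sketch with a hypothetical generator standing in for _pythonlib_compat:

    # Sketch only; letters() is a made-up stand-in.
    def compose(f1, f2):
        return lambda *args, **kwargs: f1(f2(*args, **kwargs))

    def letters():
        yield 'a'
        yield 'b'

    as_list = compose(list, letters)
    print(as_list())  # ['a', 'b']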
spaces/CALM/Dashboard/dashboard_utils/time_tracker.py
DELETED
@@ -1,32 +0,0 @@
-from functools import wraps
-from time import time
-
-
-def simple_time_tracker(log_fun):
-    def _simple_time_tracker(fn):
-        @wraps(fn)
-        def wrapped_fn(*args, **kwargs):
-            start_time = time()
-
-            try:
-                result = fn(*args, **kwargs)
-            finally:
-                elapsed_time = time() - start_time
-
-                # log the result
-                log_fun(
-                    {
-                        "function_name": fn.__name__,
-                        "total_time": elapsed_time,
-                    }
-                )
-
-            return result
-
-        return wrapped_fn
-
-    return _simple_time_tracker
-
-
-def _log(message):
-    print("[SimpleTimeTracker] {function_name} {total_time:.3f}".format(**message))
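Usage of the deleted decorator is straightforward; a sketch with a made-up workload function:

    # Sketch only; busy() is hypothetical.
    @simple_time_tracker(_log)
    def busy(n):
        return sum(range(n))

    busy(1_000_000)  # prints e.g. "[SimpleTimeTracker] busy 0.031"

Because the logging happens in a finally block, the elapsed time is reported even when the wrapped function raises.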
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/tutorials/install.md
DELETED
@@ -1 +0,0 @@
-../../INSTALL.md
spaces/CVPR/LIVE/color.cpp
DELETED
@@ -1,25 +0,0 @@
-#include "color.h"
-
-void LinearGradient::copy_to(ptr<float> stop_offsets,
-                             ptr<float> stop_colors) const {
-    float *o = stop_offsets.get();
-    float *c = stop_colors.get();
-    for (int i = 0; i < num_stops; i++) {
-        o[i] = this->stop_offsets[i];
-    }
-    for (int i = 0; i < 4 * num_stops; i++) {
-        c[i] = this->stop_colors[i];
-    }
-}
-
-void RadialGradient::copy_to(ptr<float> stop_offsets,
-                             ptr<float> stop_colors) const {
-    float *o = stop_offsets.get();
-    float *c = stop_colors.get();
-    for (int i = 0; i < num_stops; i++) {
-        o[i] = this->stop_offsets[i];
-    }
-    for (int i = 0; i < 4 * num_stops; i++) {
-        c[i] = this->stop_colors[i];
-    }
-}
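Both copy_to methods above flatten gradient stops into plain float arrays: one offset per stop and, presumably, four color channels (RGBA) per stop, hence the 4 * num_stops loop. A sketch of the same flat layout:

    # Sketch only; stops are hypothetical (offset, RGBA) pairs.
    stops = [(0.0, (1.0, 0.0, 0.0, 1.0)), (1.0, (0.0, 0.0, 1.0, 1.0))]
    offsets = [off for off, _ in stops]
    colors = [ch for _, rgba in stops for ch in rgba]
    assert len(colors) == 4 * len(stops)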
spaces/CVPR/LIVE/thrust/dependencies/cub/cmake/CubHeaderTesting.cmake
DELETED
@@ -1,29 +0,0 @@
-# For every public header, build a translation unit containing `#include <header>`
-# to let the compiler try to figure out warnings in that header if it is not otherwise
-# included in tests, and also to verify if the headers are modular enough.
-# .inl files are not globbed for, because they are not supposed to be used as public
-# entrypoints.
-
-file(GLOB_RECURSE headers
-  RELATIVE "${CUB_SOURCE_DIR}/cub"
-  CONFIGURE_DEPENDS
-  cub/*.cuh
-)
-
-set(headertest_srcs)
-foreach (header IN LISTS headers)
-  set(headertest_src "headers/${header}.cu")
-  configure_file("${CUB_SOURCE_DIR}/cmake/header_test.in" "${headertest_src}")
-  list(APPEND headertest_srcs "${headertest_src}")
-endforeach()
-
-foreach(cub_target IN LISTS CUB_TARGETS)
-  cub_get_target_property(config_prefix ${cub_target} PREFIX)
-
-  set(headertest_target ${config_prefix}.headers)
-  add_library(${headertest_target} OBJECT ${headertest_srcs})
-  target_link_libraries(${headertest_target} PUBLIC ${cub_target})
-  cub_clone_target_properties(${headertest_target} ${cub_target})
-
-  add_dependencies(${config_prefix}.all ${headertest_target})
-endforeach()
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/reduce.h
DELETED
@@ -1,73 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-/*! \file reduce.h
- *  \brief Sequential implementation of reduce algorithm.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/function.h>
-#include <thrust/system/detail/sequential/execution_policy.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace detail
-{
-namespace sequential
-{
-
-
-__thrust_exec_check_disable__
-template<typename DerivedPolicy,
-         typename InputIterator,
-         typename OutputType,
-         typename BinaryFunction>
-__host__ __device__
-  OutputType reduce(sequential::execution_policy<DerivedPolicy> &,
-                    InputIterator begin,
-                    InputIterator end,
-                    OutputType init,
-                    BinaryFunction binary_op)
-{
-  // wrap binary_op
-  thrust::detail::wrapped_function<
-    BinaryFunction,
-    OutputType
-  > wrapped_binary_op(binary_op);
-
-  // initialize the result
-  OutputType result = init;
-
-  while(begin != end)
-  {
-    result = wrapped_binary_op(result, *begin);
-    ++begin;
-  } // end while
-
-  return result;
-}
-
-
-} // end namespace sequential
-} // end namespace detail
-} // end namespace system
-} // end namespace thrust
-
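The deleted reduce above is a plain left fold over [begin, end) with an initial value, the same shape as Python's functools.reduce:

    # Sketch only; values and init are made up.
    from functools import reduce

    values = [1, 2, 3, 4]
    result = reduce(lambda acc, x: acc + x, values, 10)
    print(result)  # 20: starts at init=10, then folds in each element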