BoltzmannEntropy committed on
Commit 8248c84
1 Parent(s): 3901ce3

First commit

Files changed (5)
  1. Dockerfile +93 -0
  2. README.md +1 -1
  3. app.py +131 -0
  4. build.bat +2 -0
  5. run-d.bat +1 -0
Dockerfile ADDED
@@ -0,0 +1,93 @@
+ # Dockerfile customized for deployment on the HuggingFace Spaces platform
+
+ # -- The Dockerfile has been tailored specifically for use on HuggingFace.
+ # -- It implies that certain modifications or optimizations have been made with HuggingFace's environment in mind.
+ # -- It uses "HuggingFace Spaces" to be more specific about the target platform.
+
+ # FROM pytorch/pytorch:2.2.1-cuda12.1-cudnn8-devel
+ FROM pytorch/pytorch:2.4.0-cuda12.1-cudnn9-devel
+ # FOR HF
+
+ USER root
+
+ ENV DEBIAN_FRONTEND=noninteractive
+ RUN apt-get update && apt-get install -y \
+     git \
+     cmake \
+     python3 \
+     python3-pip \
+     python3-venv \
+     python3-dev \
+     python3-numpy \
+     gcc \
+     build-essential \
+     gfortran \
+     wget \
+     curl \
+     pkg-config \
+     software-properties-common \
+     zip \
+     && apt-get clean && rm -rf /tmp/* /var/tmp/*
+
+ RUN apt-get update && DEBIAN_FRONTEND=noninteractive \
+     apt-get install -y python3.10 python3-pip
+
+ RUN apt-get install -y libopenblas-base libopenmpi-dev
+
+ ENV TZ=Asia/Dubai
+ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
+
+
+ RUN useradd -m -u 1000 user
+
+ RUN apt-get update && apt-get install -y sudo && \
+     echo 'user ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+
+ USER user
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+
+ RUN mkdir $HOME/app
+ RUN mkdir $HOME/app/test_images
+
+ # WORKDIR $HOME/app
+
+ RUN chown -R user:user $HOME/app
+
+ USER user
+ WORKDIR $HOME/app
+
+ RUN python -m pip install qwen-vl-utils
+ RUN python -m pip install --pre -U -f https://mlc.ai/wheels mlc-llm-nightly-cu122 mlc-ai-nightly-cu122
+ # python -m pip install --pre -U -f https://mlc.ai/wheels mlc-llm-nightly-cpu mlc-ai-nightly-cpu
+
+ RUN python3 -m pip install chromadb db-sqlite3 auto-gptq exllama sqlalchemy
+ WORKDIR $HOME/app
+ RUN git clone https://github.com/casper-hansen/AutoAWQ
+ WORKDIR $HOME/app/AutoAWQ/
+ RUN python3 -m pip install -e .
+ WORKDIR $HOME/app
+ # ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
+ RUN python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
+ RUN python -m pip install accelerate diffusers datasets timm flash-attn==2.6.1 gradio
+
+ RUN python3 -m pip install --no-deps optimum
+ RUN python3 -m pip install --no-deps "autoawq>=0.1.8"
+
+ # This seems to be a must: Intel Extension for PyTorch 2.4 needs to work with PyTorch 2.4.*, but PyTorch 2.2.2 is
+ RUN python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
+ RUN python3 -m pip install -U accelerate
+ RUN python3 -m pip install -U git+https://github.com/huggingface/transformers
+
+ WORKDIR $HOME/app
+ COPY --chown=user:user app.py .
+ COPY --chown=user:user test_images /home/user/app/test_images
+
+ ENV PYTHONUNBUFFERED=1 GRADIO_ALLOW_FLAGGING=never GRADIO_NUM_PORTS=1 GRADIO_SERVER_NAME=0.0.0.0 GRADIO_SERVER_PORT=7860 SYSTEM=spaces
+ RUN python3 -m pip install pennylane sympy pennylane-qiskit duckdb
+ WORKDIR $HOME/app
+
+ EXPOSE 8097 7842 8501 8000 6666 7860
+
+ CMD ["python", "app.py"]
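Note on the Gradio-related ENV line above: GRADIO_SERVER_NAME=0.0.0.0 and GRADIO_SERVER_PORT=7860 are environment variables that recent Gradio releases pick up at launch time, which is why app.py can call app.launch() with no arguments. If a pinned Gradio version ignores them, the app can bind explicitly instead; a minimal sketch under that assumption (the explicit arguments are not part of this commit):

```python
import os
import gradio as gr

# Fall back to the values the Dockerfile exports if the variables are unset.
server_name = os.environ.get("GRADIO_SERVER_NAME", "0.0.0.0")
server_port = int(os.environ.get("GRADIO_SERVER_PORT", "7860"))

with gr.Blocks() as demo:
    gr.Markdown("Placeholder UI")

# Bind explicitly so the app is reachable from outside the container on port 7860.
demo.launch(server_name=server_name, server_port=server_port)
```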
README.md CHANGED
@@ -8,4 +8,4 @@ pinned: false
  short_description: 'An open-source dataset designed to assess whether solutions '
  ---
 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
app.py ADDED
@@ -0,0 +1,131 @@
+ import numpy as np
+ import random
+ import io
+ import duckdb
+ import gradio as gr
+ import math
+ from datetime import datetime
+ import PIL
+ import matplotlib.pyplot as plt
+ from PIL import Image
+ import pennylane as qml
+
+ # Hugging Face and DuckDB function placeholders
+ def store_in_hf_dataset(data):
+     # Implement storing data in the Hugging Face dataset
+     pass
+
+ def load_from_hf_dataset():
+     # Implement loading data from the Hugging Face dataset
+     return []
+
+ # Store image in bytes for DuckDB
+ def pil_image_to_bytes(image):
+     img_byte_arr = io.BytesIO()
+     image.save(img_byte_arr, format='PNG')
+     return img_byte_arr.getvalue()
+
+ # Function to generate a random Hamiltonian
+ def generate_random_hamiltonian(num_qubits):
+     terms = []
+     for _ in range(random.randint(1, 5)):
+         coeff = round(random.uniform(-1, 1), 2)
+         pauli_ops = [random.choice(['I', 'X', 'Y', 'Z']) for _ in range(num_qubits)]
+         term = f"{coeff} * {' '.join(pauli_ops)}"
+         terms.append(term)
+     return " + ".join(terms)
+
+ # Store data in DuckDB
+ def store_in_duckdb(data, db_file='quantum_hamiltonians.duckdb'):
+     conn = duckdb.connect(database=db_file)
+     conn.execute("""CREATE TABLE IF NOT EXISTS hamiltonians (
+                         id INTEGER,
+                         plot BLOB,
+                         hamiltonian VARCHAR,
+                         qasm_code VARCHAR,
+                         trotter_code VARCHAR,
+                         num_qubits INTEGER,
+                         trotter_order INTEGER,
+                         timestamp TIMESTAMP
+                     )""")
+     conn.executemany("""INSERT INTO hamiltonians (id, plot, hamiltonian, qasm_code, trotter_code, num_qubits, trotter_order, timestamp)
+                         VALUES (?, ?, ?, ?, ?, ?, ?, ?)""", data)
+     conn.close()
+
+ # Load results from DuckDB
+ def load_from_duckdb(db_file='quantum_hamiltonians.duckdb'):
+     conn = duckdb.connect(database=db_file)
+     df = conn.execute("SELECT * FROM hamiltonians").df()
+     conn.close()
+     return df
+
+ # Function to generate Hamiltonians
+ def generate_hamiltonians(num_hamiltonians, selected_qubits, selected_order, write_to_hf, write_to_duckdb):
+     results_table = []
+     timestamp = datetime.now()
+
+     for i in range(num_hamiltonians):
+         num_qubits = random.choice(selected_qubits)
+         order = selected_order
+         hamiltonian = generate_random_hamiltonian(num_qubits)
+         qasm_code = hamiltonian_to_qasm(hamiltonian, num_qubits)
+         trotter_code = trotter_decomposition(hamiltonian, order)
+
+         # Create a dummy plot (replace with actual plot creation logic)
+         fig, ax = plt.subplots()
+         ax.plot([0, 1], [0, 1])
+         circuit_plot_image = buffer_plot_and_get(fig)
+         circuit_plot_bytes = pil_image_to_bytes(circuit_plot_image)
+
+         # Append data to results table
+         results_table.append((i + 1, circuit_plot_bytes, hamiltonian, qasm_code, trotter_code, num_qubits, order, timestamp))
+
+     # Write data to Hugging Face dataset if selected
+     if write_to_hf:
+         store_in_hf_dataset(results_table)
+
+     # Write data to DuckDB if selected
+     if write_to_duckdb:
+         store_in_duckdb(results_table)
+
+ # Function to load results from either DuckDB or the Hugging Face dataset.
+ # NOTE: parameters renamed so they do not shadow the module-level loader functions.
+ def load_results(use_hf, use_duckdb):
+     if use_hf:
+         return load_from_hf_dataset()  # Load from HF dataset
+     if use_duckdb:
+         return load_from_duckdb().to_html()  # Load from DuckDB, rendered as an HTML table for gr.HTML
+
+ # Gradio app
+ with gr.Blocks() as app:
+     gr.Markdown("# Quantum Hamiltonian Generator")
+
+     with gr.Tab("Generate Hamiltonians"):
+         num_hamiltonians = gr.Dropdown(label="Select number of Hamiltonians to generate", choices=[1, 10, 20, 100], value=20)
+         qubit_choices = [1, 2, 3, 4, 5, 6]
+         selected_qubits = gr.CheckboxGroup(label="Select number of qubits", choices=qubit_choices, value=[1])
+         order_choices = [1, 2, 3, 4, 5]
+         selected_order = gr.Dropdown(label="Select Trotter order", choices=order_choices, value=1)
+
+         # Checkboxes for writing to HF dataset and DuckDB
+         write_to_hf = gr.Checkbox(label="Write to Hugging Face dataset", value=False)
+         write_to_duckdb = gr.Checkbox(label="Write to DuckDB", value=True)
+
+         generate_button = gr.Button("Generate Hamiltonians")
+         status = gr.Markdown("Click 'Generate Hamiltonians' to start the process.")
+
+         def update_status(num, qubits, order, write_hf, write_duckdb):
+             generate_hamiltonians(num, qubits, order, write_hf, write_duckdb)
+             return "Data stored as per selection."
+
+         generate_button.click(update_status, inputs=[num_hamiltonians, selected_qubits, selected_order, write_to_hf, write_to_duckdb], outputs=status)
+
+     with gr.Tab("View Results"):
+         # Checkboxes renamed so they do not rebind the load_from_hf_dataset / load_from_duckdb functions
+         load_hf_checkbox = gr.Checkbox(label="Load from Hugging Face dataset", value=False)
+         load_duckdb_checkbox = gr.Checkbox(label="Load from DuckDB", value=True)
+
+         load_button = gr.Button("Load Results")
+         output_display = gr.HTML()
+
+         load_button.click(load_results, inputs=[load_hf_checkbox, load_duckdb_checkbox], outputs=output_display)
+
+ app.launch()
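Note: app.py calls three helpers that this commit does not define anywhere — hamiltonian_to_qasm, trotter_decomposition, and buffer_plot_and_get — so generate_hamiltonians would raise a NameError as committed. The sketch below matches the call sites but is an illustrative assumption, not the author's implementation: the QASM output is a comment-only stub and the Trotter listing is a plain first-order-style expansion.

```python
import io
from PIL import Image

def buffer_plot_and_get(fig):
    """Render a matplotlib figure into a PIL Image via an in-memory PNG buffer."""
    buf = io.BytesIO()
    fig.savefig(buf, format="png")
    buf.seek(0)
    return Image.open(buf)

def hamiltonian_to_qasm(hamiltonian, num_qubits):
    """Emit an OpenQASM 2.0 header plus one comment line per Hamiltonian term (stub)."""
    lines = ["OPENQASM 2.0;", 'include "qelib1.inc";', f"qreg q[{num_qubits}];"]
    lines += [f"// term: {term}" for term in hamiltonian.split(" + ")]
    return "\n".join(lines)

def trotter_decomposition(hamiltonian, order):
    """List the exponentiated terms repeated `order` times, first-order Trotter style (stub)."""
    terms = hamiltonian.split(" + ")
    steps = []
    for step in range(1, order + 1):
        steps += [f"exp(-i * ({term}) / {order})  # slice {step}" for term in terms]
    return "\n".join(steps)
```

Similarly, store_in_hf_dataset and load_from_hf_dataset are left as placeholders in app.py. One possible implementation uses the datasets library, which the Dockerfile already installs; the repository id below is a hypothetical placeholder, and pushing requires a Hugging Face token to be configured in the Space:

```python
from datasets import Dataset, load_dataset

COLUMNS = ["id", "plot", "hamiltonian", "qasm_code", "trotter_code",
           "num_qubits", "trotter_order", "timestamp"]
REPO_ID = "your-username/quantum-hamiltonians"  # hypothetical dataset repo

def store_in_hf_dataset(data):
    # `data` is the list of row tuples that generate_hamiltonians builds for DuckDB.
    table = {col: [row[i] for row in data] for i, col in enumerate(COLUMNS)}
    Dataset.from_dict(table).push_to_hub(REPO_ID)

def load_from_hf_dataset():
    # Mirror load_from_duckdb() by returning a pandas DataFrame.
    return load_dataset(REPO_ID, split="train").to_pandas()
```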
build.bat ADDED
@@ -0,0 +1,2 @@
+ docker build -t hf-docker .
+
run-d.bat ADDED
@@ -0,0 +1 @@
+ docker run --gpus all --rm -it --shm-size=8gb --memory="16g" --env="DISPLAY" -p 8077:7842 -p 7860:7860 -p 8501:8501 -v %cd%:/home/user/app -v %cd%:/home/user/sharedfolder -v %cd%/.cache:/home/user/.cache hf-docker:latest bash