helper_q_tool.py
import pickle
import pennylane as qml
from pennylane import numpy as np
from pennylane.templates import RandomLayers
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import os
from scipy.io import wavfile
from tqdm import tqdm
import warnings
import qiskit
# from qiskit.providers.aer.noise.device import basic_device_noise_model
n_w = 4  # number of wires (default: 4)
noise_mode = False  # set True to run with a noisy (QPU-like) simulator; requires a `noise_model` to be built beforehand, e.g. from a Qiskit backend
if noise_mode:
    dev = qml.device("qiskit.aer", wires=n_w, noise_model=noise_model)
else:
    dev = qml.device("default.qubit", wires=n_w)

n_layers = 1  # number of random layers in the quantum circuit
# Random circuit parameters (default: 1 layer, n_w = 4 wires)
rand_params = np.random.uniform(high=2 * np.pi, size=(n_layers, n_w))
@qml.qnode(dev)
def circuit(phi=None):
    # Encoding of 4 classical input values as RY rotation angles
    for j in range(n_w):
        qml.RY(np.pi * phi[j], wires=j)
    # Random quantum circuit
    RandomLayers(rand_params, wires=list(range(n_w)))
    # Measurement producing 4 classical output values
    return [qml.expval(qml.PauliZ(j)) for j in range(n_w)]
def quanv(image, kr=2):
    """Convolve the input spectrogram with many applications of the same quantum circuit."""
    h_feat, w_feat, ch_n = image.shape
    out = np.zeros((h_feat // kr, w_feat // kr, n_w))
    # Loop over the coordinates of the top-left pixel of each kr x kr square
    for j in range(0, h_feat, kr):
        for k in range(0, w_feat, kr):
            # Process a kr x kr region of the image with the quantum circuit.
            # (For a 3x3 kernel the encoding would instead use the nine values
            #  image[j, k, 0] ... image[j + 2, k + 2, 0].)
            q_results = circuit(
                phi=[image[j, k, 0], image[j, k + 1, 0], image[j + 1, k, 0], image[j + 1, k + 1, 0]]
            )
            # Assign expectation values to the channels of output pixel (j // kr, k // kr)
            for c in range(n_w):
                out[j // kr, k // kr, c] = q_results[c]
    return out
def gen_qspeech(x_train, x_valid, kr):  # kernel size = 2x2 or 3x3
    q_train = []
    print("Quantum pre-processing of train Speech:")
    for idx, img in enumerate(x_train):
        print("{}/{} ".format(idx + 1, len(x_train)), end="\r")
        q_train.append(quanv(img, kr))
    q_train = np.asarray(q_train)

    q_valid = []
    print("\nQuantum pre-processing of validation Speech:")
    for idx, img in enumerate(x_valid):
        print("{}/{} ".format(idx + 1, len(x_valid)), end="\r")
        q_valid.append(quanv(img, kr))
    q_valid = np.asarray(q_valid)
    return q_train, q_valid
import librosa
import librosa.display
def plot_acc_loss(q_history, x_history, v_history, data_ix):
    plt.figure()
    plt.style.use("seaborn")
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 9))
    ax1.plot(v_history.history["val_accuracy"], "-ok", label="Baseline Attn-BiLSTM")
    ax1.plot(q_history.history["val_accuracy"], "-ob", label="With Quanv Layer")
    ax1.plot(x_history.history["val_accuracy"], "-og", label="With Conv Layer")
    ax1.set_ylabel("Accuracy")
    ax1.set_ylim([0, 1])
    ax1.set_xlabel("Epoch")
    ax1.legend()

    ax2.plot(v_history.history["val_loss"], "-ok", label="Baseline Attn-BiLSTM")
    ax2.plot(q_history.history["val_loss"], "-ob", label="With Quanv Layer")
    ax2.plot(x_history.history["val_loss"], "-og", label="With Conv Layer")
    ax2.set_ylabel("Loss")
    # ax2.set_ylim(top=5.5)
    ax2.set_xlabel("Epoch")
    ax2.legend()
    plt.tight_layout()
    plt.savefig("images/" + data_ix + "_conv_speech_loss.png")
def show_speech(x_train, q_train, use_ch, tmp="tmp.png"):
    plt.figure()
    plt.subplot(5, 1, 1)
    if not use_ch:
        librosa.display.specshow(librosa.power_to_db(x_train[0, :, :, 0], ref=np.max))
    else:
        librosa.display.specshow(librosa.power_to_db(x_train[0, :, :], ref=np.max))
    plt.title('Input Speech')
    for i in range(4):
        plt.subplot(5, 1, i + 2)
        librosa.display.specshow(librosa.power_to_db(q_train[0, :, :, i], ref=np.max))
        plt.title('Channel ' + str(i + 1) + ': Quantum Compressed Speech')
    plt.tight_layout()
    plt.savefig("images/speech_encoder_" + tmp)