Columns: function (string, 18 to 3.86k chars) · intent_category (string, 5 to 24 chars)
def save_model(self, save_path):
    # save model dict with pickle
    model_dic = {
        "num_bp1": self.num_bp1,
        "num_bp2": self.num_bp2,
        "num_bp3": self.num_bp3,
        "conv1": self.conv1,
        "step_conv1": self.step_conv1,
        "size_pooling1": self.size_pooling1,
        "rate_weight": self.rate_weight,
        "rate_thre": self.rate_thre,
        "w_conv1": self.w_conv1,
        "wkj": self.wkj,
        "vji": self.vji,
        "thre_conv1": self.thre_conv1,
        "thre_bp2": self.thre_bp2,
        "thre_bp3": self.thre_bp3,
    }
    with open(save_path, "wb") as f:
        pickle.dump(model_dic, f)
    print(f"Model saved: {save_path}")
neural_network
def read_model(cls, model_path):
    # read saved model
    with open(model_path, "rb") as f:
        model_dic = pickle.load(f)
    conv_get = model_dic.get("conv1")
    conv_get.append(model_dic.get("step_conv1"))
    size_p1 = model_dic.get("size_pooling1")
    bp1 = model_dic.get("num_bp1")
    bp2 = model_dic.get("num_bp2")
    bp3 = model_dic.get("num_bp3")
    r_w = model_dic.get("rate_weight")
    r_t = model_dic.get("rate_thre")
    # create model instance
    conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
    # modify model parameters
    conv_ins.w_conv1 = model_dic.get("w_conv1")
    conv_ins.wkj = model_dic.get("wkj")
    conv_ins.vji = model_dic.get("vji")
    conv_ins.thre_conv1 = model_dic.get("thre_conv1")
    conv_ins.thre_bp2 = model_dic.get("thre_bp2")
    conv_ins.thre_bp3 = model_dic.get("thre_bp3")
    return conv_ins
neural_network
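A hedged round-trip sketch for the persistence pair above. The constructor arguments mirror what read_model passes to CNN, but the concrete values and the file name are assumptions for illustration, not taken from the rows:

# conv parameters in the layout read_model rebuilds: [kernel_size, n_kernels, step]
model = CNN([7, 4, 1], 2, 49, 9, 3, 0.2, 0.2)   # hypothetical hyperparameters
model.save_model("cnn_model.pkl")               # pickles the parameter dict
restored = CNN.read_model("cnn_model.pkl")      # assumes read_model is a @classmethod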
def sig(self, x): return 1 / (1 + np.exp(-x))
neural_network
def do_round(self, x): return round(x, 3)
neural_network
def convolute(self, data, convs, w_convs, thre_convs, conv_step):
    # convolution process
    size_conv = convs[0]
    num_conv = convs[1]
    size_data = np.shape(data)[0]
    # get the data slices of the original image, data_focus
    data_focus = []
    for i_focus in range(0, size_data - size_conv + 1, conv_step):
        for j_focus in range(0, size_data - size_conv + 1, conv_step):
            focus = data[
                i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
            ]
            data_focus.append(focus)
    # calculate the feature map of every single kernel, saved as a list of matrices
    data_featuremap = []
    size_feature_map = int((size_data - size_conv) / conv_step + 1)
    for i_map in range(num_conv):
        featuremap = []
        for i_focus in range(len(data_focus)):
            net_focus = (
                np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                - thre_convs[i_map]
            )
            featuremap.append(self.sig(net_focus))
        featuremap = np.asmatrix(featuremap).reshape(
            size_feature_map, size_feature_map
        )
        data_featuremap.append(featuremap)
    # expand each data slice to one dimension
    # (was `self.Expand_Mat`, which is undefined; the helper below is `_expand_mat`)
    focus1_list = []
    for each_focus in data_focus:
        focus1_list.extend(self._expand_mat(each_focus))
    focus_list = np.asarray(focus1_list)
    return focus_list, data_featuremap
neural_network
def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
    # pooling process
    size_map = len(featuremaps[0])
    size_pooled = int(size_map / size_pooling)
    featuremap_pooled = []
    for i_map in range(len(featuremaps)):
        feature_map = featuremaps[i_map]
        map_pooled = []
        for i_focus in range(0, size_map, size_pooling):
            for j_focus in range(0, size_map, size_pooling):
                focus = feature_map[
                    i_focus : i_focus + size_pooling,
                    j_focus : j_focus + size_pooling,
                ]
                if pooling_type == "average_pool":
                    # average pooling
                    map_pooled.append(np.average(focus))
                elif pooling_type == "max_pooling":
                    # max pooling
                    map_pooled.append(np.max(focus))
        map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
        featuremap_pooled.append(map_pooled)
    return featuremap_pooled
neural_network
def _expand(self, data):
    # expand three-dimensional data to a one-dimensional list
    data_expanded = []
    for i in range(len(data)):
        shapes = np.shape(data[i])
        data_listed = data[i].reshape(1, shapes[0] * shapes[1])
        data_listed = data_listed.getA().tolist()[0]
        data_expanded.extend(data_listed)
    data_expanded = np.asarray(data_expanded)
    return data_expanded
neural_network
def _expand_mat(self, data_mat):
    # expand a matrix to a one-dimensional array
    data_mat = np.asarray(data_mat)
    shapes = np.shape(data_mat)
    data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
    return data_expanded
neural_network
def _calculate_gradient_from_pool(
    self, out_map, pd_pool, num_map, size_map, size_pooling
):
    # upsample the pooled gradients and multiply by the sigmoid derivative
    pd_all = []
    i_pool = 0
    for i_map in range(num_map):
        pd_conv1 = np.ones((size_map, size_map))
        for i in range(0, size_map, size_pooling):
            for j in range(0, size_map, size_pooling):
                pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                    i_pool
                ]
                i_pool = i_pool + 1
        pd_conv2 = np.multiply(
            pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
        )
        pd_all.append(pd_conv2)
    return pd_all
neural_network
def draw_error():
    # relies on error_accuracy, n_repeat and all_mse from the enclosing scope
    yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
    plt.plot(all_mse, "+-")
    plt.plot(yplot, "r--")
    plt.xlabel("Learning Times")
    plt.ylabel("All_mse")
    plt.grid(True, alpha=0.5)
    plt.show()
neural_network
def predict(self, datas_test):
    # model predict
    produce_out = []
    print("-------------------Start Testing-------------------------")
    print((" - - Shape: Test_Data ", np.shape(datas_test)))
    for p in range(len(datas_test)):
        data_test = np.asmatrix(datas_test[p])
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        data_bp_input = self._expand(data_pooled1)
        bp_out1 = data_bp_input
        bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
        bp_out2 = self.sig(bp_net_j)
        bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
        bp_out3 = self.sig(bp_net_k)
        produce_out.extend(bp_out3.getA().tolist())
    res = [list(map(self.do_round, each)) for each in produce_out]
    return np.asarray(res)
neural_network
def convolution(self, data):
    # return the image data after the convolution process so we can inspect it
    data_test = np.asmatrix(data)
    data_focus1, data_conved1 = self.convolute(
        data_test,
        self.conv1,
        self.w_conv1,
        self.thre_conv1,
        conv_step=self.step_conv1,
    )
    data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
    return data_conved1, data_pooled1
neural_network
def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
    # Input values provided for training the model.
    self.input_array = input_array
    # Random initial weights, where the first argument is the number of nodes
    # in the previous layer and the second the number of nodes in the next.
    # self.input_array.shape[1] is the number of nodes in the input layer;
    # the first hidden layer has 4 nodes.
    self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
        self.input_array.shape[1], 4
    )
    # Random initial weights between the first hidden layer (4 nodes)
    # and the second hidden layer (3 nodes).
    self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
        4, 3
    )
    # Random initial weights between the second hidden layer (3 nodes)
    # and the output layer (1 node).
    self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
    # Real output values provided.
    self.output_array = output_array
    # Predicted output values, initially all zeroes.
    self.predicted_output = numpy.zeros(output_array.shape)
neural_network
def feedforward(self) -> numpy.ndarray:
    # layer_between_input_and_first_hidden_layer connects the input nodes
    # with the first hidden layer nodes.
    self.layer_between_input_and_first_hidden_layer = sigmoid(
        numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
    )
    # layer_between_first_hidden_layer_and_second_hidden_layer connects the
    # first hidden set of nodes with the second hidden set of nodes.
    self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
        numpy.dot(
            self.layer_between_input_and_first_hidden_layer,
            self.first_hidden_layer_and_second_hidden_layer_weights,
        )
    )
    # layer_between_second_hidden_layer_and_output connects the second
    # hidden layer with the output node.
    self.layer_between_second_hidden_layer_and_output = sigmoid(
        numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer,
            self.second_hidden_layer_and_output_layer_weights,
        )
    )
    return self.layer_between_second_hidden_layer_and_output
neural_network
def back_propagation(self) -> None:
    updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
        self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
        2
        * (self.output_array - self.predicted_output)
        * sigmoid_derivative(self.predicted_output),
    )
    updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
        self.layer_between_input_and_first_hidden_layer.T,
        numpy.dot(
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
            self.second_hidden_layer_and_output_layer_weights.T,
        )
        * sigmoid_derivative(
            self.layer_between_first_hidden_layer_and_second_hidden_layer
        ),
    )
    updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
        self.input_array.T,
        numpy.dot(
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
            self.first_hidden_layer_and_second_hidden_layer_weights.T,
        )
        * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
    )
    self.input_layer_and_first_hidden_layer_weights += (
        updated_input_layer_and_first_hidden_layer_weights
    )
    self.first_hidden_layer_and_second_hidden_layer_weights += (
        updated_first_hidden_layer_and_second_hidden_layer_weights
    )
    self.second_hidden_layer_and_output_layer_weights += (
        updated_second_hidden_layer_and_output_layer_weights
    )
neural_network
def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
    for iteration in range(1, iterations + 1):
        # store the forward pass as predicted_output, which back_propagation
        # reads (it was assigned to self.output before, leaving
        # predicted_output stuck at its initial zeros)
        self.predicted_output = self.feedforward()
        self.back_propagation()
        if give_loss:
            loss = numpy.mean(numpy.square(output - self.feedforward()))
            print(f"Iteration {iteration} Loss: {loss}")
neural_network
def predict(self, input_arr: numpy.ndarray) -> int:
    # Input values for which the predictions are to be made.
    self.array = input_arr
    self.layer_between_input_and_first_hidden_layer = sigmoid(
        numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
    )
    self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
        numpy.dot(
            self.layer_between_input_and_first_hidden_layer,
            self.first_hidden_layer_and_second_hidden_layer_weights,
        )
    )
    self.layer_between_second_hidden_layer_and_output = sigmoid(
        numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer,
            self.second_hidden_layer_and_output_layer_weights,
        )
    )
    return int(self.layer_between_second_hidden_layer_and_output > 0.6)
neural_network
def sigmoid(value: numpy.ndarray) -> numpy.ndarray: return 1 / (1 + numpy.exp(-value))
neural_network
def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray: return value * (1 - value)
neural_network
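A quick numerical check of the identity the two helpers above rely on, σ'(x) = σ(x)·(1 − σ(x)); note that sigmoid_derivative expects an already-activated value, not the raw input. A minimal sketch, assuming both functions from the rows above are in scope:

import numpy

x = numpy.array([0.0, 1.0, -2.0])
s = sigmoid(x)                        # activated values
analytic = s * (1 - s)                # derivative via the identity
assert numpy.allclose(sigmoid_derivative(s), analytic)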
def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Instantiate the neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )
    # Call the training function.
    # Set give_loss to True to print the loss at every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
neural_network
def sigmoid_function(value: float, deriv: bool = False) -> float:
    # with deriv=True, `value` is assumed to be an already-activated output
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
neural_network
def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
neural_network
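A minimal driver for the single-neuron trainer above, assuming sigmoid_function from the previous row is in scope. The module-level constant INITIAL_VALUE is read but not defined in the row, so the value here is an assumption for illustration:

import math
import random

INITIAL_VALUE = 0.02          # assumed module constant; not defined in the row

random.seed(0)                # reproducible illustration
result = forward_propagation(expected=35, number_propagations=100_000)
print(result)                 # should approach 35 given enough iterations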
def contrast(c: int) -> int: return int(128 + factor * (c - 128))
digital_image_processing
def to_grayscale(blue, green, red): return 0.2126 * red + 0.587 * green + 0.114 * blue
digital_image_processing
def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None): self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
digital_image_processing
def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
    if red is not None:
        self.red = red
    if green is not None:
        self.green = green
    if blue is not None:
        self.blue = blue
    if red_edge is not None:
        self.redEdge = red_edge
    if nir is not None:
        self.nir = nir
    return True
digital_image_processing
def calculation(
    self, index="", red=None, green=None, blue=None, red_edge=None, nir=None
):
    self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    funcs = {
        "ARVI2": self.arv12,
        "CCCI": self.ccci,
        "CVI": self.cvi,
        "GLI": self.gli,
        "NDVI": self.ndvi,
        "BNDVI": self.bndvi,
        "redEdgeNDVI": self.red_edge_ndvi,
        "GNDVI": self.gndvi,
        "GBNDVI": self.gbndvi,
        "GRNDVI": self.grndvi,
        "RBNDVI": self.rbndvi,
        "PNDVI": self.pndvi,
        "ATSAVI": self.atsavi,
        "BWDRVI": self.bwdrvi,
        "CIgreen": self.ci_green,
        "CIrededge": self.ci_rededge,
        "CI": self.ci,
        "CTVI": self.ctvi,
        "GDVI": self.gdvi,
        "EVI": self.evi,
        "GEMI": self.gemi,
        "GOSAVI": self.gosavi,
        "GSAVI": self.gsavi,
        "Hue": self.hue,
        "IVI": self.ivi,
        "IPVI": self.ipvi,
        "I": self.i,
        "RVI": self.rvi,
        "MRVI": self.mrvi,
        "MSAVI": self.m_savi,
        "NormG": self.norm_g,
        "NormNIR": self.norm_nir,
        "NormR": self.norm_r,
        "NGRDI": self.ngrdi,
        "RI": self.ri,
        "S": self.s,
        "IF": self._if,
        "DVI": self.dvi,
        "TVI": self.tvi,
        "NDRE": self.ndre,
    }
    try:
        return funcs[index]()
    except KeyError:
        print("Index not in the list!")
        return False
digital_image_processing
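A hypothetical usage of the dispatcher above, assuming the enclosing class (called IndexCalculation here, an assumed name) exposes the __init__ and set_matricies rows nearby and that the bands are numpy arrays:

import numpy as np

red = np.array([[50.0, 60.0], [70.0, 80.0]])
nir = np.array([[200.0, 210.0], [220.0, 230.0]])
ic = IndexCalculation(red=red, nir=nir)
ndvi_map = ic.calculation("NDVI")    # dispatches to self.ndvi()
print(ndvi_map)                      # (nir - red) / (nir + red), elementwise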
def arv12(self): return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
digital_image_processing
def ccci(self): return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) )
digital_image_processing
def cvi(self): return self.nir * (self.red / (self.green**2))
digital_image_processing
def gli(self): return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue )
digital_image_processing
def ndvi(self): return (self.nir - self.red) / (self.nir + self.red)
digital_image_processing
def bndvi(self): return (self.nir - self.blue) / (self.nir + self.blue)
digital_image_processing
def red_edge_ndvi(self): return (self.redEdge - self.red) / (self.redEdge + self.red)
digital_image_processing
def gndvi(self): return (self.nir - self.green) / (self.nir + self.green)
digital_image_processing
def gbndvi(self): return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) )
digital_image_processing
def grndvi(self): return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) )
digital_image_processing
def rbndvi(self): return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
digital_image_processing
def pndvi(self): return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) )
digital_image_processing
def atsavi(self, x=0.08, a=1.22, b=0.03): return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) )
digital_image_processing
def bwdrvi(self): return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
digital_image_processing
def ci_green(self): return (self.nir / self.green) - 1
digital_image_processing
def ci_rededge(self): return (self.nir / self.redEdge) - 1
digital_image_processing
def ci(self): return (self.red - self.blue) / self.red
digital_image_processing
def ctvi(self):
    ndvi = self.ndvi()
    return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
digital_image_processing
def gdvi(self): return self.nir - self.green
digital_image_processing
def evi(self): return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) )
digital_image_processing
def gemi(self):
    n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
        self.nir + self.red + 0.5
    )
    return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
digital_image_processing
def gosavi(self, y=0.16): return (self.nir - self.green) / (self.nir + self.green + y)
digital_image_processing
def gsavi(self, n=0.5): return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
digital_image_processing
def hue(self): return np.arctan( ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
digital_image_processing
def ivi(self, a=None, b=None): return (self.nir - b) / (a * self.red)
digital_image_processing
def ipvi(self): return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
digital_image_processing
def i(self): return (self.red + self.green + self.blue) / 30.5
digital_image_processing
def rvi(self): return self.nir / self.red
digital_image_processing
def mrvi(self): return (self.rvi() - 1) / (self.rvi() + 1)
digital_image_processing
def m_savi(self): return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2
digital_image_processing
def norm_g(self): return self.green / (self.nir + self.red + self.green)
digital_image_processing
def norm_nir(self): return self.nir / (self.nir + self.red + self.green)
digital_image_processing
def norm_r(self): return self.red / (self.nir + self.red + self.green)
digital_image_processing
def ngrdi(self): return (self.green - self.red) / (self.green + self.red)
digital_image_processing
def ri(self): return (self.red - self.green) / (self.red + self.green)
digital_image_processing
def s(self):
    max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
    min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
    return (max_value - min_value) / max_value
digital_image_processing
def _if(self): return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
digital_image_processing
def dvi(self): return self.nir / self.red
digital_image_processing
def tvi(self): return (self.ndvi() + 0.5) ** (1 / 2)
digital_image_processing
def brightness(c: int) -> float: return 128 + level + (c - 128)
digital_image_processing
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
digital_image_processing
def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )
digital_image_processing
def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
digital_image_processing
def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
digital_image_processing
def test_gen_gaussian_kernel_filter(): assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
digital_image_processing
def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()
digital_image_processing
def test_median_filter(): assert med.median_filter(gray, 3).any()
digital_image_processing
def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
digital_image_processing
def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
digital_image_processing
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
digital_image_processing
def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
digital_image_processing
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
digital_image_processing
def im2col(image, block_size):
    rows, cols = image.shape
    dst_height = cols - block_size[1] + 1
    dst_width = rows - block_size[0] + 1
    image_array = zeros((dst_height * dst_width, block_size[1] * block_size[0]))
    row = 0
    for i in range(0, dst_height):
        for j in range(0, dst_width):
            window = ravel(image[i : i + block_size[0], j : j + block_size[1]])
            image_array[row, :] = window
            row += 1
    return image_array
digital_image_processing
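A small shape-check sketch for im2col above, assuming the numpy names the function uses (zeros, ravel) are imported at module level as it expects:

from numpy import arange, ravel, zeros

image = arange(16.0).reshape(4, 4)      # 4x4 toy image
patches = im2col(image, (3, 3))         # one 3x3 window per valid position
print(patches.shape)                    # (4, 9): four windows, nine pixels each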
def img_convolve(image, filter_kernel):
    height, width = image.shape[0], image.shape[1]
    k_size = filter_kernel.shape[0]
    pad_size = k_size // 2
    # Pads image with the edge values of array.
    image_tmp = pad(image, pad_size, mode="edge")
    # im2col: turn each k_size*k_size window into a row and np.vstack all rows
    image_array = im2col(image_tmp, (k_size, k_size))
    # turn the kernel into shape (k*k, 1)
    kernel_array = ravel(filter_kernel)
    # reshape and get the dst image
    dst = dot(image_array, kernel_array).reshape(height, width)
    return dst
digital_image_processing
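A tiny identity-kernel check for img_convolve above, assuming im2col from the previous row plus the numpy names it relies on (pad, dot, ravel, zeros) are in scope:

from numpy import array, dot, pad, ravel, zeros

image = array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
identity = array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
print(img_convolve(image, identity))  # edge-padded convolution returns the image unchanged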
def sobel_filter(image):
    kernel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
    kernel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
    dst_x = np.abs(img_convolve(image, kernel_x))
    dst_y = np.abs(img_convolve(image, kernel_y))
    # rescale the pixel values into [0, 255]
    dst_x = dst_x * 255 / np.max(dst_x)
    dst_y = dst_y * 255 / np.max(dst_y)
    dst_xy = np.sqrt((np.square(dst_x)) + (np.square(dst_y)))
    dst_xy = dst_xy * 255 / np.max(dst_xy)
    dst = dst_xy.astype(np.uint8)
    theta = np.arctan2(dst_y, dst_x)
    return dst, theta
digital_image_processing
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel; the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
digital_image_processing
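A short usage sketch for the Gabor kernel above; the parameter choices are illustrative, not taken from the row:

import numpy as np

kernel = gabor_filter_kernel(ksize=9, sigma=4, theta=45, lambd=10, gamma=1, psi=0)
print(kernel.shape)    # (9, 9); an even ksize would be bumped to the next odd size
print(kernel.dtype)    # float32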
def get_neighbors_pixel(
    image: np.ndarray, x_coordinate: int, y_coordinate: int, center: int
) -> int:
    try:
        return int(image[x_coordinate][y_coordinate] >= center)
    except (IndexError, TypeError):
        return 0
digital_image_processing
def local_binary_value(image: np.ndarray, x_coordinate: int, y_coordinate: int) -> int:
    center = image[x_coordinate][y_coordinate]
    powers = [1, 2, 4, 8, 16, 32, 64, 128]
    # skip get_neighbors_pixel if center is null
    if center is None:
        return 0
    # Starting from the top right, assigning value to pixels clockwise
    binary_values = [
        get_neighbors_pixel(image, x_coordinate - 1, y_coordinate + 1, center),
        get_neighbors_pixel(image, x_coordinate, y_coordinate + 1, center),
        get_neighbors_pixel(image, x_coordinate - 1, y_coordinate, center),
        get_neighbors_pixel(image, x_coordinate + 1, y_coordinate + 1, center),
        get_neighbors_pixel(image, x_coordinate + 1, y_coordinate, center),
        get_neighbors_pixel(image, x_coordinate + 1, y_coordinate - 1, center),
        get_neighbors_pixel(image, x_coordinate, y_coordinate - 1, center),
        get_neighbors_pixel(image, x_coordinate - 1, y_coordinate - 1, center),
    ]
    # Converting the binary value to decimal.
    return sum(
        binary_value * power for binary_value, power in zip(binary_values, powers)
    )
digital_image_processing
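A sketch applying the two LBP helpers above over a whole toy image, assuming both functions are in scope. Note the boundary behavior follows the helpers as written (IndexError falls back to 0, and numpy's negative indices wrap), so this is a toy illustration rather than a canonical LBP border treatment:

import numpy as np

image = np.random.randint(0, 256, size=(8, 8))  # toy grayscale image
lbp = np.zeros_like(image)
for i in range(image.shape[0]):
    for j in range(image.shape[1]):
        lbp[i, j] = local_binary_value(image, i, j)  # 0..255 pattern code
print(lbp)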
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
digital_image_processing
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
digital_image_processing
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)
digital_image_processing
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2
digital_image_processing
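A minimal end-to-end sketch for the bilateral-filter helpers above, assuming vec_gaussian, get_slice, get_gauss_kernel and bilateral_filter are all in scope and that intensities are scaled to [0, 1]:

import math
import numpy as np

rng = np.random.default_rng(0)
img = rng.random((32, 32))    # toy image, intensities in [0, 1]
out = bilateral_filter(img, spatial_variance=1.0, intensity_variance=1.0, kernel_size=5)
print(out.shape)              # (32, 32); border pixels stay zero by construction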
def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
digital_image_processing
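A short check of the argument parser above, called with a sys.argv-style list; the script name and file name are placeholders:

args = ["bilateral_filter.py", "lena.jpg", "2.0"]
print(parse_args(args))    # ('lena.jpg', 2.0, 1.0, 5): missing values fall back to defaults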
def median_filter(gray_img, mask=3):
    # set image borders
    bd = int(mask / 2)
    # copy image size
    median_img = zeros_like(gray_img)
    for i in range(bd, gray_img.shape[0] - bd):
        for j in range(bd, gray_img.shape[1] - bd):
            # get mask according with mask
            kernel = ravel(gray_img[i - bd : i + bd + 1, j - bd : j + bd + 1])
            # take the middle element of the sorted window as the median
            # (the original index, int8(mask * mask / 2 + 1), was off by one)
            median = sort(kernel)[mask * mask // 2]
            median_img[i, j] = median
    return median_img
digital_image_processing
def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
digital_image_processing
def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape (k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
digital_image_processing
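A quick usage sketch for the two gaussian rows above, assuming both functions are in scope along with the from-numpy names they rely on (mgrid, exp, pi, square, zeros, ravel, dot, uint8) plus itertools.product:

from itertools import product
from numpy import arange, dot, exp, mgrid, pi, ravel, square, uint8, zeros

image = arange(100.0).reshape(10, 10)    # toy grayscale ramp
blurred = gaussian_filter(image, k_size=3, sigma=1.0)
print(blurred.shape)                     # (8, 8): valid-convolution output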
def rgb2gray(rgb: np.array) -> np.array:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
digital_image_processing
def gray2binary(gray: np.array) -> np.array: return (gray > 127) & (gray <= 255)
digital_image_processing
def erosion(image: np.array, kernel: np.array) -> np.array:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1 :, kernel.shape[1] - 2 : -1 :] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation == 5)
    return output
digital_image_processing
def rgb2gray(rgb: np.array) -> np.array:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
digital_image_processing
def gray2binary(gray: np.array) -> np.array: return (gray > 127) & (gray <= 255)
digital_image_processing
def dilation(image: np.array, kernel: np.array) -> np.array:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1 :, kernel.shape[1] - 2 : -1 :] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
digital_image_processing
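A compact sketch exercising the erosion and dilation rows above on a synthetic binary image. The structuring element is an assumption, chosen as a cross with five ones because erosion's `summation == 5` check implies exactly that shape:

import numpy as np

# cross-shaped structuring element with five ones, matching `summation == 5`
kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
image = np.zeros((7, 7), dtype=int)
image[2:5, 2:5] = 1                  # a 3x3 white square
print(erosion(image, kernel))        # square shrinks to its center pixel
print(dilation(image, kernel))       # square grows by one pixel in a cross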
def __init__(self):
    self.img = ""
    self.original_image = ""
    self.last_list = []
    self.rem = 0
    self.L = 256
    self.sk = 0
    self.k = 0
    self.number_of_rows = 0
    self.number_of_cols = 0
digital_image_processing