From a7c7354946a3f9c63f8dda32a871022985a8fd83 Mon Sep 17 00:00:00 2001
From: zhang
Date: Sun, 1 Dec 2019 17:21:30 +0800
Subject: Add image similarity
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 cv/.DS_Store                                     | Bin 0 -> 6148 bytes
 cv/__pycache__/__init__.cpython-37.pyc           | Bin 0 -> 152 bytes
 cv/image_similarity/__init__.py                  |   0
 cv/image_similarity/cnn/__init__.py              |   0
 cv/image_similarity/cnn/build_models.py          | 238 +++++++++++++++++++++
 cv/image_similarity/cnn/deep_ranking.py          |  52 +++++
 cv/image_similarity/cnn/feature_similarity.py    |  71 ++++++
 .../cnn/images/Anastasia_Myskina_0001.jpg        | Bin 0 -> 18187 bytes
 .../cnn/images/Anastasia_Myskina_0002.jpg        | Bin 0 -> 11627 bytes
 .../cnn/images/Anastasia_Myskina_0003.jpg        | Bin 0 -> 13361 bytes
 cv/image_similarity/cnn/images/dx.jpeg           | Bin 0 -> 24383 bytes
 cv/image_similarity/cnn/images/dx2.jpeg          | Bin 0 -> 8629 bytes
 cv/image_similarity/cnn/inception_v3_withtop.png | Bin 0 -> 4579804 bytes
 cv/image_similarity/cnn/vgg16_with_top.png       | Bin 0 -> 216528 bytes
 cv/image_similarity/cnn/vgg16_without_top.png    | Bin 0 -> 176339 bytes
 cv/image_similarity/cnn/xception_with_top.png    | Bin 0 -> 2002672 bytes
 cv/image_similarity/dhash_detect.py              |  92 ++++++++
 cv/image_similarity/imgs/forest-copyright.jpg    | Bin 0 -> 103997 bytes
 cv/image_similarity/imgs/forest-high-rotate.jpg  | Bin 0 -> 59154 bytes
 cv/image_similarity/imgs/forest-high.jpg         | Bin 0 -> 67685 bytes
 cv/image_similarity/imgs/lena/blur-lena.png      | Bin 0 -> 79166 bytes
 cv/image_similarity/imgs/lena/origin-lena.png    | Bin 0 -> 99742 bytes
 cv/image_similarity/imgs/lena/resize-lena.png    | Bin 0 -> 10744 bytes
 cv/image_similarity/imgs/lena/rotate-lena.png    | Bin 0 -> 112435 bytes
 cv/image_similarity/imgs/lena/shift-lena.png     | Bin 0 -> 96266 bytes
 .../imgs/tumblr_ndyfnr7lk21tubinno1_1280.jpg     | Bin 0 -> 66570 bytes
 .../imgs/tumblr_ndyfq386o41tubinno1_1280.jpg     | Bin 0 -> 69166 bytes
 27 files changed, 453 insertions(+)
 create mode 100644 cv/.DS_Store
 create mode 100644 cv/__pycache__/__init__.cpython-37.pyc
 create mode 100644 cv/image_similarity/__init__.py
 create mode 100644 cv/image_similarity/cnn/__init__.py
 create mode 100644 cv/image_similarity/cnn/build_models.py
 create mode 100644 cv/image_similarity/cnn/deep_ranking.py
 create mode 100644 cv/image_similarity/cnn/feature_similarity.py
 create mode 100644 cv/image_similarity/cnn/images/Anastasia_Myskina_0001.jpg
 create mode 100644 cv/image_similarity/cnn/images/Anastasia_Myskina_0002.jpg
 create mode 100644 cv/image_similarity/cnn/images/Anastasia_Myskina_0003.jpg
 create mode 100644 cv/image_similarity/cnn/images/dx.jpeg
 create mode 100644 cv/image_similarity/cnn/images/dx2.jpeg
 create mode 100644 cv/image_similarity/cnn/inception_v3_withtop.png
 create mode 100644 cv/image_similarity/cnn/vgg16_with_top.png
 create mode 100644 cv/image_similarity/cnn/vgg16_without_top.png
 create mode 100644 cv/image_similarity/cnn/xception_with_top.png
 create mode 100644 cv/image_similarity/dhash_detect.py
 create mode 100644 cv/image_similarity/imgs/forest-copyright.jpg
 create mode 100644 cv/image_similarity/imgs/forest-high-rotate.jpg
 create mode 100644 cv/image_similarity/imgs/forest-high.jpg
 create mode 100644 cv/image_similarity/imgs/lena/blur-lena.png
 create mode 100644 cv/image_similarity/imgs/lena/origin-lena.png
 create mode 100644 cv/image_similarity/imgs/lena/resize-lena.png
 create mode 100644 cv/image_similarity/imgs/lena/rotate-lena.png
 create mode 100644 cv/image_similarity/imgs/lena/shift-lena.png
 create mode 100644 cv/image_similarity/imgs/tumblr_ndyfnr7lk21tubinno1_1280.jpg
 create mode 100644 cv/image_similarity/imgs/tumblr_ndyfq386o41tubinno1_1280.jpg
diff --git a/cv/.DS_Store b/cv/.DS_Store
new file mode 100644
index 0000000..ebab850
Binary files /dev/null and b/cv/.DS_Store differ
diff --git a/cv/__pycache__/__init__.cpython-37.pyc b/cv/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000..a75887b
Binary files /dev/null and b/cv/__pycache__/__init__.cpython-37.pyc differ
diff --git a/cv/image_similarity/__init__.py b/cv/image_similarity/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cv/image_similarity/cnn/__init__.py b/cv/image_similarity/cnn/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cv/image_similarity/cnn/build_models.py b/cv/image_similarity/cnn/build_models.py
new file mode 100644
index 0000000..e55e4f7
--- /dev/null
+++ b/cv/image_similarity/cnn/build_models.py
@@ -0,0 +1,238 @@
+import torch
+import torch.nn as nn
+from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, ReLU, Sigmoid, Dropout, MaxPool2d, \
+    AdaptiveAvgPool2d, Sequential, Module
+from collections import namedtuple
+
+
+# Support: ['IR_50', 'IR_101', 'IR_152', 'IR_SE_50', 'IR_SE_101', 'IR_SE_152']
+
+
+class Flatten(Module):
+    def forward(self, input):
+        return input.view(input.size(0), -1)
+
+
+def l2_norm(input, axis=1):
+    norm = torch.norm(input, 2, axis, True)
+    output = torch.div(input, norm)
+
+    return output
+
+
+class SEModule(Module):
+    def __init__(self, channels, reduction):
+        super(SEModule, self).__init__()
+        self.avg_pool = AdaptiveAvgPool2d(1)
+        self.fc1 = Conv2d(
+            channels, channels // reduction, kernel_size=1, padding=0, bias=False)
+
+        nn.init.xavier_uniform_(self.fc1.weight.data)
+
+        self.relu = ReLU(inplace=True)
+        self.fc2 = Conv2d(
+            channels // reduction, channels, kernel_size=1, padding=0, bias=False)
+
+        self.sigmoid = Sigmoid()
+
+    def forward(self, x):
+        module_input = x
+        x = self.avg_pool(x)
+        x = self.fc1(x)
+        x = self.relu(x)
+        x = self.fc2(x)
+        x = self.sigmoid(x)
+
+        return module_input * x
+
+
+class bottleneck_IR(Module):
+    def __init__(self, in_channel, depth, stride):
+        super(bottleneck_IR, self).__init__()
+        if in_channel == depth:
+            self.shortcut_layer = MaxPool2d(1, stride)
+        else:
+            self.shortcut_layer = Sequential(
+                Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth))
+        self.res_layer = Sequential(
+            BatchNorm2d(in_channel),
+            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
+            Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth))
+
+    def forward(self, x):
+        shortcut = self.shortcut_layer(x)
+        res = self.res_layer(x)
+
+        return res + shortcut
+
+
+class bottleneck_IR_SE(Module):
+    def __init__(self, in_channel, depth, stride):
+        super(bottleneck_IR_SE, self).__init__()
+        if in_channel == depth:
+            self.shortcut_layer = MaxPool2d(1, stride)
+        else:
+            self.shortcut_layer = Sequential(
+                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
+                BatchNorm2d(depth))
+        self.res_layer = Sequential(
+            BatchNorm2d(in_channel),
+            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
+            PReLU(depth),
+            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
+            BatchNorm2d(depth),
+            SEModule(depth, 16)
+        )
+
+    def forward(self, x):
+        shortcut = self.shortcut_layer(x)
+        res = self.res_layer(x)
+
+        return res + shortcut
+
+
+class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
+    '''A named tuple describing a ResNet block.'''
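+
+
+# A quick illustration (not part of the original source): with the defaults
+# below, get_block(in_channel=64, depth=128, num_units=4) expands to one
+# stride-2 Bottleneck(64, 128, 2) followed by three stride-1
+# Bottleneck(128, 128, 1) units, i.e. downsample once, then refine.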
+def get_block(in_channel, depth, num_units, stride=2):
+
+    return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
+
+
+def get_blocks(num_layers):
+    if num_layers == 50:
+        blocks = [
+            get_block(in_channel=64, depth=64, num_units=3),
+            get_block(in_channel=64, depth=128, num_units=4),
+            get_block(in_channel=128, depth=256, num_units=14),
+            get_block(in_channel=256, depth=512, num_units=3)
+        ]
+    elif num_layers == 100:
+        blocks = [
+            get_block(in_channel=64, depth=64, num_units=3),
+            get_block(in_channel=64, depth=128, num_units=13),
+            get_block(in_channel=128, depth=256, num_units=30),
+            get_block(in_channel=256, depth=512, num_units=3)
+        ]
+    elif num_layers == 152:
+        blocks = [
+            get_block(in_channel=64, depth=64, num_units=3),
+            get_block(in_channel=64, depth=128, num_units=8),
+            get_block(in_channel=128, depth=256, num_units=36),
+            get_block(in_channel=256, depth=512, num_units=3)
+        ]
+
+    return blocks
+
+
+class Backbone(Module):
+    def __init__(self, input_size, num_layers, mode='ir'):
+        super(Backbone, self).__init__()
+        assert input_size[0] in [112, 224], "input_size should be [112, 112] or [224, 224]"
+        assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
+        assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
+        blocks = get_blocks(num_layers)
+        if mode == 'ir':
+            unit_module = bottleneck_IR
+        elif mode == 'ir_se':
+            unit_module = bottleneck_IR_SE
+        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
+                                      BatchNorm2d(64),
+                                      PReLU(64))
+        if input_size[0] == 112:
+            self.output_layer = Sequential(BatchNorm2d(512),
+                                           Dropout(),
+                                           Flatten(),
+                                           Linear(512 * 7 * 7, 512),
+                                           BatchNorm1d(512))
+        # 224, 224
+        else:
+            self.output_layer = Sequential(BatchNorm2d(512),
+                                           Dropout(),
+                                           Flatten(),
+                                           Linear(512 * 14 * 14, 512),
+                                           BatchNorm1d(512))
+
+        modules = []
+        for block in blocks:
+            for bottleneck in block:
+                modules.append(
+                    unit_module(bottleneck.in_channel,
+                                bottleneck.depth,
+                                bottleneck.stride))
+        self.body = Sequential(*modules)
+
+        self._initialize_weights()
+
+    def forward(self, x):
+        x = self.input_layer(x)
+        x = self.body(x)
+        x = self.output_layer(x)
+
+        return x
+
+    def _initialize_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.xavier_uniform_(m.weight.data)
+                if m.bias is not None:
+                    m.bias.data.zero_()
+            elif isinstance(m, nn.BatchNorm2d):
+                m.weight.data.fill_(1)
+                m.bias.data.zero_()
+            elif isinstance(m, nn.BatchNorm1d):
+                m.weight.data.fill_(1)
+                m.bias.data.zero_()
+            elif isinstance(m, nn.Linear):
+                nn.init.xavier_uniform_(m.weight.data)
+                if m.bias is not None:
+                    m.bias.data.zero_()
+
+
+def IR_50(input_size):
+    """Constructs an IR-50 model.
+    """
+    model = Backbone(input_size, 50, 'ir')
+
+    return model
+
+
+def IR_101(input_size):
+    """Constructs an IR-101 model.
+    """
+    model = Backbone(input_size, 100, 'ir')
+
+    return model
+
+
+def IR_152(input_size):
+    """Constructs an IR-152 model.
+    """
+    model = Backbone(input_size, 152, 'ir')
+
+    return model
+
+
+def IR_SE_50(input_size):
+    """Constructs an IR_SE-50 model.
+    """
+    model = Backbone(input_size, 50, 'ir_se')
+
+    return model
+
+
+def IR_SE_101(input_size):
+    """Constructs an IR_SE-101 model.
+    """
+    model = Backbone(input_size, 100, 'ir_se')
+
+    return model
+
+
+def IR_SE_152(input_size):
+    """Constructs an IR_SE-152 model.
+    """
+    model = Backbone(input_size, 152, 'ir_se')
+
+    return model
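+
+
+if __name__ == '__main__':
+    # Minimal smoke-test sketch, not part of the original file: with a
+    # 112x112 input the backbone should emit a 512-dim embedding
+    # (spatial size halves per stage: 112 -> 56 -> 28 -> 14 -> 7).
+    net = IR_50([112, 112])
+    net.eval()
+    with torch.no_grad():
+        emb = net(torch.randn(1, 3, 112, 112))
+    print(emb.shape)  # expected: torch.Size([1, 512])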
+ """ + model = Backbone(input_size, 152, 'ir_se') + + return model diff --git a/cv/image_similarity/cnn/deep_ranking.py b/cv/image_similarity/cnn/deep_ranking.py new file mode 100644 index 0000000..96d8025 --- /dev/null +++ b/cv/image_similarity/cnn/deep_ranking.py @@ -0,0 +1,52 @@ + +import tensorflow as tf + +from tensorflow.keras.applications.vgg16 import VGG16 +from tensorflow.keras.applications.inception_v3 import InceptionV3 +from tensorflow.keras.applications import Xception, ResNet50 + + +from tensorflow.keras.utils import plot_model +from tensorflow.keras.layers import * +from tensorflow.keras import backend as K + +from keras.applications.vgg16 import VGG16 +from keras.applications import Xception + +vgg_with_top = VGG16(include_top=True) +# plot_model(vgg_with_top, to_file='vgg16_with_top.png', show_shapes=True) + +vgg_without_top = VGG16(include_top=False) +# plot_model(vgg_without_top, to_file='vgg16_without_top.png', show_shapes=True) + +inception = InceptionV3() + +# plot_model(inception, to_file='inception_v3_withtop.png', show_shapes=True) + +xception = Xception() +# plot_model(xception, to_file='xception_with_top.png', show_shapes=True) +resnet = ResNet50() +plot_model(resnet, to_file='resnet_with_top.png', show_shapes=True) + + +# first_input = Input(shape=(224, 224, 3)) +# first_conv = Conv2D(96, kernel_size=(8, 8), strides=(16, 16), padding='same')(first_input) +# print(first_conv) +# first_max = MaxPool2D(pool_size=(3, 3), strides=(4, 4), padding='same')(first_conv) +# print(first_max) +# first_max = Flatten()(first_max) +# first_max = Lambda(lambda x: K.l2_normalize(x, axis=1))(first_max) +# +# second_input = Input(shape=(224, 224, 3)) +# second_conv = Conv2D(96, kernel_size=(8, 8), strides=(32, 32), padding='same')(second_input) +# print(second_conv) +# second_max = MaxPool2D(pool_size=(7, 7), strides=(2, 2), padding='same')(second_conv) +# print(second_max) +# second_max = Flatten()(second_max) +# second_max = Lambda(lambda x: K.l2_normalize(x, axis=1))(second_max) + +# merge_one = concatenate([first_max, second_max]) + +# print(first_max) +# print(second_max) +# print(merge_one) \ No newline at end of file diff --git a/cv/image_similarity/cnn/feature_similarity.py b/cv/image_similarity/cnn/feature_similarity.py new file mode 100644 index 0000000..ff10250 --- /dev/null +++ b/cv/image_similarity/cnn/feature_similarity.py @@ -0,0 +1,71 @@ +import random +import torch +from cv.image_similarity.cnn.build_models import IR_50, IR_101, IR_152 +import torchvision +import os +from PIL import Image + + +to_torch_tensor = torchvision.transforms.Compose([torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize([0.5, 0.5, 0.5],[0.5, 0.5, 0.5])]) + + + +def l2_norm(input, axis=1): + norm = torch.norm(input, 2, axis, True) + output = torch.div(input, norm) + + return output + + +def init_model(model, param, device): + m = model([112, 112]) + m.eval() + m.to(device) + m.load_state_dict(torch.load(param, map_location=torch.device('cpu'))) + return m + + +def get_model_pool(device): + model_pool = [] + # double + model_pool.append(init_model(IR_50, 'models/backbone_ir50_ms1m_epoch120.pth', device)) + # model_pool.append(init_model(IR_50, 'models/backbone_ir50_ms1m_epoch120.pth', device)) + # + # model_pool.append(init_model(IR_50, 'models/Backbone_IR_50_LFW.pth', device)) + # model_pool.append(init_model(IR_101, 'models/Backbone_IR_101_Batch_108320.pth', device)) + # model_pool.append(init_model(IR_152, 'models/Backbone_IR_152_MS1M_Epoch_112.pth', device)) + return 
+
+
+def get_model(device):
+    return init_model(IR_50, 'models/backbone_ir50_ms1m_epoch120.pth', device)
+
+
+device = torch.device('cpu')
+model = get_model(device)
+print('----models loaded----')
+images = os.listdir('../imgs/lena')
+
+
+vectors = []
+
+for img in images:
+    print(img)
+    img = Image.open('../imgs/lena/' + img).convert('RGB')
+    # print(dir(img))
+    print(img.size, img.mode)
+    img = to_torch_tensor(img.resize((112, 112), Image.ANTIALIAS))
+    img = img.unsqueeze_(0).to(device)
+    feature = model(img)
+    vectors.append(l2_norm(feature).detach_())
+
+print('----vectors calculated----')
+
+
+for i in range(len(vectors)):
+    for j in range(len(vectors)):
+        # cosine similarity (dot product of l2-normalized vectors)
+        dist = (vectors[i]*vectors[j]).sum().item()
+        print(images[i], images[j], dist)
+    print('-------------')
diff --git a/cv/image_similarity/cnn/images/Anastasia_Myskina_0001.jpg b/cv/image_similarity/cnn/images/Anastasia_Myskina_0001.jpg
new file mode 100644
index 0000000..ae7f5b8
Binary files /dev/null and b/cv/image_similarity/cnn/images/Anastasia_Myskina_0001.jpg differ
diff --git a/cv/image_similarity/cnn/images/Anastasia_Myskina_0002.jpg b/cv/image_similarity/cnn/images/Anastasia_Myskina_0002.jpg
new file mode 100644
index 0000000..cce9bfc
Binary files /dev/null and b/cv/image_similarity/cnn/images/Anastasia_Myskina_0002.jpg differ
diff --git a/cv/image_similarity/cnn/images/Anastasia_Myskina_0003.jpg b/cv/image_similarity/cnn/images/Anastasia_Myskina_0003.jpg
new file mode 100644
index 0000000..c2d7d1e
Binary files /dev/null and b/cv/image_similarity/cnn/images/Anastasia_Myskina_0003.jpg differ
diff --git a/cv/image_similarity/cnn/images/dx.jpeg b/cv/image_similarity/cnn/images/dx.jpeg
new file mode 100644
index 0000000..8a7475c
Binary files /dev/null and b/cv/image_similarity/cnn/images/dx.jpeg differ
diff --git a/cv/image_similarity/cnn/images/dx2.jpeg b/cv/image_similarity/cnn/images/dx2.jpeg
new file mode 100644
index 0000000..e21642b
Binary files /dev/null and b/cv/image_similarity/cnn/images/dx2.jpeg differ
diff --git a/cv/image_similarity/cnn/inception_v3_withtop.png b/cv/image_similarity/cnn/inception_v3_withtop.png
new file mode 100644
index 0000000..3b9f939
Binary files /dev/null and b/cv/image_similarity/cnn/inception_v3_withtop.png differ
diff --git a/cv/image_similarity/cnn/vgg16_with_top.png b/cv/image_similarity/cnn/vgg16_with_top.png
new file mode 100644
index 0000000..ba1af31
Binary files /dev/null and b/cv/image_similarity/cnn/vgg16_with_top.png differ
diff --git a/cv/image_similarity/cnn/vgg16_without_top.png b/cv/image_similarity/cnn/vgg16_without_top.png
new file mode 100644
index 0000000..4320644
Binary files /dev/null and b/cv/image_similarity/cnn/vgg16_without_top.png differ
diff --git a/cv/image_similarity/cnn/xception_with_top.png b/cv/image_similarity/cnn/xception_with_top.png
new file mode 100644
index 0000000..e72ad68
Binary files /dev/null and b/cv/image_similarity/cnn/xception_with_top.png differ
diff --git a/cv/image_similarity/dhash_detect.py b/cv/image_similarity/dhash_detect.py
new file mode 100644
index 0000000..5b68aa2
--- /dev/null
+++ b/cv/image_similarity/dhash_detect.py
@@ -0,0 +1,92 @@
+
+
+from PIL import Image
+import numpy as np
+import imagehash
+
+
+def image_to_dhash(image_path, hash_size=8):
+    image = Image.open(image_path)
+    # grayscale, then resize to (hash_size+1) x hash_size, i.e. 9x8
+    image = image.convert('L').resize((hash_size+1, hash_size), Image.ANTIALIAS)
+    pixels = np.asarray(image)
+    diff = pixels[:, 1:] > pixels[:, :-1]
+    # dhash = sum(2**(i%8) for i, v in enumerate(diff.flatten()) if v)
+    return diff
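+
+# Worked illustration (made-up values, not from a real image): for
+# hash_size=8 the 9x8 grayscale image gives an 8x8 boolean matrix, one bit
+# per horizontally adjacent pixel pair; packed row-major as in binary_to_hex
+# below, a first row of [1,0,0,1,1,0,1,0] contributes the hex byte 0x9a.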
+
+# def image_to_phash():
+
+
+def binary_to_hex(arr):
+    bit_str = ''.join(str(b) for b in 1*arr.flatten())
+    print(bit_str)
+    width = int(np.ceil(len(bit_str)/4))
+    return '{:0>{width}x}'.format(int(bit_str, 2), width=width)
+
+
+def hamming_distance(dhash1, dhash2, f=64):
+    if isinstance(dhash1, str):
+        dhash1 = int(dhash1, base=16)
+    if isinstance(dhash2, str):
+        dhash2 = int(dhash2, base=16)
+    x = (dhash1 ^ dhash2) & ((1 << f) - 1)
+    # count the set bits (Brian Kernighan's trick)
+    ans = 0
+    while x:
+        ans += 1
+        x &= x-1
+    return ans
+
+
+def hamming_dist(dhash1, dhash2):
+    difference = (int(dhash1, 16)) ^ (int(dhash2, 16))
+    return bin(difference).count("1")
+
+
+def hash_functions_eval(hash_function=imagehash.dhash):
+    lena_1 = hash_function(Image.open('./imgs/lena/origin-lena.png'))
+    lena_2 = hash_function(Image.open('./imgs/lena/blur-lena.png'))
+    lena_3 = hash_function(Image.open('./imgs/lena/resize-lena.png'))
+    lena_4 = hash_function(Image.open('./imgs/lena/shift-lena.png'))
+    forest = hash_function(Image.open('./imgs/forest-high.jpg'))
+
+    hashes = [lena_1, lena_2, lena_3, lena_4, forest]
+    for i in range(len(hashes)):
+        for j in range(i + 1, len(hashes)):
+            print(hash_function.__name__, i, j, hashes[i] - hashes[j])
+
+
+def rotate_eval(img_path='./imgs/lena/origin-lena.png', hash_function=imagehash.dhash):
+    origin_image = Image.open(img_path)
+    origin_hash = hash_function(origin_image)
+    for r in range(1, 180, 10):
+        rotate_hash = hash_function(origin_image.rotate(r))
+        print(hash_function.__name__, r, origin_hash - rotate_hash)
+
+
+if __name__ == '__main__':
+    # hash_functions_eval(imagehash.average_hash)
+    # print('----------------------')
+    #
+    # hash_functions_eval(imagehash.phash)
+    # print('----------------------')
+    #
+    # hash_functions_eval(imagehash.dhash)
+    # print('----------------------')
+    #
+    # hash_functions_eval(imagehash.whash)
+
+    rotate_eval(hash_function=imagehash.phash)
+
+
+class Solution:
+    def num_to_hex(self, s):
+        # hex digits of the decimal value itself, uppercased, no '0x' prefix
+        return format(int(s), 'X')
+    def toHexspeak(self, num: str) -> str:
+        h = self.num_to_hex(num)
+        h = h.replace('1', 'I').replace('0', 'O')
+        s = '23456789'
+        for c in s:
+            if c in h:
+                return "ERROR"
+        return h
+print(Solution().toHexspeak("257"))  # 257 == 0x101 -> "IOI"
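+
+# Hedged end-to-end sketch chaining the helpers above (paths assume the
+# imgs/ files added in this commit):
+#   h1 = binary_to_hex(image_to_dhash('./imgs/lena/origin-lena.png'))
+#   h2 = binary_to_hex(image_to_dhash('./imgs/lena/blur-lena.png'))
+#   print(hamming_distance(h1, h2))  # small distance -> visually similar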
diff --git a/cv/image_similarity/imgs/forest-copyright.jpg b/cv/image_similarity/imgs/forest-copyright.jpg
new file mode 100644
index 0000000..7332276
Binary files /dev/null and b/cv/image_similarity/imgs/forest-copyright.jpg differ
diff --git a/cv/image_similarity/imgs/forest-high-rotate.jpg b/cv/image_similarity/imgs/forest-high-rotate.jpg
new file mode 100644
index 0000000..28baeb4
Binary files /dev/null and b/cv/image_similarity/imgs/forest-high-rotate.jpg differ
diff --git a/cv/image_similarity/imgs/forest-high.jpg b/cv/image_similarity/imgs/forest-high.jpg
new file mode 100644
index 0000000..d0e0594
Binary files /dev/null and b/cv/image_similarity/imgs/forest-high.jpg differ
diff --git a/cv/image_similarity/imgs/lena/blur-lena.png b/cv/image_similarity/imgs/lena/blur-lena.png
new file mode 100644
index 0000000..b1c2a18
Binary files /dev/null and b/cv/image_similarity/imgs/lena/blur-lena.png differ
diff --git a/cv/image_similarity/imgs/lena/origin-lena.png b/cv/image_similarity/imgs/lena/origin-lena.png
new file mode 100644
index 0000000..e40ccb1
Binary files /dev/null and b/cv/image_similarity/imgs/lena/origin-lena.png differ
diff --git a/cv/image_similarity/imgs/lena/resize-lena.png b/cv/image_similarity/imgs/lena/resize-lena.png
new file mode 100644
index 0000000..abcad3e
Binary files /dev/null and b/cv/image_similarity/imgs/lena/resize-lena.png differ
diff --git a/cv/image_similarity/imgs/lena/rotate-lena.png b/cv/image_similarity/imgs/lena/rotate-lena.png
new file mode 100644
index 0000000..1e7dd31
Binary files /dev/null and b/cv/image_similarity/imgs/lena/rotate-lena.png differ
diff --git a/cv/image_similarity/imgs/lena/shift-lena.png b/cv/image_similarity/imgs/lena/shift-lena.png
new file mode 100644
index 0000000..7eb2382
Binary files /dev/null and b/cv/image_similarity/imgs/lena/shift-lena.png differ
diff --git a/cv/image_similarity/imgs/tumblr_ndyfnr7lk21tubinno1_1280.jpg b/cv/image_similarity/imgs/tumblr_ndyfnr7lk21tubinno1_1280.jpg
new file mode 100644
index 0000000..8e60729
Binary files /dev/null and b/cv/image_similarity/imgs/tumblr_ndyfnr7lk21tubinno1_1280.jpg differ
diff --git a/cv/image_similarity/imgs/tumblr_ndyfq386o41tubinno1_1280.jpg b/cv/image_similarity/imgs/tumblr_ndyfq386o41tubinno1_1280.jpg
new file mode 100644
index 0000000..ef39949
Binary files /dev/null and b/cv/image_similarity/imgs/tumblr_ndyfq386o41tubinno1_1280.jpg differ
--
cgit v1.2.3