-rw-r--r--  .idea/misc.xml                           3
-rw-r--r--  cv/learn_cv/video.py                    36
-rw-r--r--  cv/seg/human_seg.py                     33
-rw-r--r--  cv/seg/img2video.py                     30
-rw-r--r--  cv/seg/utils.py                        128
-rw-r--r--  cv/seg/video_seg/processing.py          58
-rwxr-xr-x  cv/seg/video_seg/vtest.avi             bin 0 -> 8131690 bytes
-rw-r--r--  cv/tracker/object_tracker.py            35
-rw-r--r--  fine_tune/dog_breed.py                 169
-rw-r--r--  hpc/mlti_process.py                     18
-rw-r--r--  hpc/mlti_threads.py                     33
-rw-r--r--  hpc/mpi_demo.py (renamed from hpc/demo.py)  0
-rw-r--r--  learn_torch/learn_nn/custom_module.py   21
-rw-r--r--  learn_torch/seq/char_rnn.py             66
-rw-r--r--  rl/gym_demo/carl_pole.py                47
-rw-r--r--  vis/animation/text_animation.py         18
-rw-r--r--  web/flask_proj/app.py                   14
-rw-r--r--  web/flask_proj/templates/index.html     20
-rw-r--r--  web/html_basics.html                    10
19 files changed, 737 insertions(+), 2 deletions(-)
diff --git a/.idea/misc.xml b/.idea/misc.xml
index 65531ca..7a5c067 100644
--- a/.idea/misc.xml
+++ b/.idea/misc.xml
@@ -1,4 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.6" project-jdk-type="Python SDK" />
+ <component name="PyCharmProfessionalAdvertiser">
+ <option name="shown" value="true" />
+ </component>
</project>
\ No newline at end of file
diff --git a/cv/learn_cv/video.py b/cv/learn_cv/video.py
new file mode 100644
index 0000000..703a2bf
--- /dev/null
+++ b/cv/learn_cv/video.py
@@ -0,0 +1,36 @@
+import numpy as np
+import cv2
+
+
+def load_video(video):
+ cap = cv2.VideoCapture(video)
+ if not cap.isOpened():
+        print("Cannot open video: {}".format(video))
+ exit()
+ print(cap.get(cv2.CAP_PROP_FRAME_WIDTH),
+ cap.get(cv2.CAP_PROP_FRAME_HEIGHT),
+ cap.get(cv2.CAP_PROP_FPS))
+ while True:
+ # Capture frame-by-frame
+ ret, frame = cap.read()
+ # if frame is read correctly ret is True
+ if not ret:
+ print("Can't receive frame (stream end?). Exiting ...")
+ break
+ # Our operations on the frame come here
+        # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+ # Display the resulting frame
+ cv2.imshow('frame', frame)
+ print(frame.shape)
+ if cv2.waitKey(1) == ord('q'):
+ break
+ # When everything done, release the capture
+ cap.release()
+ cv2.destroyAllWindows()
+
+
+
+
+if __name__ == '__main__':
+ load_video('../data/vtest.avi')
+
diff --git a/cv/seg/human_seg.py b/cv/seg/human_seg.py
new file mode 100644
index 0000000..106b5c7
--- /dev/null
+++ b/cv/seg/human_seg.py
@@ -0,0 +1,33 @@
+# 1. Import modules
+import os
+import paddlehub as hub
+import numpy as np
+from collections import Counter
+import cv2
+import matplotlib.pyplot as plt
+
+
+path = './data/'
+files = os.listdir(path)
+imgs = []
+
+
+for i in files:
+ imgs.append(path + i)
+
+cv2.imshow('', cv2.imread(imgs[0]))
+cv2.waitKey(0)  # a waitKey call is needed for the preview window to actually render
+
+
+
+
+# 2. Load the PaddleHub human segmentation model
+humanseg = hub.Module(name='deeplabv3p_xception65_humanseg')
+
+
+
+results = humanseg.segmentation(data={'image':imgs}, visualization=True)
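+# results[0]['data'] holds the predicted mask for the first image; np.unique counts how many pixels take each value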
+unique, counts = np.unique(results[0]['data'], return_counts=True)
+print(dict(zip(unique, counts)))
+
+print()
+
diff --git a/cv/seg/img2video.py b/cv/seg/img2video.py
new file mode 100644
index 0000000..c824c9c
--- /dev/null
+++ b/cv/seg/img2video.py
@@ -0,0 +1,30 @@
+import cv2
+import os
+from tqdm import tqdm
+
+image_folder = './video_seg/v2j'
+video_name = 'video.avi'
+
+images = [img for img in os.listdir(image_folder) if img.endswith(".jpg")]
+frame = cv2.imread(os.path.join(image_folder, images[0]))
+height, width, layers = frame.shape
+
+video = cv2.VideoWriter(video_name, 0, 10, (width,height))
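+# frames are named <frame_count>-<index>.jpg by video_seg/processing.py, so sort numerically on the index part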
+images.sort(key=lambda s: int(s.split('.')[0].split('-')[1]))
+for image in tqdm(images):
+ video.write(cv2.imread(os.path.join(image_folder, image)))
+
+cv2.destroyAllWindows()
+video.release()
+
+
+# import os
+# import moviepy.video.io.ImageSequenceClip
+# image_folder = './video_seg/v2j'
+# fps = 10
+#
+# image_files = [os.path.join(image_folder,img)
+# for img in os.listdir(image_folder)
+# if img.endswith(".jpg")]
+# clip = moviepy.video.io.ImageSequenceClip.ImageSequenceClip(image_files, fps=fps)
+# clip.write_videofile('my_video.mp4')
\ No newline at end of file
diff --git a/cv/seg/utils.py b/cv/seg/utils.py
new file mode 100644
index 0000000..0329c84
--- /dev/null
+++ b/cv/seg/utils.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python
+
+"""
+Function for interactively selecting part of an array displayed as an image with matplotlib.
+"""
+
+import matplotlib.pyplot as plt
+from matplotlib import is_interactive
+from matplotlib.path import Path
+from matplotlib.widgets import LassoSelector, RectangleSelector
+import numpy as np
+
+
+def path_bbox(p):
+ """
+ Return rectangular bounding box of given path.
+ Parameters
+ ----------
+ p : array_like
+ Array of vertices with shape Nx2.
+ Returns
+ -------
+ bbox : array_like
+ Array of bounding box vertices with shape 4x2.
+ """
+
+ assert p.ndim == 2
+ assert p.shape[1] == 2
+
+ ix_min = p[:, 0].argmin()
+ ix_max = p[:, 0].argmax()
+ iy_min = p[:, 1].argmin()
+ iy_max = p[:, 1].argmax()
+
+ return np.array([[p[ix_min, 0], p[iy_min, 1]],
+ [p[ix_min, 0], p[iy_max, 1]],
+ [p[ix_max, 0], p[iy_max, 1]],
+ [p[ix_max, 0], p[iy_min, 1]]])
+
+
+def imshow_select(data, selector='lasso', bbox=False):
+ """
+ Display array as image with region selector.
+
+ Parameters
+ ----------
+ data : array_like
+ Array to display.
+ selector : str
+ Region selector. For `lasso`, use `LassoSelector`; for `rectangle`,
+ use `RectangleSelector`.
+ bbox : bool
+ If True, only return array within rectangular bounding box of selected region.
+ Otherwise, return array with same dimensions as `data` such that selected region
+ contains the corresponding values from `data` and the remainder contains 0.
+ Returns
+ -------
+ region : array_like
+ Data for selected region.
+ mask : array_like
+ Boolean mask with same shape of `data` for selecting the returned region from `data`.
+ """
+
+ interactive = is_interactive()
+ if not interactive:
+ plt.ion()
+ fig = plt.figure()
+ ax = fig.gca()
+ ax.imshow(data)
+
+ x, y = np.meshgrid(np.arange(data.shape[1], dtype=int),
+ np.arange(data.shape[0], dtype=int))
+ pix = np.vstack((x.flatten(), y.flatten())).T
+
+ # Store data in dict value to permit overwriting by nested
+ # functions in Python 2.7:
+ selected = {}
+ selected['data'] = np.zeros_like(data)
+ selected['mask'] = np.tile(False, data.shape)
+
+ def _onselect_lasso(verts):
+ verts = np.array(verts)
+ p = Path(verts)
+ ind = p.contains_points(pix, radius=1)
+ selected['data'].flat[ind] = data.flat[ind]
+ selected['mask'].flat[ind] = True
+ if bbox:
+ b = path_bbox(verts)
+ selected['data'] = selected['data'][int(min(b[:, 1])):int(max(b[:, 1])),
+ int(min(b[:, 0])):int(max(b[:, 0]))]
+
+ def _onselect_rectangle(start, end):
+ verts = np.array([[start.xdata, start.ydata],
+ [start.xdata, end.ydata],
+ [end.xdata, end.ydata],
+ [end.xdata, start.ydata]], int)
+ p = Path(verts)
+ ind = p.contains_points(pix, radius=1)
+ selected['data'].flat[ind] = data.flat[ind]
+ selected['mask'].flat[ind] = True
+ if bbox:
+ b = path_bbox(verts)
+ selected['data'] = selected['data'][min(b[:, 1]):max(b[:, 1]),
+ min(b[:, 0]):max(b[:, 0])]
+
+ name_to_selector = {'lasso': LassoSelector,
+ 'rectangle': RectangleSelector}
+ selector = name_to_selector[selector]
+ onselect_dict = {LassoSelector: _onselect_lasso,
+ RectangleSelector: _onselect_rectangle}
+ kwargs_dict = {LassoSelector: {},
+ RectangleSelector: {'interactive': True}}
+
+ lasso = selector(ax, onselect_dict[selector], **kwargs_dict[selector])
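+    # block until the user has drawn the selection; the selector callbacks above fill in selected['data'] and selected['mask']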
+ input('Press Enter when done')
+ lasso.disconnect_events()
+ if not interactive:
+ plt.ioff()
+ return selected['data'], selected['mask']
+
+
+if __name__ == '__main__':
+ from skimage.data import coins
+
+ data = coins()
+ selected, mask = imshow_select(data, 'lasso', True)
+ plt.imsave('selected.png', selected)
+    plt.imsave('mask.png', mask)
\ No newline at end of file
diff --git a/cv/seg/video_seg/processing.py b/cv/seg/video_seg/processing.py
new file mode 100644
index 0000000..eeaa6bb
--- /dev/null
+++ b/cv/seg/video_seg/processing.py
@@ -0,0 +1,58 @@
+import paddlehub as hub
+from PIL import Image
+import os
+import cv2
+import matplotlib.pyplot as plt
+import matplotlib.image as mpimg
+import numpy as np
+
+
+def video2jpg(video_file, output_path):
+ '''
+    Save every frame of video_file as a jpg image in the output_path folder.
+ '''
+    os.makedirs(output_path, exist_ok=True)  # create the output folder if it does not already exist
+
+    # open the video file
+ cap = cv2.VideoCapture(video_file)
+ n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+ num = 0
+ while True:
+ ret, frame = cap.read()
+ if ret:
+ cv2.imwrite('{}/{}-{}.jpg'.format(output_path, n_frames, num), frame)
+ num += 1
+ else:
+ break
+    cap.release()  # release the video handle
+
+
+def humanseg(images):
+
+
+    # load the model
+ module = hub.Module(name="deeplabv3p_xception65_humanseg")
+    # run the model's segmentation (human matting) on the whole image list
+ module.segmentation(data={"image": images}, visualization=True)
+
+ # for i, img in enumerate(images):
+ # print(i, img)
+ # result = module.segmentation(data={"image": [img]}, visualization=True)
+
+
+def file_list(listdir):
+ im_list = []
+ imgs = os.listdir(listdir)
+ for img in imgs:
+ im_list.append(os.path.join(listdir, img))
+ return im_list
+
+
+if __name__ == '__main__':
+ # video2jpg('vtest.avi', 'v2j')
+
+ img_list = file_list('./v2j')
+ humanseg(img_list)
diff --git a/cv/seg/video_seg/vtest.avi b/cv/seg/video_seg/vtest.avi
new file mode 100755
index 0000000..965ab12
--- /dev/null
+++ b/cv/seg/video_seg/vtest.avi
Binary files differ
diff --git a/cv/tracker/object_tracker.py b/cv/tracker/object_tracker.py
new file mode 100644
index 0000000..315ffa3
--- /dev/null
+++ b/cv/tracker/object_tracker.py
@@ -0,0 +1,35 @@
+import cv2
+
+cap = cv2.VideoCapture(0)
+
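+# note: OpenCV's GOTURN tracker loads goturn.prototxt and goturn.caffemodel (by default from the working directory) and raises an error if they are missing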
+tracker = cv2.TrackerGOTURN_create()
+success, img = cap.read()
+
+# select a bounding box ( ROI )
+bbox = cv2.selectROI("Tracking", img, False)
+tracker.init(img, bbox)
+
+
+def drawBox(img, bbox):
+ x, y, w, h = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
+ cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 3, 1)
+ cv2.putText(img, "Tracking", (75, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
+
+
+while True:
+ timer = cv2.getTickCount()
+ success, img = cap.read()
+
+ success, bbox = tracker.update(img)
+
+ if success:
+ drawBox(img, bbox)
+ else:
+        cv2.putText(img, "Lost", (75, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
+
+ fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
+ cv2.putText(img, str(int(fps)), (75, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
+ cv2.imshow("Tracking", img)
+
+ if cv2.waitKey(1) & 0xff == ord('q'):
+        break
\ No newline at end of file
diff --git a/fine_tune/dog_breed.py b/fine_tune/dog_breed.py
new file mode 100644
index 0000000..6ad1410
--- /dev/null
+++ b/fine_tune/dog_breed.py
@@ -0,0 +1,169 @@
+import torch,os,torchvision
+import torch.nn as nn
+import torch.nn.functional as F
+import pandas as pd
+import numpy as np
+import matplotlib.pyplot as plt
+from torch.utils.data import DataLoader, Dataset
+from torchvision import datasets, models, transforms
+from PIL import Image
+from sklearn.model_selection import StratifiedShuffleSplit
+from torchviz import make_dot
+from datetime import datetime
+from tqdm import tqdm
+
+
+DATA_ROOT = './data/dog-breed-identification'
+
+
+
+from functools import wraps
+from time import time
+
+def timing(f):
+ @wraps(f)
+ def wrap(*args, **kw):
+ ts = time()
+ result = f(*args, **kw)
+ te = time()
+ print('func:%r args:[%r, %r] took: %2.4f sec' % \
+ (f.__name__, args, kw, te-ts))
+ return result
+ return wrap
+
+
+class DogDataset(Dataset):
+ def __init__(self, labels_df, img_path, transform=None):
+ self.labels_df = labels_df
+ self.img_path = img_path
+ self.transform = transform
+
+ def __len__(self):
+ return self.labels_df.shape[0]
+
+ def __getitem__(self, idx):
+ image_name = os.path.join(self.img_path, self.labels_df.id[idx]) + '.jpg'
+ img = Image.open(image_name)
+ label = self.labels_df.label_idx[idx]
+
+ if self.transform:
+ img = self.transform(img)
+ return img, label
+
+@timing
+def train(model,device, train_loader, epoch):
+ model.train()
+ for batch_idx, data in tqdm(enumerate(train_loader)):
+        print('{}: {}/{}, {}'.format(epoch, batch_idx, len(train_loader), data[0].shape))
+ x,y= data
+ x=x.to(device)
+ y=y.to(device)
+ optimizer.zero_grad()
+ y_hat= model(x)
+ loss = criterion(y_hat, y)
+ loss.backward()
+ optimizer.step()
+ print ('Train Epoch: {}\t Loss: {:.6f}'.format(epoch,loss.item()))
+
+
+def test(model, device, test_loader):
+ model.eval()
+ test_loss = 0
+ correct = 0
+ with torch.no_grad():
+ for i,data in enumerate(test_loader):
+ x,y= data
+ x=x.to(device)
+ y=y.to(device)
+ optimizer.zero_grad()
+ y_hat = model(x)
+ test_loss += criterion(y_hat, y).item() # sum up batch loss
+ pred = y_hat.max(1, keepdim=True)[1] # get the index of the max log-probability
+ correct += pred.eq(y.view_as(pred)).sum().item()
+ test_loss /= len(test_loader.dataset)
+ print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
+ test_loss, correct, len(val_dataset),
+ 100. * correct / len(val_dataset)))
+
+
+if __name__ == '__main__':
+
+ all_labels_df = pd.read_csv(os.path.join(DATA_ROOT, 'labels.csv'))
+ breeds = all_labels_df.breed.unique()
+ breed2idx = dict((breed, idx) for idx, breed in enumerate(breeds))
+ idx2breed = dict((idx, breed) for idx, breed in enumerate(breeds))
+ # all_labels_df['label_idx'] = [breed2idx[b] for b in all_labels_df.breed]
+ all_labels_df['label_idx'] = all_labels_df['breed'].apply(lambda x: breed2idx[x])
+ print(all_labels_df.head())
+
+ model_ft = models.resnet50(pretrained=True)
+ print(model_ft)
+ # make_dot(yhat, params=dict(list(model_ft.named_parameters()))).render("resnet50_torchviz", format="png")
+
+    IMG_SIZE = 224  # ResNet-50 expects 224x224 inputs, so all images are resized to this size
+    BATCH_SIZE = 256  # this batch size takes roughly 4.6-5 GB of GPU memory; lower it if that is too much, or raise it to 512 with more than 10 GB
+ IMG_MEAN = [0.485, 0.456, 0.406]
+ IMG_STD = [0.229, 0.224, 0.225]
+ CUDA = torch.cuda.is_available()
+ DEVICE = torch.device("cuda" if CUDA else "cpu")
+
+ train_transforms = transforms.Compose([
+ transforms.Resize(IMG_SIZE),
+ transforms.RandomResizedCrop(IMG_SIZE),
+ transforms.RandomHorizontalFlip(),
+ transforms.RandomRotation(30),
+ transforms.ToTensor(),
+ transforms.Normalize(IMG_MEAN, IMG_STD)
+ ])
+
+ val_transforms = transforms.Compose([
+ transforms.Resize(IMG_SIZE),
+ transforms.CenterCrop(IMG_SIZE),
+ transforms.ToTensor(),
+ transforms.Normalize(IMG_MEAN, IMG_STD)
+ ])
+
+ dataset_names = ['train', 'valid']
+ stratified_split = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=0)
+ train_split_idx, val_split_idx = next(iter(stratified_split.split(all_labels_df.id, all_labels_df.breed)))
+ train_df = all_labels_df.iloc[train_split_idx].reset_index()
+ val_df = all_labels_df.iloc[val_split_idx].reset_index()
+ # print(len(train_df))
+ # print(len(val_df))
+
+ image_transforms = {'train': train_transforms, 'valid': val_transforms}
+
+ train_dataset = DogDataset(train_df, os.path.join(DATA_ROOT, 'train'), transform=image_transforms['train'])
+ val_dataset = DogDataset(val_df, os.path.join(DATA_ROOT, 'train'), transform=image_transforms['valid'])
+ image_dataset = {'train': train_dataset, 'valid': val_dataset}
+
+ image_dataloader = {x: DataLoader(image_dataset[x], batch_size=BATCH_SIZE, shuffle=True, num_workers=0) for x in
+ dataset_names}
+ dataset_sizes = {x: len(image_dataset[x]) for x in dataset_names}
+
+ # batch = next(iter(train_dataset))
+ # yhat = model_ft(batch[0].unsqueeze(0))
+ # make_dot(yhat).render("resnet_ft")
+
+    # freeze all pretrained parameter layers
+ for param in model_ft.parameters():
+ param.requires_grad = False
+    # print the current fully connected layer
+    print(model_ft.fc)
+    num_fc_ftr = model_ft.fc.in_features  # number of input features of the original fc layer
+    model_ft.fc = nn.Linear(num_fc_ftr, len(breeds))  # replace it with a new fc layer sized for the breed classes
+    model_ft = model_ft.to(DEVICE)  # move the model to the device
+    print(model_ft)  # print the modified model
+
+ criterion = nn.CrossEntropyLoss()
+ optimizer = torch.optim.Adam([
+ {'params': model_ft.fc.parameters()}
+    ], lr=0.001)  # optimize only the parameters of the new fc layer
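+    # because the backbone was frozen above, only these fc parameters receive gradient updates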
+
+ model_parameters = filter(lambda p: p.requires_grad, model_ft.parameters())
+ params = sum([np.prod(p.size()) for p in model_parameters])
+ print('params: {}, start time: {}'.format(params, datetime.now()))
+ for epoch in range(1, 10):
+ train(model=model_ft, device=DEVICE, train_loader=image_dataloader["train"], epoch=epoch)
+ test(model=model_ft, device=DEVICE, test_loader=image_dataloader["valid"])
diff --git a/hpc/mlti_process.py b/hpc/mlti_process.py
new file mode 100644
index 0000000..4a129c4
--- /dev/null
+++ b/hpc/mlti_process.py
@@ -0,0 +1,18 @@
+import os
+from multiprocessing import Process, Pool, Manager
+from random import random
+
+
+def monte_carlo_pi(i, n=5000_0000):
+ print('epoch: {}'.format(i))
+ cnt = 0
+ for i in range(n):
+ x, y = random(), random()
+ if x**2 + y**2 <= 1:
+ cnt += 1
+ return 4*cnt/n
+
+
+if __name__ == '__main__':
+ pool = Pool(os.cpu_count()//2)
+    results = pool.map(monte_carlo_pi, range(10))  # blocks until all worker processes finish
+    print(results)
diff --git a/hpc/mlti_threads.py b/hpc/mlti_threads.py
new file mode 100644
index 0000000..9eb73ac
--- /dev/null
+++ b/hpc/mlti_threads.py
@@ -0,0 +1,33 @@
+import os
+from multiprocessing import Process, Pool, Manager
+from random import random
+from threading import Thread
+
+
+
+def monte_carlo_pi(i, n=5000_0000):
+ print('epoch: {}'.format(i))
+ cnt = 0
+ for i in range(n):
+ x, y = random(), random()
+ if x**2 + y**2 <= 1:
+ cnt += 1
+ return 4*cnt/n
+
+
+if __name__ == '__main__':
+ # pool = Pool(os.cpu_count()//2)
+ # pool.map(monte_carlo_pi, range(10))
+
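+    # note: monte_carlo_pi is CPU-bound, so under CPython's GIL these threads run largely one at a time; compare with mlti_process.py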
+ thread_list = []
+ n_threads = 10
+ for i in range(n_threads):
+ t = Thread(target=monte_carlo_pi, args=(i, ))
+ thread_list.append(t)
+
+ for t in thread_list:
+ t.start()
+
+ for t in thread_list:
+ t.join()
diff --git a/hpc/demo.py b/hpc/mpi_demo.py
index 0adfe17..0adfe17 100644
--- a/hpc/demo.py
+++ b/hpc/mpi_demo.py
diff --git a/learn_torch/learn_nn/custom_module.py b/learn_torch/learn_nn/custom_module.py
new file mode 100644
index 0000000..7052b14
--- /dev/null
+++ b/learn_torch/learn_nn/custom_module.py
@@ -0,0 +1,21 @@
+import torch
+from torch import nn
+
+
+class MySeq(torch.nn.Module):
+ def __init__(self, *args):
+ super().__init__()
+        for idx, block in enumerate(args):
+            # _modules is an OrderedDict that expects string keys; indexing by
+            # position also allows the same block instance to be passed twice
+            self._modules[str(idx)] = block
+
+ def forward(self, X):
+ for block in self._modules.values():
+ X = block(X)
+ return X
+
+
+if __name__ == '__main__':
+ X = torch.rand(2, 20)
+ net = MySeq(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
+ net(X)
+
diff --git a/learn_torch/seq/char_rnn.py b/learn_torch/seq/char_rnn.py
index 82cc924..552266b 100644
--- a/learn_torch/seq/char_rnn.py
+++ b/learn_torch/seq/char_rnn.py
@@ -4,6 +4,8 @@ import glob
import os
import unicodedata
import string
+import torch
+from torch import nn
def find_files(path):
@@ -31,11 +33,71 @@ def build_vocab(filepath):
return category_lines, all_categories
+def letter_to_index(letter):
+ return all_letters.index(letter)
+
+
+def letter_to_tensor(letter):
+ tensor = torch.zeros(1, n_letters)
+ tensor[0][letter_to_index(letter)] = 1
+ return tensor
+
+
+def line_to_tensor(line):
+    tensor = torch.zeros(len(line), 1, n_letters)
+    for i, letter in enumerate(line):
+        tensor[i][0] = letter_to_tensor(letter)
+    return tensor
+
+
+class RNN(torch.nn.Module):
+ def __init__(self, input_size, hidden_size, output_size):
+ super(RNN, self).__init__()
+ self.hidden_size = hidden_size
+ self.i2h = torch.nn.Linear(input_size + hidden_size, hidden_size)
+ self.i2o = torch.nn.Linear(input_size + hidden_size, output_size)
+ self.softmax = torch.nn.LogSoftmax(dim=1)
+
+ def forward(self, input, hidden):
+ combined = torch.cat((input, hidden), 1)
+ hidden = self.i2h(combined)
+ output = self.i2o(combined)
+ output = self.softmax(output)
+ return output, hidden
+
+ def init_hidden(self):
+ return torch.zeros(1, self.hidden_size)
+
+
+def category_from_output(output):
+ top_v, top_i = output.topk(1)
+ category_i = top_i[0].item()
+ return category_i, all_categories[category_i], top_v.item()
+
+
+def train(x, y):
+ hidden = rnn.init_hidden()
+ rnn.zero_grad()
+ for i in range(x.shape[0]):
+ output, hidden = rnn.forward(x[i], hidden)
+ loss = criterion(output, y)
+ loss.backward()
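+    # manual SGD step: move each parameter a small step against its gradient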
+ for p in rnn.parameters():
+ p.data.add_(p.grad.data, alpha=-lr)
+ return output, loss.item()
+
if __name__ == '__main__':
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)
- print('n_letters = {}'.format(n_letters))
+
category_lines, all_categories = build_vocab('../text_data/names/*.txt')
- print(all_categories)
+
+ n_categories = len(all_categories)
+ n_hidden = 128
+ rnn = RNN(n_letters, n_hidden, n_categories)
+ lr = 1e-5
+ criterion = torch.nn.NLLLoss()
+
+ nn.CrossEntropyLoss()
diff --git a/rl/gym_demo/carl_pole.py b/rl/gym_demo/carl_pole.py
new file mode 100644
index 0000000..146c337
--- /dev/null
+++ b/rl/gym_demo/carl_pole.py
@@ -0,0 +1,47 @@
+
+import gym
+import numpy as np
+
+class BespokeAgent:
+ def __init__(self, env):
+ pass
+
+ def decide(self, observation):
+ position, velocity = observation
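+        # hand-tuned policy for MountainCar-v0: push right (action 2) when the velocity falls inside a position-dependent band, otherwise push left (action 0)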
+ lb = min(-0.09*(position + 0.25) ** 2 + 0.03, 0.3*(position + 0.9)**4 - 0.008)
+ ub = -0.07*(position + 0.38) ** 2 + 0.07
+ if lb < velocity < ub:
+ action = 2
+ else:
+ action = 0
+ # print('observation: {}, lb: {}, ub: {} => action: {}'.format(observation, lb, ub, action))
+ return action
+
+ def learn(self, *argg):
+ pass
+
+
+def play(i, agent, env, render=True, train=False):
+ episode_reward = 0
+ observation = env.reset()
+ while True:
+ if render:
+ env.render()
+ action = agent.decide(observation)
+ next_observation, reward, done, _ = env.step(action)
+ episode_reward += reward
+ if train:
+ agent.learn(observation, action, reward, done)
+ if done:
+ env.close()
+ break
+ observation = next_observation
+ print(i, episode_reward)
+ return i, episode_reward
+
+
+if __name__ == '__main__':
+ env = gym.make('MountainCar-v0')
+ agent = BespokeAgent(env)
+ rewards = [play(i, agent, env) for i in range(100)]
+ print(rewards)
diff --git a/vis/animation/text_animation.py b/vis/animation/text_animation.py
new file mode 100644
index 0000000..1558d19
--- /dev/null
+++ b/vis/animation/text_animation.py
@@ -0,0 +1,18 @@
+from matplotlib import pyplot as plt, animation
+
+plt.rcParams["figure.figsize"] = [7.50, 3.50]
+plt.rcParams["figure.autolayout"] = True
+fig, ax = plt.subplots()
+ax.set(xlim=(-1, 1), ylim=(-1, 1))
+string = 'Hello, how are you doing?'
+label = ax.text(0, 0, string[0], ha='center', va='center', fontsize=20, color="Red")
+
+
+def animate(i):
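+    # reveal one more character of the string on each animation frame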
+ label.set_text(string[:i + 1])
+
+
+anim = animation.FuncAnimation(
+ fig, animate, interval=100, frames=len(string))
+ax.axis('off')
+plt.show()
diff --git a/web/flask_proj/app.py b/web/flask_proj/app.py
new file mode 100644
index 0000000..2c3abb8
--- /dev/null
+++ b/web/flask_proj/app.py
@@ -0,0 +1,14 @@
+from flask import Flask, render_template
+
+app = Flask(__name__)
+
+
+@app.route('/')
+@app.route('/home')
+def home():
+ # return "<h1 style='color: blue;'> hello world! </h1>"
+ return render_template('index.html')
+
+
+if __name__ == '__main__':
+ app.run(debug=True, port=5001)
diff --git a/web/flask_proj/templates/index.html b/web/flask_proj/templates/index.html
new file mode 100644
index 0000000..b9be263
--- /dev/null
+++ b/web/flask_proj/templates/index.html
@@ -0,0 +1,20 @@
+<!DOCTYPE html>
+<html>
+<head>
+
+</head>
+<body>
+ <form>
+ <table>
+ <tr>
+ <td> Name: </td>
+ <td> <input type="text" name="" placeholder="name"></td>
+ </tr>
+ <tr>
+ <td> Password: </td>
+ <td> <input type="password" name="" placeholder="password"> </td>
+ </tr>
+ </table>
+ </form>
+</body>
+</html>
diff --git a/web/html_basics.html b/web/html_basics.html
new file mode 100644
index 0000000..08b5577
--- /dev/null
+++ b/web/html_basics.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html>
+<html>
+  <head>
+  </head>
+  <body>
+
+  <p style="color: red"> hello world! </p>
+
+  </body>
+</html>
\ No newline at end of file