From ed026519d959ecc60a895f379c228de5df77ffb0 Mon Sep 17 00:00:00 2001
From: zhang
Date: Sun, 19 Jun 2022 09:20:25 +0800
Subject: daily update

---
 learn_keras/seq2seq.py              |  0
 learn_torch/bert/fill_mask.py       | 28 ++++++++++++++++++++
 learn_torch/seq2seq/base_model.py   | 51 +++++++++++++++++++++++++++++++++++++
 pretrained/swin/603_timm_swin.ipynb |  3 +--
 pretrained/swin/demo.py             |  1 +
 web/learn_canvas/basics/index.html  | 39 +++++++++++++++++++++++++++-
 6 files changed, 119 insertions(+), 3 deletions(-)
 create mode 100644 learn_keras/seq2seq.py
 create mode 100644 learn_torch/bert/fill_mask.py
 create mode 100644 learn_torch/seq2seq/base_model.py

diff --git a/learn_keras/seq2seq.py b/learn_keras/seq2seq.py
new file mode 100644
index 0000000..e69de29
diff --git a/learn_torch/bert/fill_mask.py b/learn_torch/bert/fill_mask.py
new file mode 100644
index 0000000..24e177f
--- /dev/null
+++ b/learn_torch/bert/fill_mask.py
@@ -0,0 +1,28 @@
+
+import torch
+from datasets import load_dataset
+from transformers import BertTokenizer
+
+
+# Define the dataset
+class Dataset(torch.utils.data.Dataset):
+    def __init__(self, split):
+        dataset = load_dataset(path='seamew/ChnSentiCorp', split=split)
+
+        def f(data):
+            return len(data['text']) > 30
+
+        self.dataset = dataset.filter(f)
+
+    def __len__(self):
+        return len(self.dataset)
+
+    def __getitem__(self, i):
+        text = self.dataset[i]['text']
+
+        return text
+
+if __name__ == '__main__':
+    dataset = Dataset('train')
+    print(len(dataset), dataset[0])
+    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
diff --git a/learn_torch/seq2seq/base_model.py b/learn_torch/seq2seq/base_model.py
new file mode 100644
index 0000000..bc292cf
--- /dev/null
+++ b/learn_torch/seq2seq/base_model.py
@@ -0,0 +1,51 @@
+import torch
+import torch.nn as nn
+import torch.optim as optim
+
+from torchtext.legacy.datasets import Multi30k
+from torchtext.legacy.data import Field, BucketIterator
+
+import spacy
+import numpy as np
+
+import random
+import math
+import time
+
+
+SEED = 1234
+
+random.seed(SEED)
+np.random.seed(SEED)
+torch.manual_seed(SEED)
+torch.cuda.manual_seed(SEED)
+torch.backends.cudnn.deterministic = True
+
+spacy_de = spacy.load('de_core_news_sm')
+spacy_en = spacy.load('en_core_web_sm')
+
+def tokenize_de(text):
+    """
+    Tokenizes German text from a string into a list of strings (tokens) and reverses it
+    """
+    return [tok.text for tok in spacy_de.tokenizer(text)][::-1]
+
+def tokenize_en(text):
+    """
+    Tokenizes English text from a string into a list of strings (tokens)
+    """
+    return [tok.text for tok in spacy_en.tokenizer(text)]
+
+SRC = Field(tokenize = tokenize_de,
+            init_token = '<sos>',
+            eos_token = '<eos>',
+            lower = True)
+
+TRG = Field(tokenize = tokenize_en,
+            init_token = '<sos>',
+            eos_token = '<eos>',
+            lower = True)
+
+train_data, valid_data, test_data = Multi30k.splits(exts = ('.de', '.en'),
+                                                    fields = (SRC, TRG))
+
diff --git a/pretrained/swin/603_timm_swin.ipynb b/pretrained/swin/603_timm_swin.ipynb
index 5e4ef1c..2d0cbf2 100644
--- a/pretrained/swin/603_timm_swin.ipynb
+++ b/pretrained/swin/603_timm_swin.ipynb
@@ -320,8 +320,7 @@
     "ExecuteTime": {
      "end_time": "2022-06-03T03:33:39.593106Z",
      "start_time": "2022-06-03T03:33:39.583112Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [
     {
diff --git a/pretrained/swin/demo.py b/pretrained/swin/demo.py
index 6f8d87b..3b6d9d9 100644
--- a/pretrained/swin/demo.py
+++ b/pretrained/swin/demo.py
@@ -20,6 +20,7 @@ if __name__ == '__main__':
     batch_input = transformed.unsqueeze(0)
     print(batch_input.shape)
 
+    timm.list_models()
     model = timm.create_model('swin_base_patch4_window7_224', pretrained=True)
 
     with torch.no_grad():
diff --git a/web/learn_canvas/basics/index.html b/web/learn_canvas/basics/index.html
index 3c0cfb8..e3d6e7f 100644
--- a/web/learn_canvas/basics/index.html
+++ b/web/learn_canvas/basics/index.html
@@ -3,9 +3,46 @@
     <title>Title</title>
+
+
-
+
\ No newline at end of file
-- 
cgit v1.2.3
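
A note on the new fill_mask.py: as committed, the script only loads the ChnSentiCorp dataset and the bert-base-chinese tokenizer; no mask is actually filled yet. Below is a minimal sketch of the fill-mask step the file name suggests, assuming the standard transformers BertForMaskedLM API; the example sentence and the top-5 choice are illustrative assumptions, not part of the commit.

    import torch
    from transformers import BertForMaskedLM, BertTokenizer

    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
    model = BertForMaskedLM.from_pretrained('bert-base-chinese')
    model.eval()

    # Mask one character in a short review-style sentence (illustrative, not from the dataset).
    text = '这家酒店的服务很' + tokenizer.mask_token + '。'
    inputs = tokenizer(text, return_tensors='pt')

    with torch.no_grad():
        logits = model(**inputs).logits

    # Locate the [MASK] position and list the five most likely replacement tokens.
    mask_pos = (inputs['input_ids'][0] == tokenizer.mask_token_id).nonzero(as_tuple=True)[0]
    top5 = torch.topk(logits[0, mask_pos[0]], k=5).indices
    print(tokenizer.convert_ids_to_tokens(top5.tolist()))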