author    zhang <zch921005@126.com>  2022-07-03 12:49:17 +0800
committer zhang <zch921005@126.com>  2022-07-03 12:49:17 +0800
commit    8131406d83acad62de49ad51f219b3d2dba562d8 (patch)
tree      223b630dcd131bbd1d1e061a3c45dd719f2238eb
parent    8c1d5025e8a7c341ea651821222229ca75cd8208 (diff)
torch.no_grad vs. requires_grad
-rw-r--r--  fine_tune/bert/tutorials/03_bert_input_embedding.py    20
-rw-r--r--  fine_tune/bert/tutorials/samples/BERT-embeddings-2.png  bin 0 -> 23064 bytes
-rw-r--r--  fine_tune/bert_parameters.py                            2
-rw-r--r--  fine_tune/input_output.py                               13
-rw-r--r--  myweb/demo/app.py                                       11
-rw-r--r--  myweb/demo/templates/index.html                         4
6 files changed, 50 insertions, 0 deletions
diff --git a/fine_tune/bert/tutorials/03_bert_input_embedding.py b/fine_tune/bert/tutorials/03_bert_input_embedding.py
new file mode 100644
index 0000000..41bdd29
--- /dev/null
+++ b/fine_tune/bert/tutorials/03_bert_input_embedding.py
@@ -0,0 +1,20 @@
+
+from transformers import BertTokenizer, BertModel
+import torch
+
+# bert-base-uncased: 12 layers, hidden size 768
+model_name = 'bert-base-uncased'
+
+tokenizer = BertTokenizer.from_pretrained(model_name)
+# output_hidden_states=True makes the model also return every layer's hidden states
+model = BertModel.from_pretrained(model_name, output_hidden_states=True)
+
+test_sent = 'this is a test sentence'
+
+# the tokenizer returns input_ids, token_type_ids and attention_mask as tensors
+model_input = tokenizer(test_sent, return_tensors='pt')
+
+# eval() disables dropout; no_grad() stops autograd from recording the forward pass
+model.eval()
+with torch.no_grad():
+    output = model(**model_input)
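
The commit subject contrasts torch.no_grad with requires_grad; the two operate at different levels. torch.no_grad() is a context manager that suppresses graph construction for everything executed inside it, whereas requires_grad is a per-tensor flag that freezes individual tensors or parameters. A minimal sketch of the difference (the tensors here are illustrative, not from this repo):

import torch

x = torch.ones(3, requires_grad=True)

# inside no_grad(), nothing is tracked, even for tensors that require grad
with torch.no_grad():
    y = x * 2
print(y.requires_grad)  # False

# outside the context, the same operation is tracked again
z = x * 2
print(z.requires_grad)  # True

# requires_grad_(False) instead freezes one tensor everywhere it is used
w = torch.ones(3, requires_grad=True)
w.requires_grad_(False)
print((w * 2).requires_grad)  # False
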
diff --git a/fine_tune/bert/tutorials/samples/BERT-embeddings-2.png b/fine_tune/bert/tutorials/samples/BERT-embeddings-2.png
new file mode 100644
index 0000000..9b7072e
--- /dev/null
+++ b/fine_tune/bert/tutorials/samples/BERT-embeddings-2.png
Binary files differ
diff --git a/fine_tune/bert_parameters.py b/fine_tune/bert_parameters.py
index 98e2b15..bf6e8c1 100644
--- a/fine_tune/bert_parameters.py
+++ b/fine_tune/bert_parameters.py
@@ -9,6 +9,8 @@ model_name = 'bert-base-uncased'
model = BertModel.from_pretrained(model_name)
cls_model = BertForSequenceClassification.from_pretrained(model_name)
+
+
total_params = 0
total_learnable_params = 0
total_embedding_params = 0
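
The counters above imply a loop over the model's parameters further down the file; a plausible shape for that loop, using requires_grad to separate learnable weights (the loop body is an assumption, only the counter names come from this hunk):

for name, p in model.named_parameters():
    total_params += p.numel()
    if p.requires_grad:
        total_learnable_params += p.numel()
    if 'embeddings' in name:
        total_embedding_params += p.numel()

print(f'total={total_params}, learnable={total_learnable_params}, embeddings={total_embedding_params}')
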
diff --git a/fine_tune/input_output.py b/fine_tune/input_output.py
new file mode 100644
index 0000000..684ded5
--- /dev/null
+++ b/fine_tune/input_output.py
@@ -0,0 +1,13 @@
+
+from transformers import BertModel, BertTokenizer
+
+model_name = 'bert-base-uncased'
+
+tokenizer = BertTokenizer.from_pretrained(model_name)
+model = BertModel.from_pretrained(model_name)
+
+raw_sentences = ['Tom likes cats', 'Liz likes dogs']
+
+# encode_plus packs the pair into one sequence: [CLS] s1 [SEP] s2 [SEP]
+inputs = tokenizer.encode_plus(raw_sentences[0], raw_sentences[1], return_tensors='pt')
+outputs = model(**inputs)
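
For a sentence pair, encode_plus returns one packed sequence whose token_type_ids mark which segment each token belongs to; a quick way to inspect it (a sketch, assuming a transformers version whose model call returns an output object; the exact word-piece split may differ):

print(tokenizer.convert_ids_to_tokens(inputs['input_ids'][0]))
# e.g. ['[CLS]', 'tom', 'likes', 'cats', '[SEP]', 'liz', 'likes', 'dogs', '[SEP]']
print(inputs['token_type_ids'])          # 0 for the first sentence, 1 for the second
print(outputs.last_hidden_state.shape)   # (1, seq_len, 768) for bert-base
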
diff --git a/myweb/demo/app.py b/myweb/demo/app.py
new file mode 100644
index 0000000..86a9b08
--- /dev/null
+++ b/myweb/demo/app.py
@@ -0,0 +1,11 @@
+from flask import Flask, render_template
+
+app = Flask(__name__)
+
+@app.route('/')
+def index():
+ return render_template('index.html')
+
+if __name__ == '__main__':
+ app.run()
+
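
The demo route can be checked without starting a server by using Flask's test client; a minimal sketch (assuming the module is importable as app):

from app import app

with app.test_client() as client:
    response = client.get('/')
    print(response.status_code)  # 200
    print(response.data)         # the rendered index.html
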
diff --git a/myweb/demo/templates/index.html b/myweb/demo/templates/index.html
new file mode 100644
index 0000000..c5c4431
--- /dev/null
+++ b/myweb/demo/templates/index.html
@@ -0,0 +1,4 @@
+
+<html>
+hello world!
+</html> \ No newline at end of file