summaryrefslogtreecommitdiff
path: root/fine_tune/bert/tutorials
diff options
context:
space:
mode:
author: zhang <zch921005@126.com> 2022-07-03 12:49:17 +0800
committer: zhang <zch921005@126.com> 2022-07-03 12:49:17 +0800
commit: 8131406d83acad62de49ad51f219b3d2dba562d8 (patch)
tree: 223b630dcd131bbd1d1e061a3c45dd719f2238eb /fine_tune/bert/tutorials
parent: 8c1d5025e8a7c341ea651821222229ca75cd8208 (diff)
torch.no_grad vs. requires_grad
Diffstat (limited to 'fine_tune/bert/tutorials')
-rw-r--r--  fine_tune/bert/tutorials/03_bert_input_embedding.py          | 20
-rw-r--r--  fine_tune/bert/tutorials/samples/BERT-embeddings-2.png       | bin 0 -> 23064 bytes
2 files changed, 20 insertions, 0 deletions
diff --git a/fine_tune/bert/tutorials/03_bert_input_embedding.py b/fine_tune/bert/tutorials/03_bert_input_embedding.py
new file mode 100644
index 0000000..41bdd29
--- /dev/null
+++ b/fine_tune/bert/tutorials/03_bert_input_embedding.py
@@ -0,0 +1,20 @@
+
+from transformers import BertTokenizer, BertModel
+from transformers.models.bert import BertModel
+import torch
+from torch import nn
+
+
+model_name = 'bert-base-uncased'
+
+tokenizer = BertTokenizer.from_pretrained(model_name)
+model = BertModel.from_pretrained(model_name, output_hidden_states=True)
+
+test_sent = 'this is a test sentence'
+
+model_input = tokenizer(test_sent, return_tensors='pt')
+
+
+model.eval()
+with torch.no_grad():
+ output = model(**model_input)
diff --git a/fine_tune/bert/tutorials/samples/BERT-embeddings-2.png b/fine_tune/bert/tutorials/samples/BERT-embeddings-2.png
new file mode 100644
index 0000000..9b7072e
--- /dev/null
+++ b/fine_tune/bert/tutorials/samples/BERT-embeddings-2.png
Binary files differ