kuelumbus committed on
Commit
1af0d58
·
1 Parent(s): acc0602

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -4
README.md CHANGED
@@ -33,8 +33,8 @@ Then you can use the model like this:
33
  from sentence_transformers import SentenceTransformer
34
  psmiles_strings = ["[*]CC[*]", "[*]COC[*]"]
35
 
36
- model = SentenceTransformer('kuelumbus/polyBERT')
37
- embeddings = model.encode(psmiles_strings)
38
  print(embeddings)
39
  ```
40
 
@@ -60,14 +60,14 @@ psmiles_strings = ["[*]CC[*]", "[*]COC[*]"]
60
 
61
  # Load model from HuggingFace Hub
62
  tokenizer = AutoTokenizer.from_pretrained('kuelumbus/polyBERT')
63
- model = AutoModel.from_pretrained('kuelumbus/polyBERT')
64
 
65
  # Tokenize sentences
66
  encoded_input = tokenizer(psmiles_strings, padding=True, truncation=True, return_tensors='pt')
67
 
68
  # Compute token embeddings
69
  with torch.no_grad():
70
- model_output = model(**encoded_input)
71
 
72
  # Perform pooling. In this case, mean pooling.
73
  fingerprints = mean_pooling(model_output, encoded_input['attention_mask'])
 
33
  from sentence_transformers import SentenceTransformer
34
  psmiles_strings = ["[*]CC[*]", "[*]COC[*]"]
35
 
36
+ polyBERT = SentenceTransformer('kuelumbus/polyBERT')
37
+ embeddings = polyBERT.encode(psmiles_strings)
38
  print(embeddings)
39
  ```
40
 
 
60
 
61
  # Load model from HuggingFace Hub
62
  tokenizer = AutoTokenizer.from_pretrained('kuelumbus/polyBERT')
63
+ polyBERT = AutoModel.from_pretrained('kuelumbus/polyBERT')
64
 
65
  # Tokenize sentences
66
  encoded_input = tokenizer(psmiles_strings, padding=True, truncation=True, return_tensors='pt')
67
 
68
  # Compute token embeddings
69
  with torch.no_grad():
70
+ model_output = polyBERT(**encoded_input)
71
 
72
  # Perform pooling. In this case, mean pooling.
73
  fingerprints = mean_pooling(model_output, encoded_input['attention_mask'])