How can I use this model offline?

#5
by spylft

I want to use this model offline, but I get different outputs than the online API version (https://api-inference.huggingface.co/models/jtlicardo/bpmn-information-extraction-v2).
Here's my code:
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification

# Load the tokenizer and model from the local folder
tokenizer = AutoTokenizer.from_pretrained("./bpmn-information-extraction-v2")
model = AutoModelForTokenClassification.from_pretrained(
    "./bpmn-information-extraction-v2"
)

inputs = tokenizer("Textual description of the process", return_tensors="pt")
print(inputs)

outputs = model(**inputs)
logits = outputs.logits
predictions = torch.argmax(logits, dim=2)
labels = model.config.id2label
tokens = tokenizer.convert_ids_to_tokens(inputs.input_ids[0])

entities = []
for token, prediction, start in zip(
    tokens, predictions[0].tolist(), inputs.input_ids[0]
):
    if token not in tokenizer.all_special_tokens:
        entity = {
            "entity_group": labels[prediction],
            "score": torch.softmax(logits[0], dim=1)[0, prediction].item(),
            "word": token,
            "start": start,
            "end": start + len(token),
        }
        entities.append(entity)
print(entities)

My code outputs:
[
{'entity_group': 'I-TASK', 'score': 0.21364660561084747, 'word': 'Text', 'start': tensor(18430), 'end': tensor(18434)},
{'entity_group': 'I-TASK', 'score': 0.21364660561084747, 'word': '##ual', 'start': tensor(4746), 'end': tensor(4751)},
{'entity_group': 'I-TASK', 'score': 0.21364660561084747, 'word': 'description', 'start': tensor(6136), 'end': tensor(6147)},
{'entity_group': 'B-TASK_INFO', 'score': 0.035756561905145645, 'word': 'of', 'start': tensor(1104), 'end': tensor(1106)},
{'entity_group': 'I-TASK_INFO', 'score': 0.08219338953495026, 'word': 'the', 'start': tensor(1103), 'end': tensor(1106)},
{'entity_group': 'I-PROCESS_INFO', 'score': 0.1567014753818512, 'word': 'process', 'start': tensor(1965), 'end': tensor(1972)}
]
while the API version outputs:
[{'entity_group': 'TASK',
'score': 0.35820379853248596,
'word': 'Textual description',
'start': 0,
'end': 19},
{'entity_group': 'TASK_INFO',
'score': 0.29744070768356323,
'word': 'of the',
'start': 20,
'end': 26},
{'entity_group': 'PROCESS_INFO',
'score': 0.3056381344795227,
'word': 'process',
'start': 27,
'end': 34}]
What causes the difference between my local model's output and the API version? And what should I do to reproduce the API's output?

It looks like the Hugging Face API applies additional post-processing, such as merging subword tokens and grouping entities, but the underlying predictions are essentially the same.
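
If you just want the grouped output locally, the transformers token-classification pipeline can do this aggregation for you. A minimal sketch, assuming the hosted API uses something close to aggregation_strategy="simple" (I haven't verified the exact setting it applies); you can also point model= at your local "./bpmn-information-extraction-v2" folder:

from transformers import pipeline

# Token classification with entity grouping, similar to the hosted API output
ner = pipeline(
    "token-classification",
    model="jtlicardo/bpmn-information-extraction-v2",
    aggregation_strategy="simple",
)
print(ner("Textual description of the process"))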

If you want to replicate that post-processing step by step, here is a Python script that should do the trick, courtesy of o1-preview:

from transformers import AutoTokenizer, AutoModelForTokenClassification
import torch
import torch.nn.functional as F

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("jtlicardo/bpmn-information-extraction-v2")
model = AutoModelForTokenClassification.from_pretrained(
    "jtlicardo/bpmn-information-extraction-v2"
)

# Use the model's id2label mapping
label_map = model.config.id2label

# Input text
text = "Textual description of the process"
inputs = tokenizer(
    text,
    return_tensors="pt",
    truncation=True,
    padding=True,
    return_offsets_mapping=True,
)

# Move the offsets mapping to CPU and get it as a list
offset_mapping = inputs.pop("offset_mapping")[0].tolist()

# Perform inference
outputs = model(**inputs)
logits = outputs.logits

# Apply softmax to calculate probabilities
probs = F.softmax(logits, dim=2)

# Get the predicted class indices and confidence scores
predicted_class_indices = torch.argmax(logits, dim=2)[0].tolist()
confidence_scores = torch.max(probs, dim=2).values[0].tolist()

# Convert indices to labels
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
predicted_labels = [label_map[idx] for idx in predicted_class_indices]

# Process tokens to extract entities
entities = []
current_entity = None

for idx, (token, label, score, offset) in enumerate(
    zip(tokens, predicted_labels, confidence_scores, offset_mapping)
):
    # Skip special tokens
    if token in tokenizer.all_special_tokens:
        continue

    # Skip tokens with zero-length offset (e.g., spaces)
    if offset[0] == offset[1]:
        continue

    # Handle subword tokens
    if token.startswith("##"):
        token = token[2:]  # Remove '##'
        if current_entity is not None:
            # Append subword token
            current_entity["word"] += token
            current_entity["end"] = offset[1]
            current_entity["scores"].append(score)
        else:
            # Start a new entity (unlikely for subword token)
            current_entity = {
                "entity_group": label[2:] if "-" in label else label,
                "word": token,
                "start": offset[0],
                "end": offset[1],
                "scores": [score],
            }
    else:
        if label.startswith("B-"):
            # Close previous entity if exists
            if current_entity is not None:
                current_entity["score"] = sum(current_entity["scores"]) / len(
                    current_entity["scores"]
                )
                del current_entity["scores"]
                entities.append(current_entity)
            # Start new entity
            current_entity = {
                "entity_group": label[2:],  # Remove 'B-' prefix
                "word": token,
                "start": offset[0],
                "end": offset[1],
                "scores": [score],
            }
        elif label.startswith("I-"):
            entity_type = label[2:]
            if current_entity is not None and current_entity["entity_group"] == entity_type:
                # Continue entity
                if offset[0] == current_entity["end"]:
                    current_entity["word"] += token
                else:
                    current_entity["word"] += " " + token
                current_entity["end"] = offset[1]
                current_entity["scores"].append(score)
            else:
                # Close current entity if it exists
                if current_entity is not None:
                    current_entity["score"] = sum(current_entity["scores"]) / len(
                        current_entity["scores"]
                    )
                    del current_entity["scores"]
                    entities.append(current_entity)
                # Start new entity (handling I- without preceding B-)
                current_entity = {
                    "entity_group": entity_type,
                    "word": token,
                    "start": offset[0],
                    "end": offset[1],
                    "scores": [score],
                }
        else:
            # Handle 'O' or other cases
            if current_entity is not None:
                current_entity["score"] = sum(current_entity["scores"]) / len(
                    current_entity["scores"]
                )
                del current_entity["scores"]
                entities.append(current_entity)
                current_entity = None

# Add any remaining entity
if current_entity is not None:
    current_entity["score"] = sum(current_entity["scores"]) / len(
        current_entity["scores"]
    )
    del current_entity["scores"]
    entities.append(current_entity)

# Print entities
for entity in entities:
    print(entity)

Let me know if this works!

Thanks for your kind reply! It generates the same output as the API version. I'm a Chinese student, so I can't easily use the Hugging Face API or host my own API endpoint QwQ. That's why I want to use the model offline.
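
For fully offline use, one option is to download the model files once (on any machine or network that can reach the Hub) and then load everything from disk. A minimal sketch, assuming the huggingface_hub package is installed:

from huggingface_hub import snapshot_download
from transformers import AutoModelForTokenClassification, AutoTokenizer

# One-time download of the model repository into the local cache
local_dir = snapshot_download("jtlicardo/bpmn-information-extraction-v2")

# local_files_only=True prevents any network calls at load time
tokenizer = AutoTokenizer.from_pretrained(local_dir, local_files_only=True)
model = AutoModelForTokenClassification.from_pretrained(
    local_dir, local_files_only=True
)

Setting the environment variable HF_HUB_OFFLINE=1 has a similar effect globally.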
