Using a pretrained transformers model

from transformers import AutoModelForSequenceClassification, AutoTokenizer
from transformers import pipeline

# Download the fine-tuned classifier and its tokenizer from the Hugging Face Hub
MODEL_PATH = "Cleighton071/autotrain-detection-for-product-location-44269111684"
model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH)
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)

# "sentiment-analysis" is an alias for the text-classification pipeline task
classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
classifier('i love you')

# [{'label': 'Location', 'score': 0.9967827796936035}]
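
The pipeline also accepts a list of texts and returns one result per input. A minimal sketch, assuming the `classifier` object created above; the second input string is just an illustrative example:

texts = ["i love you", "the package is in the warehouse"]
results = classifier(texts)               # one {'label', 'score'} dict per input text
for text, res in zip(texts, results):
    print(f"{text!r} -> {res['label']} ({res['score']:.4f})")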

Getting the metrics (class probabilities):

import torch
from torch import nn

# Tokenize the input and run a forward pass without tracking gradients
inputs = tokenizer('i love you', return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Convert the raw logits into class probabilities
pt_predictions = nn.functional.softmax(outputs.logits, dim=-1)
print(pt_predictions)

# tensor([[0.0032, 0.9968]])
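
To turn these probabilities back into a human-readable label (which is what the pipeline does internally), take the argmax over the classes and look the index up in the model config. A minimal sketch, assuming `pt_predictions` and `model` from the code above; the printed label is illustrative:

# Index of the most probable class
predicted_id = pt_predictions.argmax(dim=-1).item()
# model.config.id2label maps class indices to label names
print(model.config.id2label[predicted_id], pt_predictions[0, predicted_id].item())
# e.g. Location 0.9967827796936035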