Bot for Telegram
This commit is contained in:
parent
3775463640
commit
c1b2fba6ee
|
@ -0,0 +1,62 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
from telegram.ext import Updater
|
||||
from telegram.ext import CommandHandler
|
||||
from telegram.ext import MessageHandler, Filters
|
||||
import logging
|
||||
|
||||
# Pull the dialogue-model helpers (presumably tokenizer(), evaluate(),
# text_field, model, ... — confirm against dialrequirements.py) into this
# module's namespace by executing the sibling script inline.
# NOTE(review): exec(open(...).read()) runs arbitrary code, hides the
# imported names from static analysis, and breaks if the CWD differs —
# a regular `import` would be safer.
exec(open("dialrequirements.py").read())
|
||||
|
||||
|
||||
def start(update, context):
    """Handle the /start command: greet the user (Basque: "Welcome to the conversation")."""
    chat = update.effective_chat.id
    context.bot.send_message(chat_id=chat, text="Ongi etorri elkarrizketara.")
|
||||
|
||||
|
||||
def echo(update, context):
    """Send the incoming message text straight back to the same chat.

    NOTE(review): no handler registers this function in the visible
    main(), so it appears unused here — confirm before removing.
    """
    chat = update.effective_chat.id
    context.bot.send_message(chat_id=chat, text=update.message.text)
|
||||
|
||||
|
||||
def reply(update, context):
    """Answer an incoming text message.

    The literal "Agur" ("goodbye") is echoed back and the updater is
    stopped; any other text is tokenized, fed to the seq2seq model via
    evaluate(), and the generated sentence is sent back capitalized.
    """
    text = update.message.text
    chat = update.effective_chat.id

    if text == "Agur":
        context.bot.send_message(chat_id=chat, text="Agur")
        # NOTE(review): calling stop() from inside a handler thread can
        # hang in python-telegram-bot — confirm this shuts down cleanly.
        updater.stop()
        return

    answer = evaluate(" ".join(tokenizer(text)))
    context.bot.send_message(chat_id=chat, text=answer.strip().capitalize())
|
||||
|
||||
# SECURITY NOTE(review): the bot token is hard-coded and now public in
# version control — it should be revoked and loaded from an environment
# variable or a config file instead.
updater = Updater(
    token="1213404233:AAHYV6hxaQVnypuhJytZPPtETufMZKkKS3Y", use_context=True
)

# Dispatcher that routes incoming updates to the handlers added in main().
dispatcher = updater.dispatcher

# Log timestamp, logger name and level for every record at INFO and above.
logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    level=logging.INFO,
)
|
||||
|
||||
def main():
    """Register the Telegram handlers and start long polling."""
    # /start greets the user.
    dispatcher.add_handler(CommandHandler("start", start))

    # Every plain-text, non-command message goes to the conversational model.
    dispatcher.add_handler(
        MessageHandler(Filters.text & (~Filters.command), reply)
    )

    print("Start")
    updater.start_polling()


if __name__ == "__main__":
    main()
|
|
@ -0,0 +1,75 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import torch
|
||||
import random
|
||||
from argparse import ArgumentParser
|
||||
from model import *
|
||||
from spacy.lang.eu import Basque
|
||||
|
||||
# Rule-based Basque tokenizer pipeline from spaCy (no trained model required).
nlp = Basque()


def tokenizer(s):
    """Split string *s* into a list of token strings using spaCy's Basque rules."""
    return [token.text for token in nlp(s)]
|
||||
|
||||
|
||||
# CLI: choose how the decoder picks each output token.  Parsed at import
# time, so importing this module consumes sys.argv — fine for a script,
# surprising for a library.
parser = ArgumentParser(
    description="Azpitituluetan oinarritutako elkarrizketa \
sistemaren proba"
)

parser.add_argument(
    "-decoding_strategy",
    type=str,
    default="top1",
    choices=["top1", "topk", "multinomial"],
)

args = parser.parse_args()
|
||||
|
||||
|
||||
def decode(logits, decoding_strategy="max", k=3, temp=0.4):
    """Pick the next-token index from a row of decoder logits.

    Args:
        logits: 2-D score tensor, shape (1, vocab_size).
        decoding_strategy: "top1" = greedy argmax; "topk" = uniform pick
            among the k best; anything else falls through to
            temperature-scaled multinomial sampling.
        k: candidate-pool size for "topk".
        temp: temperature divisor for multinomial sampling.

    Returns:
        A 1-element long tensor holding the chosen vocabulary index.
    """
    if decoding_strategy == "top1":
        # Greedy: argmax over the vocabulary dimension.
        return logits.max(1)[1]
    if decoding_strategy == "topk":
        # Uniformly sample one of the k highest-scoring indices.
        candidates = logits.topk(k)[1][0]
        return candidates[random.randint(0, k - 1)].unsqueeze(-1)
    # NOTE(review): the default "max" is not one of the CLI choices and
    # lands here, i.e. it *samples* — confirm that is intended.
    scores = logits.squeeze().div(temp).exp().cpu()
    return torch.multinomial(scores, 1)
|
||||
|
||||
|
||||
def evaluate(sentence):
    """Generate a reply to *sentence* with the loaded encoder/decoder model.

    The input is wrapped in <sos>/<eos> markers, lower-cased, mapped to
    vocabulary ids, encoded, then decoded token by token (at most
    MAX_LENGTH tokens) using the module-level decoding strategy.
    Returns the generated words as a single space-prefixed string.
    """
    with torch.no_grad():
        # Token count is taken before lower-casing (same result either way).
        marked = "<sos> " + sentence + " <eos>"
        n_tokens = len(marked.split())
        ids = [text_field.vocab.stoi[tok] for tok in marked.lower().split()]
        src = torch.Tensor(ids).long().view(n_tokens, 1)

        # Decoding starts from the <sos> token.
        target = torch.Tensor([text_field.vocab.stoi["<sos>"]]).long()
        encoder_outputs, hidden = model.encoder(src)

        pieces = []
        for _ in range(MAX_LENGTH):
            output, hidden = model.decoder(target, hidden, encoder_outputs)
            target = decode(output, decoding_strategy)
            word = text_field.vocab.itos[target.numpy()[0]]
            if word == "<eos>":
                break
            pieces.append(" " + word)
        return "".join(pieces)
|
||||
|
||||
|
||||
# Load model and fields
# NOTE(review): torch.load unpickles arbitrary objects — acceptable only
# because these are locally produced files; never point it at untrusted data.
text_field = torch.load("../model/text_field.Field")
model = torch.load("../model/model.pt", map_location=torch.device("cpu"))
# NOTE(review): dump_patches is toggled *after* the load above, so it cannot
# have affected that load — confirm whether this line is still needed.
torch.nn.Module.dump_patches = True
# Hard cap on the number of tokens generated per reply.
MAX_LENGTH = 10


# Inference mode: disable dropout / batch-norm training behaviour.
model.eval()
decoding_strategy = args.decoding_strategy
|
Loading…
Reference in New Issue