Add files via upload
This commit is contained in:
parent
72696e8a80
commit
9c2d826362
|
@ -0,0 +1,683 @@
|
||||||
|
import socket
import time

import gevent
try:
    from gevent.coros import RLock  # gevent < 1.0
except ImportError:  # FIX: was a bare `except:`, which hid unrelated import errors
    from gevent.lock import RLock  # gevent >= 1.0

from Config import config
from Debug import Debug
from util import Msgpack
from Crypt import CryptConnection
from util import helper
||||||
|
class Connection(object):
    """A single peer connection (incoming or outgoing) speaking the msgpack-based protocol."""

    # __slots__ keeps per-connection memory low; a server may hold many Connection objects.
    __slots__ = (
        "sock", "sock_wrapped", "ip", "port", "cert_pin", "target_onion", "id", "protocol", "type", "server", "unpacker", "unpacker_bytes", "req_id", "ip_type",
        "handshake", "crypt", "connected", "connecting", "event_connected", "closed", "start_time", "handshake_time", "last_recv_time", "is_private_ip", "is_tracker_connection",
        "last_message_time", "last_send_time", "last_sent_time", "incomplete_buff_recv", "bytes_recv", "bytes_sent", "cpu_time", "send_lock",
        "last_ping_delay", "last_req_time", "last_cmd_sent", "last_cmd_recv", "bad_actions", "sites", "name", "waiting_requests", "waiting_streams"
    )
|
def __init__(self, server, ip, port, sock=None, target_onion=None, is_tracker_connection=False):
    """Initialize connection state.

    server: owning ConnectionServer; ip may carry a certificate pin as "ip#pin".
    sock: already-accepted socket for incoming connections, None for outgoing.
    """
    self.server = server
    self.sock = sock
    self.cert_pin = None
    if "#" in ip:
        # "ip#cert_pin" form: split off the expected TLS certificate pin
        ip, self.cert_pin = ip.split("#")
    self.target_onion = target_onion  # Requested onion address
    self.id = server.last_connection_id
    server.last_connection_id += 1
    self.protocol = "?"
    self.type = "?"   # "in" or "out", set when the connection direction is known
    self.ip_type = "?"
    self.port = int(port)
    self.setIp(ip)

    # Local-network peers are private unless explicitly whitelisted in config.ip_local
    if helper.isPrivateIp(self.ip) and self.ip not in config.ip_local:
        self.is_private_ip = True
    else:
        self.is_private_ip = False
    self.is_tracker_connection = is_tracker_connection

    self.unpacker = None  # Stream incoming socket messages here
    self.unpacker_bytes = 0  # How many bytes the unpacker received
    self.req_id = 0  # Last request id
    self.handshake = {}  # Handshake info got from peer
    self.crypt = None  # Connection encryption method
    self.sock_wrapped = False  # Socket wrapped to encryption

    self.connecting = False
    self.connected = False
    self.event_connected = gevent.event.AsyncResult()  # Solves on handshake received
    self.closed = False

    # Stats
    self.start_time = time.time()
    self.handshake_time = 0
    self.last_recv_time = 0
    self.last_message_time = 0
    self.last_send_time = 0
    self.last_sent_time = 0
    self.incomplete_buff_recv = 0
    self.bytes_recv = 0
    self.bytes_sent = 0
    self.last_ping_delay = None
    self.last_req_time = 0
    self.last_cmd_sent = None
    self.last_cmd_recv = None
    self.bad_actions = 0
    self.sites = 0
    self.cpu_time = 0.0
    self.send_lock = RLock()  # Serializes writes so concurrent greenlets can't interleave frames

    self.name = None
    self.updateName()

    self.waiting_requests = {}  # Waiting sent requests
    self.waiting_streams = {}  # Waiting response file streams
|
def setIp(self, ip):
    """Set the peer address, re-derive its type (ipv4/ipv6/onion) and refresh the log name."""
    self.ip = ip
    self.ip_type = self.server.getIpType(ip)
    self.updateName()
|
def createSocket(self):
    """Return a new TCP socket in the address family matching the peer's ip type.

    NOTE(review): `socket_noproxy` looks like a marker left by a proxy
    monkey-patch; when it is present IPv6 peers are contacted through an
    AF_INET socket — confirm against the proxy setup code.
    """
    needs_ipv6 = (
        self.server.getIpType(self.ip) == "ipv6"
        and not hasattr(socket, "socket_noproxy")
    )
    family = socket.AF_INET6 if needs_ipv6 else socket.AF_INET
    return socket.socket(family, socket.SOCK_STREAM)
|
def updateName(self):
    """Rebuild the human-readable connection name used as a prefix in log lines."""
    self.name = "Conn#%2s %-12s [%s]" % (self.id, self.ip, self.protocol)
|
def __str__(self):
    """Display name, e.g. 'Conn# 1 1.2.3.4 [v2]'."""
    return self.name
|
def __repr__(self):
    """Debug representation: the display name wrapped in angle brackets."""
    return "<%s>" % self.__str__()
|
def log(self, text):
    """Debug-log `text` prefixed with this connection's name on the server's logger."""
    self.server.log.debug("%s > %s" % (self.name, text))
|
def getValidSites(self):
    """Return site keys whose registered onion matches the onion requested on this connection."""
    matching_sites = []
    for site_key, onion in self.server.tor_manager.site_onions.items():
        if onion == self.target_onion:
            matching_sites.append(site_key)
    return matching_sites
|
def badAction(self, weight=1):
    """Penalize peer misbehavior: throttle once the score exceeds 20, drop it past 40."""
    self.bad_actions += weight
    if self.bad_actions > 40:
        self.close("Too many bad actions")
        return
    if self.bad_actions > 20:
        time.sleep(5)  # Slow the peer down instead of dropping it outright
|
def goodAction(self):
    """Reset the misbehavior score after a successful interaction."""
    self.bad_actions = 0
|
# Open connection to peer and wait for handshake
def connect(self):
    """Attempt the outgoing connection; on any failure clear the state flags and re-raise."""
    self.connecting = True
    try:
        return self._connect()
    except Exception:
        self.connecting = False
        self.connected = False
        raise
|
def _connect(self):
    """Open the outgoing socket (direct, Tor or SOCKS proxy), optionally wrap it
    in TLS, send our handshake and block until the peer's handshake arrives.

    Returns the handshake result from event_connected; raises on unreachable
    peers or on mandatory-encryption failures.
    """
    self.updateOnlineStatus(outgoing_activity=True)

    # Re-arm the handshake event if a previous attempt already resolved it
    if not self.event_connected or self.event_connected.ready():
        self.event_connected = gevent.event.AsyncResult()

    self.type = "out"

    unreachability = self.server.getIpUnreachability(self.ip)
    if unreachability:
        raise Exception(unreachability)

    # Pick the transport: Tor for .onion, configured proxy for tracker
    # connections, plain socket otherwise
    if self.ip_type == "onion":
        self.sock = self.server.tor_manager.createSocket(self.ip, self.port)
    elif config.trackers_proxy != "disable" and config.tor != "always" and self.is_tracker_connection:
        if config.trackers_proxy == "tor":
            self.sock = self.server.tor_manager.createSocket(self.ip, self.port)
        else:
            import socks
            self.sock = socks.socksocket()
            proxy_ip, proxy_port = config.trackers_proxy.split(":")
            self.sock.set_proxy(socks.PROXY_TYPE_SOCKS5, proxy_ip, int(proxy_port))
    else:
        self.sock = self.createSocket()

    if "TCP_NODELAY" in dir(socket):
        self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

    # Use a short timeout only for the connect/handshake phase, then restore
    timeout_before = self.sock.gettimeout()
    self.sock.settimeout(30)
    if self.ip_type == "ipv6" and not hasattr(self.sock, "proxy"):
        # AF_INET6 address tuple needs flowinfo/scopeid fields
        sock_address = (self.ip, self.port, 1, 1)
    else:
        sock_address = (self.ip, self.port)

    self.sock.connect(sock_address)

    if self.shouldEncrypt():
        try:
            self.wrapSocket()
        except Exception as err:
            if self.sock:
                self.sock.close()
                self.sock = None
            if self.mustEncrypt():
                raise
            # Optional TLS failed: remember the peer as broken-ssl and retry plaintext
            self.log("Crypt connection error, adding %s:%s as broken ssl. %s" % (self.ip, self.port, Debug.formatException(err)))
            self.server.broken_ssl_ips[self.ip] = True
            return self.connect()

    # Detect protocol
    event_connected = self.event_connected
    self.send({"cmd": "handshake", "req_id": 0, "params": self.getHandshakeInfo()})
    self.server.outgoing_pool.spawn(self.messageLoop)
    connect_res = event_connected.get()  # Wait for handshake
    if self.sock:
        self.sock.settimeout(timeout_before)
    return connect_res
|
def mustEncrypt(self):
    """Whether encryption is mandatory (cert pin set, or forced for non-onion peers)."""
    if self.cert_pin:
        return True
    if self.ip_type != "onion" and config.force_encryption:
        return True
    return False
||||||
|
def shouldEncrypt(self):
    """Whether to attempt TLS: always when mandatory, otherwise only for clearnet
    peers not known to have broken SSL, when we support tls-rsa ourselves."""
    if self.mustEncrypt():
        return True
    if self.ip_type == "onion":
        return False  # Tor already encrypts onion traffic
    if self.ip in self.server.broken_ssl_ips:
        return False
    if self.ip in config.ip_local:
        return False
    return "tls-rsa" in CryptConnection.manager.crypt_supported
||||||
|
def wrapSocket(self, crypt="tls-rsa", do_handshake=True):
    """Upgrade self.sock to an encrypted socket using `crypt` and record the new state.

    NOTE(review): the `do_handshake` parameter is currently ignored — the TLS
    handshake always runs; confirm whether callers passing False rely on that.
    """
    acting_as_server = (self.type == "in")
    wrapped_sock = CryptConnection.manager.wrapSocket(
        self.sock, crypt, server=acting_as_server, cert_pin=self.cert_pin
    )
    wrapped_sock.do_handshake()
    self.crypt = crypt
    self.sock_wrapped = True
    self.sock = wrapped_sock
|
# Handle incoming connection
def handleIncomingConnection(self, sock):
    """Entry point for an accepted socket: detect implicit TLS by peeking the
    first byte (0x16 = TLS handshake record) and run the receive loop."""
    self.log("Incoming connection...")

    if "TCP_NODELAY" in dir(socket):
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

    self.type = "in"
    if self.ip not in config.ip_local:  # Clearnet: Check implicit SSL
        try:
            first_byte = sock.recv(1, gevent.socket.MSG_PEEK)
            if first_byte == b"\x16":
                self.log("Crypt in connection using implicit SSL")
                self.wrapSocket(do_handshake=False)
        except Exception as err:
            # Best effort: fall back to plaintext if the peek fails
            self.log("Socket peek error: %s" % Debug.formatException(err))
    self.messageLoop()
|
def getMsgpackUnpacker(self):
    """Build a msgpack unpacker matching the remote peer's capabilities.

    Peers that advertise use_bin_type get raw bytes; older (<0.7.0) peers need
    string decoding for backward compatibility.
    """
    peer_uses_bin_type = bool(self.handshake and self.handshake.get("use_bin_type"))
    return Msgpack.getUnpacker(fallback=True, decode=not peer_uses_bin_type)
||||||
|
# Message loop for connection
def messageLoop(self):
    """Receive loop: read socket chunks, feed the msgpack unpacker, dispatch every
    decoded message, and hand off to handleStream() for file-stream payloads.
    Runs until the socket closes or errors, then closes the connection."""
    if not self.sock:
        self.log("Socket error: No socket found")
        return False
    self.protocol = "v2"
    self.updateName()
    self.connected = True
    self.connecting = False
    buff_len = 0
    req_len = 0  # Bytes received since the last completed message (for stats)
    self.unpacker_bytes = 0

    try:
        while not self.closed:
            buff = self.sock.recv(64 * 1024)
            if not buff:
                break  # Connection closed
            buff_len = len(buff)

            # Statistics
            self.last_recv_time = time.time()
            self.incomplete_buff_recv += 1
            self.bytes_recv += buff_len
            self.server.bytes_recv += buff_len
            req_len += buff_len

            if not self.unpacker:
                self.unpacker = self.getMsgpackUnpacker()
                self.unpacker_bytes = 0

            self.unpacker.feed(buff)
            self.unpacker_bytes += buff_len

            # Drain every complete message currently in the unpacker
            while True:
                try:
                    message = next(self.unpacker)
                except StopIteration:
                    break
                if not type(message) is dict:
                    if config.debug_socket:
                        self.log("Invalid message type: %s, content: %r, buffer: %r" % (type(message), message, buff[0:16]))
                    raise Exception("Invalid message type: %s" % type(message))

                # Stats
                self.incomplete_buff_recv = 0
                stat_key = message.get("cmd", "unknown")
                if stat_key == "response" and "to" in message:
                    cmd_sent = self.waiting_requests.get(message["to"], {"cmd": "unknown"})["cmd"]
                    stat_key = "response: %s" % cmd_sent
                if stat_key == "update":
                    stat_key = "update: %s" % message["params"]["site"]
                self.server.stat_recv[stat_key]["bytes"] += req_len
                self.server.stat_recv[stat_key]["num"] += 1
                if "stream_bytes" in message:
                    self.server.stat_recv[stat_key]["bytes"] += message["stream_bytes"]
                req_len = 0

                # Handle message
                if "stream_bytes" in message:
                    # File stream follows: consume it, then restart the unpacker
                    # on whatever bytes arrived after the stream
                    buff_left = self.handleStream(message, buff)
                    self.unpacker = self.getMsgpackUnpacker()
                    self.unpacker.feed(buff_left)
                    self.unpacker_bytes = len(buff_left)
                    if config.debug_socket:
                        self.log("Start new unpacker with buff_left: %r" % buff_left)
                else:
                    self.handleMessage(message)

                message = None  # Release the reference promptly
    except Exception as err:
        if not self.closed:
            self.log("Socket error: %s" % Debug.formatException(err))
            self.server.stat_recv["error: %s" % err]["bytes"] += req_len
            self.server.stat_recv["error: %s" % err]["num"] += 1
    self.close("MessageLoop ended (closed: %s)" % self.closed)  # MessageLoop ended, close connection
|
def getUnpackerUnprocessedBytesNum(self):
    """Number of bytes fed to the msgpack unpacker that it has not consumed yet."""
    unpacker = self.unpacker
    if hasattr(unpacker, "tell"):
        # Modern msgpack: tell() reports how many bytes were consumed
        return self.unpacker_bytes - unpacker.tell()
    # Very old msgpack: derive from its internal buffer offsets
    return unpacker._fb_buf_n - unpacker._fb_buf_o
||||||
|
# Stream socket directly to a file
def handleStream(self, message, buff):
    """Write a `stream_bytes`-long binary payload straight into the waiting file.

    Any stream bytes that already sit unconsumed in the msgpack unpacker (from
    `buff`) are written first; the rest is read from the socket. Resolves the
    waiting request and returns the leftover bytes that belong to the next
    message (b"" if none).
    """
    stream_bytes_left = message["stream_bytes"]
    file = self.waiting_streams[message["to"]]

    unprocessed_bytes_num = self.getUnpackerUnprocessedBytesNum()

    if unprocessed_bytes_num:  # Found stream bytes in unpacker
        unpacker_stream_bytes = min(unprocessed_bytes_num, stream_bytes_left)
        # The unconsumed tail of `buff` is the start of the stream
        buff_stream_start = len(buff) - unprocessed_bytes_num
        file.write(buff[buff_stream_start:buff_stream_start + unpacker_stream_bytes])
        stream_bytes_left -= unpacker_stream_bytes
    else:
        unpacker_stream_bytes = 0

    if config.debug_socket:
        self.log(
            "Starting stream %s: %s bytes (%s from unpacker, buff size: %s, unprocessed: %s)" %
            (message["to"], message["stream_bytes"], unpacker_stream_bytes, len(buff), unprocessed_bytes_num)
        )

    try:
        while 1:
            if stream_bytes_left <= 0:
                break
            stream_buff = self.sock.recv(min(64 * 1024, stream_bytes_left))
            if not stream_buff:
                break  # Socket closed mid-stream
            buff_len = len(stream_buff)
            stream_bytes_left -= buff_len
            file.write(stream_buff)

            # Statistics
            self.last_recv_time = time.time()
            self.incomplete_buff_recv += 1
            self.bytes_recv += buff_len
            self.server.bytes_recv += buff_len
    except Exception as err:
        self.log("Stream read error: %s" % Debug.formatException(err))

    if config.debug_socket:
        self.log("End stream %s, file pos: %s" % (message["to"], file.tell()))

    self.incomplete_buff_recv = 0
    self.waiting_requests[message["to"]]["evt"].set(message)  # Set the response to event
    del self.waiting_streams[message["to"]]
    del self.waiting_requests[message["to"]]

    if unpacker_stream_bytes:
        # Bytes after the stream inside `buff` belong to the next message
        return buff[buff_stream_start + unpacker_stream_bytes:]
    else:
        return b""
|
||||||
|
# My handshake info
def getHandshakeInfo(self):
    """Build the handshake dict we advertise to the remote peer (version,
    supported crypt schemes, fileserver port, onion address, etc.)."""
    # No TLS for onion connections
    if self.ip_type == "onion":
        crypt_supported = []
    elif self.ip in self.server.broken_ssl_ips:
        crypt_supported = []
    else:
        crypt_supported = CryptConnection.manager.crypt_supported
    # No peer id for onion connections
    if self.ip_type == "onion" or self.ip in config.ip_local:
        peer_id = ""
    else:
        peer_id = self.server.peer_id
    # Setup peer lock from requested onion address
    if self.handshake and self.handshake.get("target_ip", "").endswith(".onion") and self.server.tor_manager.start_onions:
        self.target_onion = self.handshake.get("target_ip").replace(".onion", "")  # My onion address
        if not self.server.tor_manager.site_onions.values():
            self.server.log.warning("Unknown target onion address: %s" % self.target_onion)

    handshake = {
        "version": config.version,
        "protocol": "v2",
        "use_bin_type": True,
        "peer_id": peer_id,
        "fileserver_port": self.server.port,
        "port_opened": self.server.port_opened.get(self.ip_type, None),
        "target_ip": self.ip,
        "rev": config.rev,
        "crypt_supported": crypt_supported,
        "crypt": self.crypt,
        "time": int(time.time())
    }
    if self.target_onion:
        handshake["onion"] = self.target_onion
    elif self.ip_type == "onion":
        handshake["onion"] = self.server.tor_manager.getOnion("global")

    if self.is_tracker_connection:
        handshake["tracker_connection"] = True

    if config.debug_socket:
        self.log("My Handshake: %s" % handshake)

    return handshake
|
||||||
|
def setHandshake(self, handshake):
    """Apply the remote peer's handshake: detect self-connections, record the
    peer's port, switch msgpack mode if needed, pick the crypt scheme, and
    rebind incoming onion connections to their onion address.

    Returns False (after closing) on a self-connection; otherwise resolves
    event_connected with True.
    """
    if config.debug_socket:
        self.log("Remote Handshake: %s" % handshake)

    # A peer advertising our own peer_id is ourselves — blacklist and drop
    if handshake.get("peer_id") == self.server.peer_id and not handshake.get("tracker_connection") and not self.is_tracker_connection:
        self.close("Same peer id, can't connect to myself")
        self.server.peer_blacklist.append((handshake["target_ip"], handshake["fileserver_port"]))
        return False

    self.handshake = handshake
    if handshake.get("port_opened", None) is False and "onion" not in handshake and not self.is_private_ip:  # Not connectable
        self.port = 0
    else:
        self.port = int(handshake["fileserver_port"])  # Set peer fileserver port

    # Peer supports bin-type msgpack: swap the unpacker, carrying over any
    # bytes it has buffered but not yet consumed
    if handshake.get("use_bin_type") and self.unpacker:
        unprocessed_bytes_num = self.getUnpackerUnprocessedBytesNum()
        self.log("Changing unpacker to bin type (unprocessed bytes: %s)" % unprocessed_bytes_num)
        unprocessed_bytes = self.unpacker.read_bytes(unprocessed_bytes_num)
        self.unpacker = self.getMsgpackUnpacker()  # Create new unpacker for different msgpack type
        self.unpacker_bytes = 0
        if unprocessed_bytes:
            self.unpacker.feed(unprocessed_bytes)

    # Check if we can encrypt the connection
    if handshake.get("crypt_supported") and self.ip not in self.server.broken_ssl_ips:
        if type(handshake["crypt_supported"][0]) is bytes:
            handshake["crypt_supported"] = [item.decode() for item in handshake["crypt_supported"]]  # Backward compatibility

        if self.ip_type == "onion" or self.ip in config.ip_local:
            crypt = None
        elif handshake.get("crypt"):  # Recommended crypt by server
            crypt = handshake["crypt"]
        else:  # Select the best supported on both sides
            crypt = CryptConnection.manager.selectCrypt(handshake["crypt_supported"])

        if crypt:
            self.crypt = crypt

    if self.type == "in" and handshake.get("onion") and not self.ip_type == "onion":  # Set incoming connection's onion address
        if self.server.ips.get(self.ip) == self:
            del self.server.ips[self.ip]
        self.setIp(handshake["onion"] + ".onion")
        self.log("Changing ip to %s" % self.ip)
        self.server.ips[self.ip] = self
        self.updateName()

    self.event_connected.set(True)  # Mark handshake as done
    self.handshake_time = time.time()
|
|
||||||
|
# Handle incoming message
def handleMessage(self, message):
    """Dispatch one decoded message: resolve responses to waiting requests,
    process the handshake response (req_id 0), or route commands to the server."""
    cmd = message["cmd"]

    self.updateOnlineStatus(successful_activity=True)
    self.last_message_time = time.time()
    self.last_cmd_recv = cmd
    if cmd == "response":  # New style response
        if message["to"] in self.waiting_requests:
            # A single outstanding request means this roundtrip measures latency
            if self.last_send_time and len(self.waiting_requests) == 1:
                ping = time.time() - self.last_send_time
                self.last_ping_delay = ping
            self.waiting_requests[message["to"]]["evt"].set(message)  # Set the response to event
            del self.waiting_requests[message["to"]]
        elif message["to"] == 0:  # Other peers handshake
            ping = time.time() - self.start_time
            if config.debug_socket:
                self.log("Handshake response: %s, ping: %s" % (message, ping))
            self.last_ping_delay = ping
            # Server switched to crypt, lets do it also if not crypted already
            if message.get("crypt") and not self.sock_wrapped:
                crypt = message["crypt"]
                server = (self.type == "in")
                self.log("Crypt out connection using: %s (server side: %s, ping: %.3fs)..." % (crypt, server, ping))
                self.wrapSocket(crypt)

            # Pinned certificate requires an encrypted socket — refuse plaintext
            if not self.sock_wrapped and self.cert_pin:
                self.close("Crypt connection error: Socket not encrypted, but certificate pin present")
                return

            self.setHandshake(message)
        else:
            self.log("Unknown response: %s" % message)
    elif cmd:
        self.server.num_recv += 1
        if cmd == "handshake":
            self.handleHandshake(message)
        else:
            self.server.handleRequest(self, message)
||||||
|
# Incoming handshake set request
def handleHandshake(self, message):
    """Process an incoming handshake command: apply the peer's info, reply with
    ours, then upgrade the socket to the negotiated crypt scheme."""
    self.setHandshake(message["params"])
    data = self.getHandshakeInfo()
    data["cmd"] = "response"
    data["to"] = message["req_id"]
    self.send(data)  # Send response to handshake
    # Sent crypt request to client
    if self.crypt and not self.sock_wrapped:
        server = (self.type == "in")
        self.log("Crypt in connection using: %s (server side: %s)..." % (self.crypt, server))
        try:
            self.wrapSocket(self.crypt)
        except Exception as err:
            if not config.force_encryption:
                # Remember the peer so future connections skip TLS
                self.log("Crypt connection error, adding %s:%s as broken ssl. %s" % (self.ip, self.port, Debug.formatException(err)))
                self.server.broken_ssl_ips[self.ip] = True
            self.close("Broken ssl")

    # Pinned certificate requires an encrypted socket — refuse plaintext
    if not self.sock_wrapped and self.cert_pin:
        self.close("Crypt connection error: Socket not encrypted, but certificate pin present")
|
||||||
|
# Send data to connection
def send(self, message, streaming=False):
    """Serialize `message` with msgpack and write it to the socket under send_lock.

    streaming=True streams the packed message directly to sendall instead of
    building the full byte string first. Non-handshake sends block until the
    handshake completes. Returns True on success, False (after closing the
    connection) on send errors.
    """
    self.updateOnlineStatus(outgoing_activity=True)
    self.last_send_time = time.time()
    if config.debug_socket:
        self.log("Send: %s, to: %s, streaming: %s, site: %s, inner_path: %s, req_id: %s" % (
            message.get("cmd"), message.get("to"), streaming,
            message.get("params", {}).get("site"), message.get("params", {}).get("inner_path"),
            message.get("req_id"))
        )

    if not self.sock:
        self.log("Send error: missing socket")
        return False

    if not self.connected and message.get("cmd") != "handshake":
        self.log("Wait for handshake before send request")
        self.event_connected.get()

    try:
        stat_key = message.get("cmd", "unknown")
        if stat_key == "response":
            stat_key = "response: %s" % self.last_cmd_recv
        else:
            self.server.num_sent += 1

        self.server.stat_sent[stat_key]["num"] += 1
        if streaming:
            with self.send_lock:
                bytes_sent = Msgpack.stream(message, self.sock.sendall)
            self.bytes_sent += bytes_sent
            self.server.bytes_sent += bytes_sent
            self.server.stat_sent[stat_key]["bytes"] += bytes_sent
            message = None
        else:
            data = Msgpack.pack(message)
            self.bytes_sent += len(data)
            self.server.bytes_sent += len(data)
            self.server.stat_sent[stat_key]["bytes"] += len(data)
            message = None
            with self.send_lock:
                self.sock.sendall(data)
        # XXX: Should not be used here:
        # self.updateOnlineStatus(successful_activity=True)
        # Looks like self.sock.sendall() returns normally, instead of
        # raising an Exception (at least, some times).
        # So the only way of detecting the network activity is self.handleMessage()
    except Exception as err:
        self.close("Send error: %s (cmd: %s)" % (err, stat_key))
        return False
    self.last_sent_time = time.time()
    return True
|
|
||||||
|
# Stream file to connection without msgpacking
def sendRawfile(self, file, read_bytes):
    """Stream up to `read_bytes` bytes from `file` straight to the socket.

    file: a readable binary file-like object; read_bytes: bytes to transfer.
    Returns True. Updates per-connection and server byte/stat counters.
    """
    buff = 64 * 1024
    bytes_left = read_bytes
    bytes_sent = 0
    while bytes_left > 0:
        self.last_send_time = time.time()
        data = file.read(min(bytes_left, buff))
        if not data:
            # FIX: file ended before read_bytes — previously the loop kept
            # issuing empty sendall() calls until the counter ran out
            break
        bytes_sent += len(data)
        with self.send_lock:
            self.sock.sendall(data)
        # FIX: account the bytes actually read; decrementing by the full chunk
        # size under-counted when read() returned a short chunk
        bytes_left -= len(data)
    self.bytes_sent += bytes_sent
    self.server.bytes_sent += bytes_sent
    self.server.stat_sent["raw_file"]["num"] += 1
    self.server.stat_sent["raw_file"]["bytes"] += bytes_sent
    return True
|
||||||
|
# Create and send a request to peer
def request(self, cmd, params=None, stream_to=None):
    """Send `cmd` with `params` to the peer and block until the response arrives.

    stream_to: optional file-like object to receive a streamed response body.
    Returns the response message dict, or False on timeout/send failure.
    """
    if params is None:  # FIX: was a mutable default argument (params={})
        params = {}
    # Last command sent more than 10 sec ago, timeout
    if self.waiting_requests and self.protocol == "v2" and time.time() - max(self.last_req_time, self.last_recv_time) > 10:
        self.close("Request %s timeout: %.3fs" % (self.last_cmd_sent, time.time() - self.last_send_time))
        return False

    self.last_req_time = time.time()
    self.last_cmd_sent = cmd
    self.req_id += 1
    data = {"cmd": cmd, "req_id": self.req_id, "params": params}
    event = gevent.event.AsyncResult()  # Create new event for response
    self.waiting_requests[self.req_id] = {"evt": event, "cmd": cmd}
    if stream_to:
        self.waiting_streams[self.req_id] = stream_to
    if not self.send(data):  # Send request
        return False
    res = event.get()  # Wait until event solves
    return res
|
||||||
|
def ping(self):
    """Round-trip a "ping" request (10s cap) and record the latency.

    Returns True only when the peer answers with the exact b"Pong!" body.
    """
    started = time.time()
    response = None
    with gevent.Timeout(10.0, False):  # Give up silently after 10 seconds
        try:
            response = self.request("ping")
        except Exception as err:
            self.log("Ping error: %s" % Debug.formatException(err))
    if response and "body" in response and response["body"] == b"Pong!":
        self.last_ping_delay = time.time() - started
        return True
    return False
|
||||||
|
# Close connection
def close(self, reason="Unknown"):
    """Tear the connection down: fail pending requests, deregister from the
    server, shut the socket, and clear references. Idempotent — returns False
    if already closed, True otherwise."""
    if self.closed:
        return False  # Already closed
    self.closed = True
    self.connected = False
    self.connecting = False
    if self.event_connected:
        self.event_connected.set(False)  # Wake anyone blocked on the handshake

    self.log(
        "Closing connection: %s, waiting_requests: %s, sites: %s, buff: %s..." %
        (reason, len(self.waiting_requests), self.sites, self.incomplete_buff_recv)
    )
    for request in self.waiting_requests.values():  # Mark pending requests failed
        request["evt"].set(False)
    self.waiting_requests = {}
    self.waiting_streams = {}
    self.sites = 0
    self.server.removeConnection(self)  # Remove connection from server registry
    try:
        if self.sock:
            self.sock.shutdown(gevent.socket.SHUT_WR)
            self.sock.close()
    except Exception as err:
        if config.debug_socket:
            self.log("Close error: %s" % err)

    # Little cleanup
    self.sock = None
    self.unpacker = None
    self.event_connected = None
    self.crypt = None
    self.sock_wrapped = False

    return True
|
||||||
|
def updateOnlineStatus(self, outgoing_activity=False, successful_activity=False):
    """Forward this connection's activity flags to the server's internet-outage tracker."""
    self.server.updateOnlineStatus(
        self,
        outgoing_activity=outgoing_activity,
        successful_activity=successful_activity,
    )
|
|
@ -0,0 +1,584 @@
|
||||||
|
import logging
|
||||||
|
import re
|
||||||
|
import time
|
||||||
|
import sys
|
||||||
|
import socket
|
||||||
|
from collections import defaultdict
|
||||||
|
|
||||||
|
import gevent
|
||||||
|
import msgpack
|
||||||
|
from gevent.server import StreamServer
|
||||||
|
from gevent.pool import Pool
|
||||||
|
import gevent.event
|
||||||
|
|
||||||
|
import util
|
||||||
|
from util import helper
|
||||||
|
from Debug import Debug
|
||||||
|
from .Connection import Connection
|
||||||
|
from Config import config
|
||||||
|
from Crypt import CryptConnection
|
||||||
|
from Crypt import CryptHash
|
||||||
|
from Tor import TorManager
|
||||||
|
from Site import SiteManager
|
||||||
|
|
||||||
|
|
||||||
|
class ConnectionServer(object):
    """Accepts incoming peer connections and tracks global connection state/stats."""

    def __init__(self, ip=None, port=None, request_handler=None):
        """Set up server state; defaults to loopback:15441 when no ip is given.

        request_handler: optional callable bound as self.handleRequest to
        process non-handshake commands from peers.
        """
        if not ip:
            if config.fileserver_ip_type == "ipv6":
                ip = "::1"
            else:
                ip = "127.0.0.1"
            port = 15441
        self.ip = ip
        self.port = port
        self.last_connection_id = 1  # Connection id incrementer
        self.log = logging.getLogger("ConnServer")
        self.port_opened = {}
        self.peer_blacklist = SiteManager.peer_blacklist

        self.managed_pools = {}  # Named greenlet pools shut down together on stop

        self.tor_manager = TorManager(self.ip, self.port)
        self.connections = []  # Connections
        self.whitelist = config.ip_local  # No flood protection on this ips
        self.ip_incoming = {}  # Incoming connections from ip in the last minute to avoid connection flood
        self.broken_ssl_ips = {}  # Peerids of broken ssl connections
        self.ips = {}  # Connection by ip

        self.has_internet = True  # Internet outage detection
        self.internet_online_since = 0
        self.internet_offline_since = 0
        self.last_outgoing_internet_activity_time = 0  # Last time the application tried to send any data
        self.last_successful_internet_activity_time = 0  # Last time the application successfully sent or received any data
        self.internet_outage_threshold = 60 * 2  # Seconds without success before declaring an outage

        self.stream_server = None
        self.stream_server_proxy = None
        self.running = False
        self.stopping = False
        self.stopping_event = gevent.event.Event()
        self.thread_checker = None

        self.thread_pool = Pool(None)
        self.managed_pools["thread"] = self.thread_pool

        # Per-command traffic counters: stat_*[cmd] -> {"num": int, "bytes": int}
        self.stat_recv = defaultdict(lambda: defaultdict(int))
        self.stat_sent = defaultdict(lambda: defaultdict(int))
        self.bytes_recv = 0
        self.bytes_sent = 0
        self.num_recv = 0
        self.num_sent = 0

        self.num_incoming = 0
        self.num_outgoing = 0
        self.had_external_incoming = False

        self.timecorrection = 0.0
        self.pool = Pool(500)  # do not accept more than 500 connections
        self.managed_pools["incoming"] = self.pool

        self.outgoing_pool = Pool(None)
        self.managed_pools["outgoing"] = self.outgoing_pool

        # Bittorrent style peerid
        self.peer_id = "-UT3530-%s" % CryptHash.random(12, "base64")

        # Check msgpack version
        if msgpack.version[0] == 0 and msgpack.version[1] < 4:
            self.log.error(
                "Error: Unsupported msgpack version: %s (<0.4.0), please run `sudo apt-get install python-pip; sudo pip install msgpack --upgrade`" %
                str(msgpack.version)
            )
            sys.exit(0)

        if request_handler:
            self.handleRequest = request_handler
|
|
||||||
|
def start(self, check_connections=True):
    """Start the connection server: load certs, start Tor and bind the listening socket.

    Returns False when the server is already stopping or no port is configured;
    otherwise returns None with self.stream_server created (binding errors are
    logged but not raised, so the server keeps running without a listener).
    """
    if self.stopping:
        return False
    self.running = True
    if check_connections:
        # Background greenlet that periodically cleans up idle/timed-out connections
        self.thread_checker = self.spawn(self.checkConnections)
    CryptConnection.manager.loadCerts()
    if config.tor != "disable":
        self.tor_manager.start()
        self.tor_manager.startOnions()
    if not self.port:
        self.log.info("No port found, not binding")
        return False

    self.log.debug("Binding to: %s:%s, (msgpack: %s), supported crypt: %s" % (
        self.ip, self.port, ".".join(map(str, msgpack.version)),
        CryptConnection.manager.crypt_supported
    ))
    try:
        self.stream_server = StreamServer(
            (self.ip, self.port), self.handleIncomingConnection, spawn=self.pool, backlog=100
        )
    except Exception as err:
        # Port may already be in use; keep running without accepting connections
        self.log.info("StreamServer create error: %s" % Debug.formatException(err))
def listen(self):
    """Serve incoming connections until the stream server stops.

    Returns None immediately when the server is not running, False on a
    listen error, and None after a clean shutdown.
    """
    if not self.running:
        return None

    if self.stream_server_proxy:
        self.spawn(self.listenProxy)
    try:
        self.stream_server.serve_forever()  # Blocks until stream_server.stop()
    except Exception as err:
        self.log.info("StreamServer listen error: %s" % err)
        return False
    self.log.debug("Stopped.")
def stop(self, ui_websocket=None):
    """Signal shutdown: flip state flags, wake any sleepers, run the stop sequence."""
    self.log.debug("Stopping %s" % self.stream_server)
    self.stopping = True
    self.running = False
    self.stopping_event.set()  # Wakes greenlets blocked in self.sleep()
    self.onStop(ui_websocket=ui_websocket)
def onStop(self, ui_websocket=None):
    """Graceful-shutdown sequence.

    Waits up to 30s for tasks in the managed pools to finish (reporting
    progress to the optional ui_websocket), then force-kills whatever is
    left, stops the checker greenlet and the stream server.
    """
    timeout = 30
    start_time = time.time()
    join_quantum = 0.1
    prev_msg = None
    while True:
        if time.time() >= start_time + timeout:
            break

        total_size = 0
        sizes = {}
        timestep = 0
        for name, pool in list(self.managed_pools.items()):
            timestep += join_quantum
            pool.join(timeout=join_quantum)
            size = len(pool)
            if size:
                sizes[name] = size
                total_size += size

        if len(sizes) == 0:
            break  # All pools drained

        if timestep < 1:
            time.sleep(1 - timestep)  # Keep roughly one progress update per second

        # format message
        s = ""
        for name, size in sizes.items():
            s += "%s pool: %s, " % (name, size)
        msg = "Waiting for tasks in managed pools to stop: %s" % s
        # Prevent flooding to log
        if msg != prev_msg:
            prev_msg = msg
            self.log.info("%s", msg)

        percent = 100 * (time.time() - start_time) / timeout
        msg = "File Server: waiting for %s tasks to stop" % total_size
        self.sendShutdownProgress(ui_websocket, msg, percent)

    # Timeout reached (or pools drained): kill whatever is still running
    for name, pool in list(self.managed_pools.items()):
        size = len(pool)
        if size:
            self.log.info("Killing %s tasks in %s pool", size, name)
            pool.kill()

    self.sendShutdownProgress(ui_websocket, "File Server stopped. Now to exit.", 100)

    if self.thread_checker:
        gevent.kill(self.thread_checker)
        self.thread_checker = None
    if self.stream_server:
        self.stream_server.stop()
def sendShutdownProgress(self, ui_websocket, message, progress):
    """Push a shutdown progress update (0-100) to the UI websocket, if any."""
    if not ui_websocket:
        return
    ui_websocket.cmd("progress", ["shutdown", message, progress])
    time.sleep(0.01)  # Give the websocket loop a chance to flush the message
# Sleeps the specified amount of time or until ConnectionServer is stopped
def sleep(self, t):
    """Sleep t seconds, waking early when the server is being stopped."""
    if t:
        self.stopping_event.wait(timeout=t)
    else:
        time.sleep(t)  # t is falsy (0/None==error upstream); plain yield, no stop-event needed
# Spawns a thread that will be waited for on server being stopped (and killed after a timeout)
def spawn(self, *args, **kwargs):
    """Spawn a greenlet in the managed thread pool and return it."""
    thread = self.thread_pool.spawn(*args, **kwargs)
    return thread
def closeConnections(self):
    """Close every open connection (iterates over a copy: close() mutates the list)."""
    self.log.debug("Closing all connection: %s" % len(self.connections))
    for connection in self.connections[:]:
        connection.close("Close all connections")
def handleIncomingConnection(self, sock, addr):
    """Accept callback for the stream server.

    Normalizes the peer IP, applies per-IP flood protection (max 6 new
    connections per cleanup interval unless whitelisted), then wraps the
    socket in a Connection and registers it. Returns False when rejected.
    """
    if not self.allowsAcceptingConnections():
        sock.close()
        return False

    ip, port = addr[0:2]
    ip = ip.lower()
    if ip.startswith("::ffff:"):  # IPv6 to IPv4 mapping
        ip = ip.replace("::ffff:", "", 1)
    self.num_incoming += 1

    if not self.had_external_incoming and not helper.isPrivateIp(ip):
        self.had_external_incoming = True

    # Connection flood protection
    if ip in self.ip_incoming and ip not in self.whitelist:
        self.ip_incoming[ip] += 1
        if self.ip_incoming[ip] > 6:  # Allow 6 in 1 minute from same ip
            self.log.debug("Connection flood detected from %s" % ip)
            self.sleep(30)  # Stall the flooder before dropping it
            sock.close()
            return False
    else:
        self.ip_incoming[ip] = 1

    connection = Connection(self, ip, port, sock)
    self.connections.append(connection)
    if ip not in config.ip_local:
        self.ips[ip] = connection
    connection.handleIncomingConnection(sock)
def handleMessage(self, *args, **kwargs):
    """Default no-op message handler; replaced via the request_handler constructor arg."""
    pass
def getConnection(self, ip=None, port=None, peer_id=None, create=True, site=None, is_tracker_connection=False):
    """Return an existing connection to ip (optionally filtered by peer_id) or create one.

    For Tor, connections may be keyed per-site (ip + site onion) so each site
    gets its own circuit. Returns None when no connection exists and create
    is False (or creating is currently disallowed). Raises on connect errors,
    blacklisted peers and non-connectable (port 0) peers.
    """
    ip_type = self.getIpType(ip)
    # Per-site onion needed when target is an onion or our port for this ip type is closed
    has_per_site_onion = (ip.endswith(".onion") or self.port_opened.get(ip_type, None) == False) and self.tor_manager.start_onions and site
    if has_per_site_onion:  # Site-unique connection for Tor
        if ip.endswith(".onion"):
            site_onion = self.tor_manager.getOnion(site.address)
        else:
            site_onion = self.tor_manager.getOnion("global")
        key = ip + site_onion
    else:
        key = ip

    # Find connection by ip
    if key in self.ips:
        connection = self.ips[key]
        if not peer_id or connection.handshake.get("peer_id") == peer_id:  # Filter by peer_id
            if not connection.connected and create:
                succ = connection.event_connected.get()  # Wait for connection
                if not succ:
                    raise Exception("Connection event return error")
            return connection

    # Recover from connection pool
    for connection in self.connections:
        if connection.ip == ip:
            if peer_id and connection.handshake.get("peer_id") != peer_id:  # Does not match
                continue
            if ip.endswith(".onion") and self.tor_manager.start_onions and ip.replace(".onion", "") != connection.target_onion:
                # For different site
                continue
            if not connection.connected and create:
                succ = connection.event_connected.get()  # Wait for connection
                if not succ:
                    raise Exception("Connection event return error")
            return connection

    # No connection found
    if create and self.allowsCreatingConnections():
        if port == 0:
            raise Exception("This peer is not connectable")

        if (ip, port) in self.peer_blacklist and not is_tracker_connection:
            raise Exception("This peer is blacklisted")

        try:
            if has_per_site_onion:  # Lock connection to site
                connection = Connection(self, ip, port, target_onion=site_onion, is_tracker_connection=is_tracker_connection)
            else:
                connection = Connection(self, ip, port, is_tracker_connection=is_tracker_connection)
            self.num_outgoing += 1
            self.ips[key] = connection
            self.connections.append(connection)
            connection.log("Connecting... (site: %s)" % site)
            succ = connection.connect()
            if not succ:
                connection.close("Connection event return error")
                raise Exception("Connection event return error")

        except Exception as err:
            connection.close("%s Connect error: %s" % (ip, Debug.formatException(err)))
            raise err

        if len(self.connections) > config.global_connected_limit:
            self.spawn(self.checkMaxConnections)

        return connection
    else:
        return None
def removeConnection(self, connection):
    """Deregister a connection from the ip registry (all key variants) and the pool."""
    # Delete if same as in registry
    if self.ips.get(connection.ip) == connection:
        del self.ips[connection.ip]
    # Site locked connection
    if connection.target_onion:
        if self.ips.get(connection.ip + connection.target_onion) == connection:
            del self.ips[connection.ip + connection.target_onion]
    # Cert pinned connection
    if connection.cert_pin and self.ips.get(connection.ip + "#" + connection.cert_pin) == connection:
        del self.ips[connection.ip + "#" + connection.cert_pin]

    if connection in self.connections:
        self.connections.remove(connection)
def checkConnections(self):
    """Periodic (15s) connection maintenance loop; runs until self.running is False.

    Each pass resets the incoming-flood counters, updates the online status,
    then closes connections that are idle, stalled, timed out or misbehaving.
    Onion / Tor-always connections get doubled timeouts.
    """
    run_i = 0
    self.sleep(15)
    while self.running:
        run_i += 1
        self.ip_incoming = {}  # Reset connected ips counter
        s = time.time()
        self.updateOnlineStatus(None)
        for connection in self.connections[:]:  # Make a copy
            if connection.ip.endswith(".onion") or config.tor == "always":
                timeout_multipler = 2
            else:
                timeout_multipler = 1

            idle = time.time() - max(connection.last_recv_time, connection.start_time, connection.last_message_time)

            if connection.unpacker and idle > 30:
                # Delete the unpacker if not needed
                del connection.unpacker
                connection.unpacker = None

            elif connection.last_cmd_sent == "announce" and idle > 20:  # Bootstrapper connection close after 20 sec
                connection.close("[Cleanup] Tracker connection, idle: %.3fs" % idle)

            if idle > 60 * 60:
                # Wake up after 1h
                connection.close("[Cleanup] After wakeup, idle: %.3fs" % idle)

            elif idle > 20 * 60 and connection.last_send_time < time.time() - 10:
                # Idle more than 20 min and we have not sent request in last 10 sec
                if not connection.ping():
                    connection.close("[Cleanup] Ping timeout")

            elif idle > 10 * timeout_multipler and connection.incomplete_buff_recv > 0:
                # Incomplete data with more than 10 sec idle
                connection.close("[Cleanup] Connection buff stalled")

            elif idle > 10 * timeout_multipler and connection.protocol == "?":  # No connection after 10 sec
                connection.close(
                    "[Cleanup] Connect timeout: %.3fs" % idle
                )

            elif idle > 10 * timeout_multipler and connection.waiting_requests and time.time() - connection.last_send_time > 10 * timeout_multipler:
                # Sent command and no response in 10 sec
                connection.close(
                    "[Cleanup] Command %s timeout: %.3fs" % (connection.last_cmd_sent, time.time() - connection.last_send_time)
                )

            elif idle < 60 and connection.bad_actions > 40:
                connection.close(
                    "[Cleanup] Too many bad actions: %s" % connection.bad_actions
                )

            elif idle > 5 * 60 and connection.sites == 0:
                connection.close(
                    "[Cleanup] No site for connection"
                )

            elif run_i % 90 == 0:
                # Reset bad action counter every 30 min
                connection.bad_actions = 0

        self.timecorrection = self.getTimecorrection()

        if time.time() - s > 0.01:
            self.log.debug("Connection cleanup in %.3fs" % (time.time() - s))

        self.sleep(15)
    self.log.debug("Checkconnections ended")
@util.Noparallel(blocking=False)
def checkMaxConnections(self):
    """Enforce config.global_connected_limit by closing idle connections.

    Sorted so connections serving the fewest sites are closed first; closes
    at most ~10% of the limit per invocation. Returns the number closed.
    """
    if len(self.connections) < config.global_connected_limit:
        return 0

    s = time.time()
    num_connected_before = len(self.connections)
    self.connections.sort(key=lambda connection: connection.sites)  # Least useful first
    num_closed = 0
    for connection in self.connections:
        idle = time.time() - max(connection.last_recv_time, connection.start_time, connection.last_message_time)
        if idle > 60:  # Only close connections idle for over a minute
            connection.close("Connection limit reached")
            num_closed += 1
        if num_closed > config.global_connected_limit * 0.1:
            break

    self.log.debug("Closed %s connections of %s after reached limit %s in %.3fs" % (
        num_closed, num_connected_before, config.global_connected_limit, time.time() - s
    ))
    return num_closed
# Returns True if we should slow down opening new connections as at the moment
# there are too many connections being established and not connected completely
# (not entered the message loop yet).
def shouldThrottleNewConnections(self):
    """Return True when too many outgoing connections are still mid-handshake."""
    threshold = config.simultaneous_connection_throttle_threshold
    if len(self.connections) <= threshold:
        return False
    nr_connections_being_established = 0
    for connection in self.connections[:]:  # Make a copy
        if connection.connecting and not connection.connected and connection.type == "out":
            nr_connections_being_established += 1
            if nr_connections_being_established > threshold:
                return True
    return False
# Internet outage detection
def updateOnlineStatus(self, connection, outgoing_activity=False, successful_activity=False):
    """Track internet connectivity from per-connection activity.

    Called with a (non-private) connection to record activity timestamps and
    mark us online; called with connection=None (from the checker loop) to
    decide whether we have gone offline.
    """

    now = time.time()

    if connection and not connection.is_private_ip:
        if outgoing_activity:
            self.last_outgoing_internet_activity_time = now
        if successful_activity:
            self.last_successful_internet_activity_time = now
            self.setInternetStatus(True)
        return

    if not self.last_outgoing_internet_activity_time:
        return  # Nothing attempted yet, nothing to judge

    # Offline if we tried to send something recently but nothing succeeded
    # within the outage threshold
    if (
        (self.last_successful_internet_activity_time < now - self.internet_outage_threshold)
        and
        (self.last_successful_internet_activity_time < self.last_outgoing_internet_activity_time)
    ):
        self.setInternetStatus(False)
        return

    # This is the old algorithm just in case we missed something
    idle = now - self.last_successful_internet_activity_time
    if idle > max(60, 60 * 10 / max(1, float(len(self.connections)) / 50)):
        # Offline: Last successful activity more than 60-600sec depending on connection number
        self.setInternetStatus(False)
        return
def setInternetStatus(self, status):
    """Record the online/offline transition and fire the matching callback.

    No-op when the status is unchanged.
    """
    if self.has_internet == status:
        return

    self.has_internet = status

    if self.has_internet:
        self.internet_online_since = time.time()
        self.spawn(self.onInternetOnline)
    else:
        self.internet_offline_since = time.time()
        self.spawn(self.onInternetOffline)
def isInternetOnline(self):
    """Return the last-known internet connectivity status."""
    return self.has_internet
def onInternetOnline(self):
    """Hook fired when connectivity is regained; plugins/subclasses extend this."""
    self.log.info("Internet online")
def onInternetOffline(self):
    """Hook fired when connectivity is lost; resets the external-incoming flag."""
    self.had_external_incoming = False  # Port-open state must be re-proven once back online
    self.log.info("Internet offline")
def setOfflineMode(self, offline_mode):
    """Toggle global offline mode (stored on config); no-op when unchanged."""
    if config.offline == offline_mode:
        return
    config.offline = offline_mode  # Yep, awkward
    if offline_mode:
        self.log.info("offline mode is ON")
    else:
        self.log.info("offline mode is OFF")
def isOfflineMode(self):
    """Return True when global offline mode is enabled."""
    return config.offline
def allowsCreatingConnections(self):
    """Return True when outgoing connections may be created (not offline, not stopping)."""
    if self.isOfflineMode():
        return False
    if self.stopping:
        return False
    return True
def allowsAcceptingConnections(self):
    """Return True when incoming connections may be accepted (not offline, not stopping)."""
    if self.isOfflineMode():
        return False
    if self.stopping:
        return False
    return True
def getTimecorrection(self):
    """Estimate local-clock error from peers' handshake times.

    Returns a trimmed median (mean of the three middle values) of
    per-connection offsets, or 0.0 when fewer than 9 samples exist.
    """
    corrections = sorted([
        connection.handshake.get("time") - connection.handshake_time + connection.last_ping_delay
        for connection in self.connections
        if connection.handshake.get("time") and connection.last_ping_delay
    ])
    if len(corrections) < 9:
        return 0.0  # Not enough samples for a meaningful estimate
    mid = int(len(corrections) / 2 - 1)
    median = (corrections[mid - 1] + corrections[mid] + corrections[mid + 1]) / 3
    return median
|
############################################################################
|
||||||
|
|
||||||
|
# Methods for handling network address types
|
||||||
|
# (ipv4, ipv6, onion etc... more to be implemented by plugins)
|
||||||
|
#
|
||||||
|
# All the functions handling network address types have "Ip" in the name.
|
||||||
|
# So it was in the initial codebase, and I keep the naming, since I couldn't
|
||||||
|
# think of a better option.
|
||||||
|
# "IP" is short and quite clear and lets you understand that a variable
|
||||||
|
# contains a peer address or other transport-level address and not
|
||||||
|
# an address of ZeroNet site.
|
||||||
|
#
|
||||||
|
|
||||||
|
# Returns type of the given network address.
# Since: 0.8.0
# Replaces helper.getIpType() in order to be extensible by plugins.
def getIpType(self, ip):
    """Classify an address string as "onion", "ipv6", "ipv4" or "unknown"."""
    if ip.endswith(".onion"):
        return "onion"
    elif ":" in ip:
        return "ipv6"
    elif re.match(r"[0-9\.]+$", ip):
        return "ipv4"
    else:
        return "unknown"
||||||
|
# Checks if a network address can be reachable in the current configuration
# and returs a string describing why it cannot.
# If the network address can be reachable, returns False.
# Since: 0.8.0
def getIpUnreachability(self, ip):
    """Return a reason string when ip is unreachable under current config, else False."""
    ip_type = self.getIpType(ip)
    if ip_type == 'onion' and not self.tor_manager.enabled:
        return "Can't connect to onion addresses, no Tor controller present"
    if config.tor == "always" and helper.isPrivateIp(ip) and ip not in config.ip_local:
        return "Can't connect to local IPs in Tor: always mode"
    return False
||||||
|
# Returns True if ConnctionServer has means for establishing outgoing
# connections to the given address.
# Since: 0.8.0
def isIpReachable(self, ip):
    """Return True when no unreachability reason applies to ip."""
    return self.getIpUnreachability(ip) == False
|
@ -0,0 +1,2 @@
|
||||||
|
from .ConnectionServer import ConnectionServer
|
||||||
|
from .Connection import Connection
|
|
@ -0,0 +1,162 @@
|
||||||
|
import os
|
||||||
|
|
||||||
|
from Db.Db import Db, DbTableError
|
||||||
|
from Config import config
|
||||||
|
from Plugin import PluginManager
|
||||||
|
from Debug import Debug
|
||||||
|
|
||||||
|
|
||||||
|
@PluginManager.acceptPlugins
class ContentDb(Db):
    """SQLite index of sites and their content.json files (sizes, modify times)."""

    def __init__(self, path):
        Db.__init__(self, {"db_name": "ContentDb", "tables": {}}, path)
        self.foreign_keys = True

    def init(self):
        """Load schema, verify tables and foreign keys; rebuild the db file on failure."""
        try:
            self.schema = self.getSchema()
            try:
                self.checkTables()
            except DbTableError:
                pass  # Tables get (re)created by the schema machinery
            self.log.debug("Checking foreign keys...")
            foreign_key_error = self.execute("PRAGMA foreign_key_check").fetchone()
            if foreign_key_error:
                raise Exception("Database foreign key error: %s" % foreign_key_error)
        except Exception as err:
            # Corrupt db: delete the file and start from an empty database
            self.log.error("Error loading content.db: %s, rebuilding..." % Debug.formatException(err))
            self.close()
            os.unlink(self.db_path)  # Remove and try again
            Db.__init__(self, {"db_name": "ContentDb", "tables": {}}, self.db_path)
            self.foreign_keys = True
            self.schema = self.getSchema()
            try:
                self.checkTables()
            except DbTableError:
                pass
        self.site_ids = {}  # site address -> site_id cache
        self.sites = {}  # site address -> Site object

    def getSchema(self):
        """Return the table schema dict, migrating version-less databases first."""
        schema = {}
        schema["db_name"] = "ContentDb"
        schema["version"] = 3
        schema["tables"] = {}

        if not self.getTableVersion("site"):
            self.log.debug("Migrating from table version-less content.db")
            version = int(self.execute("PRAGMA user_version").fetchone()[0])
            if version > 0:
                self.checkTables()
                self.execute("INSERT INTO keyvalue ?", {"json_id": 0, "key": "table.site.version", "value": 1})
                self.execute("INSERT INTO keyvalue ?", {"json_id": 0, "key": "table.content.version", "value": 1})

        schema["tables"]["site"] = {
            "cols": [
                ["site_id", "INTEGER PRIMARY KEY ASC NOT NULL UNIQUE"],
                ["address", "TEXT NOT NULL"]
            ],
            "indexes": [
                "CREATE UNIQUE INDEX site_address ON site (address)"
            ],
            "schema_changed": 1
        }

        schema["tables"]["content"] = {
            "cols": [
                ["content_id", "INTEGER PRIMARY KEY UNIQUE NOT NULL"],
                ["site_id", "INTEGER REFERENCES site (site_id) ON DELETE CASCADE"],
                ["inner_path", "TEXT"],
                ["size", "INTEGER"],
                ["size_files", "INTEGER"],
                ["size_files_optional", "INTEGER"],
                ["modified", "INTEGER"]
            ],
            "indexes": [
                "CREATE UNIQUE INDEX content_key ON content (site_id, inner_path)",
                "CREATE INDEX content_modified ON content (site_id, modified)"
            ],
            "schema_changed": 1
        }

        return schema

    def initSite(self, site):
        """Register a Site object for later lookups."""
        self.sites[site.address] = site

    def needSite(self, site):
        """Return the site_id for site, inserting the row on first use."""
        if site.address not in self.site_ids:
            self.execute("INSERT OR IGNORE INTO site ?", {"address": site.address})
            self.site_ids = {}  # Rebuild the full cache from the table
            for row in self.execute("SELECT * FROM site"):
                self.site_ids[row["address"]] = row["site_id"]
        return self.site_ids[site.address]

    def deleteSite(self, site):
        """Delete the site row (contents cascade) and drop it from the caches."""
        site_id = self.site_ids.get(site.address, 0)
        if site_id:
            self.execute("DELETE FROM site WHERE site_id = :site_id", {"site_id": site_id})
            del self.site_ids[site.address]
            del self.sites[site.address]

    def setContent(self, site, inner_path, content, size=0):
        """Insert or update the index row for one content.json."""
        self.insertOrUpdate("content", {
            "size": size,
            "size_files": sum([val["size"] for key, val in content.get("files", {}).items()]),
            "size_files_optional": sum([val["size"] for key, val in content.get("files_optional", {}).items()]),
            "modified": int(content.get("modified", 0))
        }, {
            "site_id": self.site_ids.get(site.address, 0),
            "inner_path": inner_path
        })

    def deleteContent(self, site, inner_path):
        """Remove the index row for one content.json."""
        self.execute("DELETE FROM content WHERE ?", {"site_id": self.site_ids.get(site.address, 0), "inner_path": inner_path})

    def loadDbDict(self, site):
        """Return {inner_path: False} for every indexed content.json of site.

        False marks entries known to exist but not yet loaded into memory.
        """
        res = self.execute(
            "SELECT GROUP_CONCAT(inner_path, '|') AS inner_paths FROM content WHERE ?",
            {"site_id": self.site_ids.get(site.address, 0)}
        )
        row = res.fetchone()
        if row and row["inner_paths"]:
            inner_paths = row["inner_paths"].split("|")
            return dict.fromkeys(inner_paths, False)
        else:
            return {}

    def getTotalSize(self, site, ignore=None):
        """Return (total_size, total_optional_size) for site, optionally skipping one inner_path."""
        params = {"site_id": self.site_ids.get(site.address, 0)}
        if ignore:
            params["not__inner_path"] = ignore
        res = self.execute("SELECT SUM(size) + SUM(size_files) AS size, SUM(size_files_optional) AS size_optional FROM content WHERE ?", params)
        row = dict(res.fetchone())

        # SUM() over no rows yields NULL; normalize to 0
        if not row["size"]:
            row["size"] = 0
        if not row["size_optional"]:
            row["size_optional"] = 0

        return row["size"], row["size_optional"]

    def listModified(self, site, after=None, before=None):
        """Return {inner_path: modified} for contents modified in the given window."""
        params = {"site_id": self.site_ids.get(site.address, 0)}
        if after:
            params["modified>"] = after
        if before:
            params["modified<"] = before
        res = self.execute("SELECT inner_path, modified FROM content WHERE ?", params)
        return {row["inner_path"]: row["modified"] for row in res}
||||||
|
content_dbs = {}  # db path -> ContentDb singleton cache


def getContentDb(path=None):
    """Return the cached ContentDb for path, creating and initializing it on first use."""
    if not path:
        path = "%s/content.db" % config.data_dir
    if path not in content_dbs:
        content_dbs[path] = ContentDb(path)
        content_dbs[path].init()
    return content_dbs[path]


getContentDb()  # Pre-connect to default one
|
|
@ -0,0 +1,155 @@
|
||||||
|
import time
|
||||||
|
import os
|
||||||
|
|
||||||
|
from . import ContentDb
|
||||||
|
from Debug import Debug
|
||||||
|
from Config import config
|
||||||
|
|
||||||
|
|
||||||
|
class ContentDbDict(dict):
    """Lazy dict of inner_path -> content.json data, backed by ContentDb.

    Keys are preloaded from the database with value False (known but not
    loaded); values are read from site storage on first access and only the
    10 most recently used large entries are kept in memory.
    """

    def __init__(self, site, *args, **kwargs):
        s = time.time()
        self.site = site
        self.cached_keys = []  # LRU-ish list of keys currently held in memory
        self.log = self.site.log
        self.db = ContentDb.getContentDb()
        self.db_id = self.db.needSite(site)
        self.num_loaded = 0
        super(ContentDbDict, self).__init__(self.db.loadDbDict(site))  # Load keys from database
        self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids)))

    def loadItem(self, key):
        """Load one content.json from storage into the dict; raises KeyError if the file is gone."""
        try:
            self.num_loaded += 1
            if self.num_loaded % 100 == 0:
                if config.verbose:
                    self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack()))
                else:
                    self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key))
            content = self.site.storage.loadJson(key)
            dict.__setitem__(self, key, content)
        except IOError:
            if dict.get(self, key):
                self.__delitem__(key)  # File not exists anymore
            raise KeyError(key)

        self.addCachedKey(key)
        self.checkLimit()

        return content

    def getItemSize(self, key):
        """Return the on-disk size of the json file behind key."""
        return self.site.storage.getSize(key)

    # Only keep last 10 accessed json in memory
    def checkLimit(self):
        if len(self.cached_keys) > 10:
            key_deleted = self.cached_keys.pop(0)
            dict.__setitem__(self, key_deleted, False)  # Purge from memory, keep the key

    def addCachedKey(self, key):
        if key not in self.cached_keys and key != "content.json" and len(key) > 40:  # Always keep keys smaller than 40 char
            self.cached_keys.append(key)

    def __getitem__(self, key):
        val = dict.get(self, key)
        if val:  # Already loaded
            return val
        elif val is None:  # Unknown key
            raise KeyError(key)
        elif val is False:  # Loaded before, but purged from cache
            return self.loadItem(key)

    def __setitem__(self, key, val):
        self.addCachedKey(key)
        self.checkLimit()
        size = self.getItemSize(key)
        self.db.setContent(self.site, key, val, size)
        dict.__setitem__(self, key, val)

    def __delitem__(self, key):
        self.db.deleteContent(self.site, key)
        dict.__delitem__(self, key)
        try:
            self.cached_keys.remove(key)
        except ValueError:
            pass  # Key was not cached in memory

    def iteritems(self):
        """Yield (key, loaded value) pairs, skipping entries that fail to load."""
        for key in dict.keys(self):
            try:
                val = self[key]
            except Exception as err:
                self.log.warning("Error loading %s: %s" % (key, err))
                continue
            yield key, val

    def items(self):
        back = []
        for key in dict.keys(self):
            try:
                val = self[key]
            except Exception as err:
                self.log.warning("Error loading %s: %s" % (key, err))
                continue
            back.append((key, val))
        return back

    def values(self):
        back = []
        # Fix: dict.iteritems does not exist in Python 3 (AttributeError); use dict.items
        for key, val in dict.items(self):
            if not val:  # Purged from memory: reload from storage
                try:
                    val = self.loadItem(key)
                except Exception:
                    continue
            back.append(val)
        return back

    def get(self, key, default=None):
        try:
            return self.__getitem__(key)
        except KeyError:
            return default
        except Exception as err:
            # Unexpected load error: mark as bad file and drop the key
            self.site.bad_files[key] = self.site.bad_files.get(key, 1)
            dict.__delitem__(self, key)
            self.log.warning("Error loading %s: %s" % (key, err))
            return default

    def execute(self, query, params={}):
        """Run a query against ContentDb scoped to this site (fills in site_id)."""
        params["site_id"] = self.db_id
        return self.db.execute(query, params)
|
if __name__ == "__main__":
    # Ad-hoc benchmark: load a live site's content.json entries, then measure
    # iteration/membership cost and memory overhead of the lazy dict.
    import psutil
    process = psutil.Process(os.getpid())
    s_mem = process.memory_info()[0] / float(2 ** 20)
    root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27"
    contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root)
    print("Init len", len(contents))

    s = time.time()
    for dir_name in os.listdir(root + "/data/users/")[0:8000]:
        contents["data/users/%s/content.json" % dir_name]
    print("Load: %.3fs" % (time.time() - s))

    s = time.time()
    found = 0
    for key, val in contents.items():
        found += 1
        assert key
        assert val
    print("Found:", found)
    print("Iteritem: %.3fs" % (time.time() - s))

    s = time.time()
    found = 0
    for key in list(contents.keys()):
        found += 1
        assert key in contents
    print("In: %.3fs" % (time.time() - s))

    print("Len:", len(list(contents.values())), len(list(contents.keys())))

    print("Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem)
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1 @@
|
||||||
|
from .ContentManager import ContentManager
|
|
@ -0,0 +1,4 @@
|
||||||
|
from Config import config
from util import ThreadPool

# Shared worker pool for CPU-heavy crypto operations, sized from config
thread_pool_crypt = ThreadPool.ThreadPool(config.threads_crypt)
|
|
@ -0,0 +1,101 @@
|
||||||
|
import logging
|
||||||
|
import base64
|
||||||
|
import binascii
|
||||||
|
import time
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
from util.Electrum import dbl_format
|
||||||
|
from Config import config
|
||||||
|
|
||||||
|
import util.OpensslFindPatch
|
||||||
|
|
||||||
|
lib_verify_best = "sslcrypto"
|
||||||
|
|
||||||
|
from lib import sslcrypto
|
||||||
|
sslcurve_native = sslcrypto.ecc.get_curve("secp256k1")
|
||||||
|
sslcurve_fallback = sslcrypto.fallback.ecc.get_curve("secp256k1")
|
||||||
|
sslcurve = sslcurve_native
|
||||||
|
|
||||||
|
def loadLib(lib_name, silent=False):
    """Select the crypto backend used for signing/verification.

    Mutates the module globals sslcurve, libsecp256k1message and
    lib_verify_best depending on *lib_name*:
      - "libsecp256k1": import the native coincurve-based backend
      - "sslcrypto": use the (possibly native) sslcrypto curve
      - "sslcrypto_fallback": force the pure-Python sslcrypto curve
    """
    global sslcurve, libsecp256k1message, lib_verify_best
    if lib_name == "libsecp256k1":
        s = time.time()
        from lib import libsecp256k1message
        import coincurve
        lib_verify_best = "libsecp256k1"
        if not silent:
            logging.info(
                "Libsecpk256k1 loaded: %s in %.3fs" %
                (type(coincurve._libsecp256k1.lib).__name__, time.time() - s)
            )
    elif lib_name == "sslcrypto":
        sslcurve = sslcurve_native
        if sslcurve_native == sslcurve_fallback:
            # Native OpenSSL-backed curve was not available at import time
            logging.warning("SSLCurve fallback loaded instead of native")
    elif lib_name == "sslcrypto_fallback":
        sslcurve = sslcurve_fallback
|
||||||
|
|
||||||
|
# Try the fast native backend first; fall back silently to sslcrypto.
try:
    if not config.use_libsecp256k1:
        raise Exception("Disabled by config")
    loadLib("libsecp256k1")
    lib_verify_best = "libsecp256k1"
except Exception as err:
    # Not fatal: verification keeps working through sslcrypto
    logging.info("Libsecp256k1 load failed: %s" % err)
|
||||||
|
|
||||||
|
|
||||||
|
def newPrivatekey():  # Return new private key
    """Generate a fresh secp256k1 private key, WIF-encoded as str."""
    return sslcurve.private_to_wif(sslcurve.new_private_key()).decode()
|
||||||
|
|
||||||
|
|
||||||
|
def newSeed():
    """Generate a random master seed as a hex string (for HD key derivation)."""
    return binascii.hexlify(sslcurve.new_private_key()).decode()
|
||||||
|
|
||||||
|
|
||||||
|
def hdPrivatekey(seed, child):
    """Derive a WIF-encoded child private key from *seed* and *child* index."""
    # Too large child id could cause problems
    privatekey_bin = sslcurve.derive_child(seed.encode(), child % 100000000)
    return sslcurve.private_to_wif(privatekey_bin).decode()
|
||||||
|
|
||||||
|
|
||||||
|
def privatekeyToAddress(privatekey):  # Return address from private key
    """Derive the Bitcoin address belonging to *privatekey*.

    Accepts either a 64-character hex key or a WIF-encoded key.
    Returns the address string, or False if the key cannot be decoded.
    """
    try:
        if len(privatekey) == 64:
            raw_key = bytes.fromhex(privatekey)  # Plain hex form
        else:
            raw_key = sslcurve.wif_to_private(privatekey.encode())
        return sslcurve.private_to_address(raw_key).decode()
    except Exception:
        # Any decode failure means the private key was invalid
        return False
|
||||||
|
|
||||||
|
|
||||||
|
def sign(data, privatekey):  # Return sign to data using private key
    """Sign *data* (str) with a WIF private key; returns base64 signature.

    Returns None for legacy-format keys which are no longer supported.
    """
    if privatekey.startswith("23") and len(privatekey) > 52:
        return None  # Old style private key not supported
    # Recoverable signature over the double-SHA256 of the message, so the
    # signer's address can be recovered at verification time
    return base64.b64encode(sslcurve.sign(
        data.encode(),
        sslcurve.wif_to_private(privatekey.encode()),
        recoverable=True,
        hash=dbl_format
    )).decode()
|
||||||
|
|
||||||
|
|
||||||
|
def verify(data, valid_address, sign, lib_verify=None):  # Verify data using address and sign
    """Verify a recoverable signature against one or more addresses.

    Recovers the signer's address from *sign* using the selected backend and
    compares it with *valid_address* (a single address or a list of them).
    Returns True/False; raises if no verification backend is enabled.
    """
    if not lib_verify:
        lib_verify = lib_verify_best

    if not sign:
        return False

    if lib_verify == "libsecp256k1":
        sign_address = libsecp256k1message.recover_address(data.encode("utf8"), sign).decode("utf8")
    elif lib_verify in ("sslcrypto", "sslcrypto_fallback"):
        publickey = sslcurve.recover(base64.b64decode(sign), data.encode(), hash=dbl_format)
        sign_address = sslcurve.public_to_address(publickey).decode()
    else:
        raise Exception("No library enabled for signature verification")

    if type(valid_address) is list:  # Any address in the list
        return sign_address in valid_address
    else:  # One possible address
        return sign_address == valid_address
|
|
@ -0,0 +1,217 @@
|
||||||
|
import sys
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import ssl
|
||||||
|
import hashlib
|
||||||
|
import random
|
||||||
|
|
||||||
|
from Config import config
|
||||||
|
from util import helper
|
||||||
|
|
||||||
|
|
||||||
|
class CryptConnectionManager:
    """Manage TLS support for peer connections.

    Generates a self-signed RSA certificate (via the openssl binary) with a
    camouflage subject, builds client/server SSLContexts and wraps sockets.
    """

    def __init__(self):
        # Locate the openssl executable depending on platform/packaging
        if config.openssl_bin_file:
            self.openssl_bin = config.openssl_bin_file
        elif sys.platform.startswith("win"):
            self.openssl_bin = "tools\\openssl\\openssl.exe"
        elif config.dist_type.startswith("bundle_linux"):
            self.openssl_bin = "../runtime/bin/openssl"
        else:
            self.openssl_bin = "openssl"

        # Lazily created by createSslContexts()
        self.context_client = None
        self.context_server = None

        self.openssl_conf_template = "src/lib/openssl/openssl.cnf"
        self.openssl_conf = config.data_dir + "/openssl.cnf"

        # Environment passed to the openssl subprocess
        self.openssl_env = {
            "OPENSSL_CONF": self.openssl_conf,
            "RANDFILE": config.data_dir + "/openssl-rand.tmp"
        }

        self.crypt_supported = []  # Supported cryptos

        # File paths for the generated CA and server cert/key material
        self.cacert_pem = config.data_dir + "/cacert-rsa.pem"
        self.cakey_pem = config.data_dir + "/cakey-rsa.pem"
        self.cert_pem = config.data_dir + "/cert-rsa.pem"
        self.cert_csr = config.data_dir + "/cert-rsa.csr"
        self.key_pem = config.data_dir + "/key-rsa.pem"

        self.log = logging.getLogger("CryptConnectionManager")
        self.log.debug("Version: %s" % ssl.OPENSSL_VERSION)

        # Common hostnames used as SNI/subject camouflage so the TLS
        # handshake does not advertise this application
        self.fakedomains = [
            "yahoo.com", "amazon.com", "live.com", "microsoft.com", "mail.ru", "csdn.net", "bing.com",
            "amazon.co.jp", "office.com", "imdb.com", "msn.com", "samsung.com", "huawei.com", "ztedevices.com",
            "godaddy.com", "w3.org", "gravatar.com", "creativecommons.org", "hatena.ne.jp",
            "adobe.com", "opera.com", "apache.org", "rambler.ru", "one.com", "nationalgeographic.com",
            "networksolutions.com", "php.net", "python.org", "phoca.cz", "debian.org", "ubuntu.com",
            "nazwa.pl", "symantec.com"
        ]

    def createSslContexts(self):
        """Build the client and server SSLContexts (idempotent)."""
        if self.context_server and self.context_client:
            return False
        ciphers = "ECDHE-RSA-CHACHA20-POLY1305:ECDHE-RSA-AES128-GCM-SHA256:AES128-SHA256:AES256-SHA:"
        ciphers += "!aNULL:!eNULL:!EXPORT:!DSS:!DES:!RC4:!3DES:!MD5:!PSK"

        # PROTOCOL_TLS auto-negotiates the best version; fall back for old ssl modules
        if hasattr(ssl, "PROTOCOL_TLS"):
            protocol = ssl.PROTOCOL_TLS
        else:
            protocol = ssl.PROTOCOL_TLSv1_2
        self.context_client = ssl.SSLContext(protocol)
        # Peers use self-signed certs, so certificate validation is disabled;
        # authenticity comes from the cert-pin check in wrapSocket
        self.context_client.check_hostname = False
        self.context_client.verify_mode = ssl.CERT_NONE

        self.context_server = ssl.SSLContext(protocol)
        self.context_server.load_cert_chain(self.cert_pem, self.key_pem)

        for ctx in (self.context_client, self.context_server):
            ctx.set_ciphers(ciphers)
            ctx.options |= ssl.OP_NO_COMPRESSION  # Mitigate CRIME-style attacks
            try:
                # ALPN/NPN values mimic an HTTP/2-capable web endpoint;
                # NPN may be unsupported, hence the broad except
                ctx.set_alpn_protocols(["h2", "http/1.1"])
                ctx.set_npn_protocols(["h2", "http/1.1"])
            except Exception:
                pass

    # Select a crypt that is supported by both sides
    # Return: Name of the crypto
    def selectCrypt(self, client_supported):
        for crypt in self.crypt_supported:
            if crypt in client_supported:
                return crypt
        return False

    # Wrap socket for crypt
    # Return: wrapped socket
    def wrapSocket(self, sock, crypt, server=False, cert_pin=None):
        """Wrap *sock* with TLS when crypt == "tls-rsa", else return as-is.

        If *cert_pin* is given (client side), the peer certificate's sha256
        must match it, otherwise an Exception is raised.
        """
        if crypt == "tls-rsa":
            if server:
                sock_wrapped = self.context_server.wrap_socket(sock, server_side=True)
            else:
                # Random fake SNI hostname for camouflage
                sock_wrapped = self.context_client.wrap_socket(sock, server_hostname=random.choice(self.fakedomains))
            if cert_pin:
                cert_hash = hashlib.sha256(sock_wrapped.getpeercert(True)).hexdigest()
                if cert_hash != cert_pin:
                    raise Exception("Socket certificate does not match (%s != %s)" % (cert_hash, cert_pin))
            return sock_wrapped
        else:
            return sock

    def removeCerts(self):
        """Delete generated cert/key files unless keep_ssl_cert is set."""
        if config.keep_ssl_cert:
            return False
        for file_name in ["cert-rsa.pem", "key-rsa.pem", "cacert-rsa.pem", "cakey-rsa.pem", "cacert-rsa.srl", "cert-rsa.csr", "openssl-rand.tmp"]:
            file_path = "%s/%s" % (config.data_dir, file_name)
            if os.path.isfile(file_path):
                os.unlink(file_path)

    # Load and create cert files if necessary
    def loadCerts(self):
        if config.disable_encryption:
            return False

        if self.createSslRsaCert() and "tls-rsa" not in self.crypt_supported:
            self.crypt_supported.append("tls-rsa")

    # Try to create RSA server cert + sign for connection encryption
    # Return: True on success
    def createSslRsaCert(self):
        # Camouflage CA subjects mimicking well-known issuers
        casubjects = [
            "/C=US/O=Amazon/OU=Server CA 1B/CN=Amazon",
            "/C=US/O=Let's Encrypt/CN=Let's Encrypt Authority X3",
            "/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert SHA2 High Assurance Server CA",
            "/C=GB/ST=Greater Manchester/L=Salford/O=COMODO CA Limited/CN=COMODO RSA Domain Validation Secure Server CA"
        ]
        self.openssl_env['CN'] = random.choice(self.fakedomains)

        if os.path.isfile(self.cert_pem) and os.path.isfile(self.key_pem):
            self.createSslContexts()
            return True  # Files already exist

        import subprocess

        # Replace variables in config template
        conf_template = open(self.openssl_conf_template).read()
        conf_template = conf_template.replace("$ENV::CN", self.openssl_env['CN'])
        open(self.openssl_conf, "w").write(conf_template)

        # Generate CAcert and CAkey
        cmd_params = helper.shellquote(
            self.openssl_bin,
            self.openssl_conf,
            random.choice(casubjects),
            self.cakey_pem,
            self.cacert_pem
        )
        cmd = "%s req -new -newkey rsa:2048 -days 3650 -nodes -x509 -config %s -subj %s -keyout %s -out %s -batch" % cmd_params
        self.log.debug("Generating RSA CAcert and CAkey PEM files...")
        self.log.debug("Running: %s" % cmd)
        proc = subprocess.Popen(
            cmd, shell=True, stderr=subprocess.STDOUT,
            stdout=subprocess.PIPE, env=self.openssl_env
        )
        back = proc.stdout.read().strip().decode(errors="replace").replace("\r", "")
        proc.wait()

        if not (os.path.isfile(self.cacert_pem) and os.path.isfile(self.cakey_pem)):
            self.log.error("RSA ECC SSL CAcert generation failed, CAcert or CAkey files not exist. (%s)" % back)
            return False
        else:
            self.log.debug("Result: %s" % back)

        # Generate certificate key and signing request
        cmd_params = helper.shellquote(
            self.openssl_bin,
            self.key_pem,
            self.cert_csr,
            "/CN=" + self.openssl_env['CN'],
            self.openssl_conf,
        )
        cmd = "%s req -new -newkey rsa:2048 -keyout %s -out %s -subj %s -sha256 -nodes -batch -config %s" % cmd_params
        self.log.debug("Generating certificate key and signing request...")
        proc = subprocess.Popen(
            cmd, shell=True, stderr=subprocess.STDOUT,
            stdout=subprocess.PIPE, env=self.openssl_env
        )
        back = proc.stdout.read().strip().decode(errors="replace").replace("\r", "")
        proc.wait()
        self.log.debug("Running: %s\n%s" % (cmd, back))

        # Sign request and generate certificate
        cmd_params = helper.shellquote(
            self.openssl_bin,
            self.cert_csr,
            self.cacert_pem,
            self.cakey_pem,
            self.cert_pem,
            self.openssl_conf
        )
        cmd = "%s x509 -req -in %s -CA %s -CAkey %s -set_serial 01 -out %s -days 730 -sha256 -extensions x509_ext -extfile %s" % cmd_params
        self.log.debug("Generating RSA cert...")
        proc = subprocess.Popen(
            cmd, shell=True, stderr=subprocess.STDOUT,
            stdout=subprocess.PIPE, env=self.openssl_env
        )
        back = proc.stdout.read().strip().decode(errors="replace").replace("\r", "")
        proc.wait()
        self.log.debug("Running: %s\n%s" % (cmd, back))

        if os.path.isfile(self.cert_pem) and os.path.isfile(self.key_pem):
            self.createSslContexts()

            # Remove no longer necessary files
            os.unlink(self.openssl_conf)
            os.unlink(self.cacert_pem)
            os.unlink(self.cakey_pem)
            os.unlink(self.cert_csr)

            return True
        else:
            self.log.error("RSA ECC SSL cert generation failed, cert or key files not exist.")
|
|
||||||
|
|
||||||
|
# Module-level singleton used by the connection layer
manager = CryptConnectionManager()
|
|
@ -0,0 +1,340 @@
|
||||||
|
## ZeroNet onion V3 support
|
||||||
|
## The following copied code is copied from stem.util.ed25519 official Tor Project python3 lib
|
||||||
|
## url : https://gitweb.torproject.org/stem.git/tree/stem/util/ed25519.py
|
||||||
|
## the ##modified tag means that the function has been modified respect to the one used by stem lib
|
||||||
|
## the ##custom tag means that the function has been added by me and it's not present on the stem ed25519.py file
|
||||||
|
## every comment i make begins with ##
|
||||||
|
##
|
||||||
|
# The following is copied from...
|
||||||
|
#
|
||||||
|
# https://github.com/pyca/ed25519
|
||||||
|
#
|
||||||
|
# This is under the CC0 license. For more information please see...
|
||||||
|
#
|
||||||
|
# https://github.com/pyca/cryptography/issues/5068
|
||||||
|
|
||||||
|
# ed25519.py - Optimized version of the reference implementation of Ed25519
|
||||||
|
#
|
||||||
|
# Written in 2011? by Daniel J. Bernstein <djb@cr.yp.to>
|
||||||
|
# 2013 by Donald Stufft <donald@stufft.io>
|
||||||
|
# 2013 by Alex Gaynor <alex.gaynor@gmail.com>
|
||||||
|
# 2013 by Greg Price <price@mit.edu>
|
||||||
|
#
|
||||||
|
# To the extent possible under law, the author(s) have dedicated all copyright
|
||||||
|
# and related and neighboring rights to this software to the public domain
|
||||||
|
# worldwide. This software is distributed without any warranty.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the CC0 Public Domain Dedication along
|
||||||
|
# with this software. If not, see
|
||||||
|
# <http://creativecommons.org/publicdomain/zero/1.0/>.
|
||||||
|
|
||||||
|
"""
|
||||||
|
NB: This code is not safe for use with secret keys or secret data.
|
||||||
|
The only safe use of this code is for verifying signatures on public messages.
|
||||||
|
|
||||||
|
Functions for computing the public key of a secret key and for signing
|
||||||
|
a message are included, namely publickey_unsafe and signature_unsafe,
|
||||||
|
for testing purposes only.
|
||||||
|
|
||||||
|
The root of the problem is that Python's long-integer arithmetic is
|
||||||
|
not designed for use in cryptography. Specifically, it may take more
|
||||||
|
or less time to execute an operation depending on the values of the
|
||||||
|
inputs, and its memory access patterns may also depend on the inputs.
|
||||||
|
This opens it to timing and cache side-channel attacks which can
|
||||||
|
disclose data to an attacker. We rely on Python's long-integer
|
||||||
|
arithmetic, so we cannot handle secrets without risking their disclosure.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import hashlib
|
||||||
|
import operator
|
||||||
|
import sys
|
||||||
|
import base64
|
||||||
|
|
||||||
|
|
||||||
|
__version__ = "1.0.dev0"

# Useful for very coarse version differentiation.
PY3 = sys.version_info[0] == 3

# Byte-access shims so the rest of the module is Py2/Py3 agnostic
if PY3:
    indexbytes = operator.getitem          # bytes[i] -> int on Py3
    intlist2bytes = bytes                  # list[int] -> bytes
    int2byte = operator.methodcaller("to_bytes", 1, "big")  # int -> 1 byte
else:
    int2byte = chr
    range = xrange

    def indexbytes(buf, i):
        return ord(buf[i])

    def intlist2bytes(l):
        return b"".join(chr(c) for c in l)
|
||||||
|
|
||||||
|
|
||||||
|
b = 256  # Bit length parameter: keys are b//8 bytes, signatures b//4 bytes
q = 2 ** 255 - 19  # Prime of the underlying field GF(q)
l = 2 ** 252 + 27742317777372353535851937790883648493  # Order of the base-point group
|
||||||
|
|
||||||
|
|
||||||
|
def H(m):
    """Return the raw SHA-512 digest of *m* (the ed25519 hash function)."""
    hasher = hashlib.sha512(m)
    return hasher.digest()
|
||||||
|
|
||||||
|
|
||||||
|
def pow2(x, p):
    """== pow(x, 2**p, q)"""
    # Repeated squaring: square x modulo q, p times
    while p > 0:
        x = x * x % q
        p -= 1
    return x
|
||||||
|
|
||||||
|
|
||||||
|
def inv(z):
    """$= z^{-1} \mod q$, for z != 0"""
    # Computes z^(q-2) mod q via an addition-chain (Fermat's little theorem).
    # Adapted from curve25519_athlon.c in djb's Curve25519.
    z2 = z * z % q  # 2
    z9 = pow2(z2, 2) * z % q  # 9
    z11 = z9 * z2 % q  # 11
    z2_5_0 = (z11 * z11) % q * z9 % q  # 31 == 2^5 - 2^0
    z2_10_0 = pow2(z2_5_0, 5) * z2_5_0 % q  # 2^10 - 2^0
    z2_20_0 = pow2(z2_10_0, 10) * z2_10_0 % q  # ...
    z2_40_0 = pow2(z2_20_0, 20) * z2_20_0 % q
    z2_50_0 = pow2(z2_40_0, 10) * z2_10_0 % q
    z2_100_0 = pow2(z2_50_0, 50) * z2_50_0 % q
    z2_200_0 = pow2(z2_100_0, 100) * z2_100_0 % q
    z2_250_0 = pow2(z2_200_0, 50) * z2_50_0 % q  # 2^250 - 2^0
    return pow2(z2_250_0, 5) * z11 % q  # 2^255 - 2^5 + 11 = q - 2
|
||||||
|
|
||||||
|
|
||||||
|
d = -121665 * inv(121666) % q  # Twisted Edwards curve coefficient
I = pow(2, (q - 1) // 4, q)  # sqrt(-1) mod q, used in xrecover
|
||||||
|
|
||||||
|
|
||||||
|
def xrecover(y):
    """Recover the even x coordinate for a given y on the curve."""
    xx = (y * y - 1) * inv(d * y * y + 1)  # x^2 from the curve equation
    x = pow(xx, (q + 3) // 8, q)  # Candidate square root (q = 5 mod 8)

    # If the candidate squared misses xx, multiply by sqrt(-1)
    if (x * x - xx) % q != 0:
        x = (x * I) % q

    # Normalize to the even root
    if x % 2 != 0:
        x = q-x

    return x
|
||||||
|
|
||||||
|
|
||||||
|
# Standard ed25519 base point B, in extended homogeneous coordinates
# (x, y, z, t) with x*y == z*t
By = 4 * inv(5)
Bx = xrecover(By)
B = (Bx % q, By % q, 1, (Bx * By) % q)
ident = (0, 1, 1, 0)  # The neutral element of the group
|
||||||
|
|
||||||
|
|
||||||
|
def edwards_add(P, Q):
    """Add two curve points in extended coordinates."""
    # This is formula sequence 'addition-add-2008-hwcd-3' from
    # http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
    (x1, y1, z1, t1) = P
    (x2, y2, z2, t2) = Q

    a = (y1-x1)*(y2-x2) % q
    b = (y1+x1)*(y2+x2) % q
    c = t1*2*d*t2 % q
    dd = z1*2*z2 % q
    e = b - a
    f = dd - c
    g = dd + c
    h = b + a
    x3 = e*f
    y3 = g*h
    t3 = e*h
    z3 = f*g

    return (x3 % q, y3 % q, z3 % q, t3 % q)
|
||||||
|
|
||||||
|
|
||||||
|
def edwards_double(P):
    """Double a curve point in extended coordinates (faster than add(P, P))."""
    # This is formula sequence 'dbl-2008-hwcd' from
    # http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
    (x1, y1, z1, t1) = P

    a = x1*x1 % q
    b = y1*y1 % q
    c = 2*z1*z1 % q
    # dd = -a
    e = ((x1+y1)*(x1+y1) - a - b) % q
    g = -a + b  # dd + b
    f = g - c
    h = -a - b  # dd - b
    x3 = e*f
    y3 = g*h
    t3 = e*h
    z3 = f*g

    return (x3 % q, y3 % q, z3 % q, t3 % q)
|
||||||
|
|
||||||
|
|
||||||
|
def scalarmult(P, e):
    """Return e*P by recursive double-and-add."""
    if e == 0:
        return ident
    Q = scalarmult(P, e // 2)
    Q = edwards_double(Q)
    if e & 1:
        Q = edwards_add(Q, P)
    return Q
|
||||||
|
|
||||||
|
|
||||||
|
# Bpow[i] == scalarmult(B, 2**i)
# Precomputed table of doublings of the base point, filled once at import
Bpow = []


def make_Bpow():
    P = B
    for i in range(253):
        Bpow.append(P)
        P = edwards_double(P)
make_Bpow()
|
||||||
|
|
||||||
|
|
||||||
|
def scalarmult_B(e):
    """
    Implements scalarmult(B, e) more efficiently.
    """
    # scalarmult(B, l) is the identity
    e = e % l
    P = ident
    # Add the precomputed 2^i * B for each set bit of e
    for i in range(253):
        if e & 1:
            P = edwards_add(P, Bpow[i])
        e = e // 2
    assert e == 0, e
    return P
|
||||||
|
|
||||||
|
|
||||||
|
def encodeint(y):
    """Encode integer y as b//8 little-endian bytes."""
    bits = [(y >> i) & 1 for i in range(b)]
    return b''.join([
        int2byte(sum([bits[i * 8 + j] << j for j in range(8)]))
        for i in range(b//8)
    ])
|
||||||
|
|
||||||
|
|
||||||
|
def encodepoint(P):
    """Encode point P as 32 bytes: little-endian y with x's parity in the top bit."""
    (x, y, z, t) = P
    zi = inv(z)
    # Convert from projective back to affine coordinates
    x = (x * zi) % q
    y = (y * zi) % q
    bits = [(y >> i) & 1 for i in range(b - 1)] + [x & 1]
    return b''.join([
        int2byte(sum([bits[i * 8 + j] << j for j in range(8)]))
        for i in range(b // 8)
    ])
|
||||||
|
|
||||||
|
|
||||||
|
def bit(h, i):
    """Return bit i (little-endian) of byte string h."""
    return (indexbytes(h, i // 8) >> (i % 8)) & 1
|
||||||
|
|
||||||
|
##modified
def publickey_unsafe(sk):
    """
    Not safe to use with secret keys or secret data.

    See module docstring. This function should be used for testing only.
    """
    # Modified vs upstream stem: the caller supplies the already-expanded
    # secret (h = sk) instead of hashing the seed here (original: h = H(sk))
    h = sk
    # Clamp per ed25519: clear low 3 bits, set bit b-2
    a = 2 ** (b - 2) + sum(2 ** i * bit(h, i) for i in range(3, b - 2))
    A = scalarmult_B(a)
    return encodepoint(A)
|
||||||
|
|
||||||
|
##custom
|
||||||
|
## from stem.util.str_tools._to_unicode_impl
|
||||||
|
## from https://gitweb.torproject.org/stem.git/tree/stem/util/str_tools.py#n80
|
||||||
|
##custom
## from stem.util.str_tools._to_unicode_impl
## from https://gitweb.torproject.org/stem.git/tree/stem/util/str_tools.py#n80
def to_unicode_impl(msg):
    """Return *msg* as str: bytes are UTF-8 decoded (bad bytes replaced);
    str and None pass through unchanged."""
    if msg is None or isinstance(msg, str):
        return msg
    return msg.decode('utf-8', 'replace')
|
||||||
|
|
||||||
|
##custom
|
||||||
|
## rewritten stem.descriptor.hidden_service.address_from_identity_key
|
||||||
|
## from https://gitweb.torproject.org/stem.git/tree/stem/descriptor/hidden_service.py#n1088
|
||||||
|
##custom
## rewritten stem.descriptor.hidden_service.address_from_identity_key
## from https://gitweb.torproject.org/stem.git/tree/stem/descriptor/hidden_service.py#n1088
def publickey_to_onionaddress(key):
    """Return the v3 ".onion" address string for a 32-byte ed25519 public key."""
    CHECKSUM_CONSTANT = b'.onion checksum'
    ## version = stem.client.datatype.Size.CHAR.pack(3)
    version = b'\x03'
    checksum = hashlib.sha3_256(CHECKSUM_CONSTANT + key + version).digest()[:2]
    encoded = base64.b32encode(key + checksum + version) + b'.onion'
    # base32 output is pure ASCII, so decoding with 'replace' never alters it
    return encoded.decode('utf-8', 'replace').lower()
|
||||||
|
|
||||||
|
|
||||||
|
def Hint(m):
    """SHA-512 of m interpreted as a little-endian integer (2*b bits)."""
    h = H(m)
    return sum(2 ** i * bit(h, i) for i in range(2 * b))
|
||||||
|
|
||||||
|
##modified
def signature_unsafe(m, sk, pk):
    """
    Not safe to use with secret keys or secret data.

    See module docstring. This function should be used for testing only.
    """
    # Modified vs upstream stem: the caller supplies the already-expanded
    # secret (h = sk) instead of hashing the seed here (original: h = H(sk))
    h = sk
    # Clamped scalar, same expansion as publickey_unsafe
    a = 2 ** (b - 2) + sum(2 ** i * bit(h, i) for i in range(3, b - 2))
    # Deterministic nonce from the second half of the expanded secret + message
    r = Hint(
        intlist2bytes([indexbytes(h, j) for j in range(b // 8, b // 4)]) + m
    )
    R = scalarmult_B(r)
    S = (r + Hint(encodepoint(R) + pk + m) * a) % l
    return encodepoint(R) + encodeint(S)
|
||||||
|
|
||||||
|
|
||||||
|
def isoncurve(P):
    """Check that P satisfies the curve equation and coordinate invariant."""
    (x, y, z, t) = P
    return (z % q != 0 and
            x*y % q == z*t % q and
            (y*y - x*x - z*z - d*t*t) % q == 0)
|
||||||
|
|
||||||
|
|
||||||
|
def decodeint(s):
    """Decode b//8 little-endian bytes back to an integer."""
    return sum(2 ** i * bit(s, i) for i in range(0, b))
|
||||||
|
|
||||||
|
|
||||||
|
def decodepoint(s):
    """Decode a 32-byte encoded point; raises ValueError if it is off-curve."""
    y = sum(2 ** i * bit(s, i) for i in range(0, b - 1))
    x = xrecover(y)
    # The top bit of the encoding stores x's parity
    if x & 1 != bit(s, b-1):
        x = q - x
    P = (x, y, 1, (x*y) % q)
    if not isoncurve(P):
        raise ValueError("decoding point that is not on curve")
    return P
|
||||||
|
|
||||||
|
|
||||||
|
class SignatureMismatch(Exception):
    """Raised by checkvalid when a signature fails verification."""
    pass
|
||||||
|
|
||||||
|
|
||||||
|
def checkvalid(s, m, pk):
    """
    Not safe to use when any argument is secret.

    See module docstring. This function should be used only for
    verifying public signatures of public messages.

    Raises ValueError on malformed input and SignatureMismatch when the
    signature does not verify; returns None on success.
    """
    if len(s) != b // 4:
        raise ValueError("signature length is wrong")

    if len(pk) != b // 8:
        raise ValueError("public-key length is wrong")

    # Signature is (R, S): point R then integer S
    R = decodepoint(s[:b // 8])
    A = decodepoint(pk)
    S = decodeint(s[b // 8:b // 4])
    h = Hint(encodepoint(R) + pk + m)

    # Verification equation: S*B == R + h*A (compared projectively)
    (x1, y1, z1, t1) = P = scalarmult_B(S)
    (x2, y2, z2, t2) = Q = edwards_add(R, scalarmult(A, h))

    if (not isoncurve(P) or not isoncurve(Q) or
            (x1*z2 - x2*z1) % q != 0 or (y1*z2 - y2*z1) % q != 0):
        raise SignatureMismatch("signature does not pass verification")
|
|
@ -0,0 +1,56 @@
|
||||||
|
import hashlib
|
||||||
|
import os
|
||||||
|
import base64
|
||||||
|
|
||||||
|
|
||||||
|
def sha512sum(file, blocksize=65536, format="hexdigest"):
    """Hash a file with SHA-512, truncated to 256 bits.

    Args:
        file: Filename (str) or an open binary file-like object.
        blocksize: Read chunk size in bytes.
        format: "hexdigest" for a 64-char hex string, anything else for
            32 raw bytes.

    Returns:
        The truncated digest (str or bytes depending on *format*).
    """
    digest = hashlib.sha512()
    if type(file) is str:  # Filename specified
        # Fix: close the handle we opened (the old code leaked it)
        with open(file, "rb") as f:
            for block in iter(lambda: f.read(blocksize), b""):
                digest.update(block)
    else:
        for block in iter(lambda: file.read(blocksize), b""):
            digest.update(block)

    # Truncate to 256bits is good enough
    if format == "hexdigest":
        return digest.hexdigest()[0:64]
    else:
        return digest.digest()[0:32]
|
||||||
|
|
||||||
|
|
||||||
|
def sha256sum(file, blocksize=65536):
    """Return the SHA-256 hexdigest of a file.

    Args:
        file: Filename (str) or an open binary file-like object.
        blocksize: Read chunk size in bytes.
    """
    digest = hashlib.sha256()
    if type(file) is str:  # Filename specified
        # Fix: close the handle we opened (the old code leaked it)
        with open(file, "rb") as f:
            for block in iter(lambda: f.read(blocksize), b""):
                digest.update(block)
    else:
        for block in iter(lambda: file.read(blocksize), b""):
            digest.update(block)
    return digest.hexdigest()
|
||||||
|
|
||||||
|
|
||||||
|
def random(length=64, encoding="hex"):
    """Return a random token string of *length* characters.

    encoding "base64" yields characters A-Za-z0-9; any other value yields
    lowercase hex characters a-f0-9 (faster).
    """
    digest = hashlib.sha512(os.urandom(256))
    if encoding == "base64":  # Characters: A-Za-z0-9
        token = base64.b64encode(digest.digest()).decode("ascii")
        for ch in ("+", "/", "="):
            token = token.replace(ch, "")
        return token[0:length]
    # Characters: a-f0-9 (faster)
    return digest.hexdigest()[0:length]
|
||||||
|
|
||||||
|
|
||||||
|
# Sha512 truncated to 256bits
class Sha512t:
    """hashlib-style SHA-512 wrapper whose digests are truncated to 256 bits."""

    def __init__(self, data):
        # Seed the hasher with initial data when provided
        self.sha512 = hashlib.sha512(data) if data else hashlib.sha512()

    def hexdigest(self):
        """First 64 hex chars (256 bits) of the SHA-512 hexdigest."""
        return self.sha512.hexdigest()[:64]

    def digest(self):
        """First 32 bytes (256 bits) of the SHA-512 digest."""
        return self.sha512.digest()[:32]

    def update(self, data):
        return self.sha512.update(data)
|
||||||
|
|
||||||
|
|
||||||
|
def sha512t(data=None):
    # Factory mirroring hashlib's constructor style (e.g. hashlib.sha512())
    return Sha512t(data)
|
|
@ -0,0 +1,65 @@
|
||||||
|
import base64
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
def sign(data, privatekey):
    """Sign *data* (bytes) with an onion-service private key.

    An 88-char base64 key is treated as an ed25519 (onion v3) key; anything
    else is treated as a PKCS#1 RSA private key (PEM armor added if missing).
    Returns the raw signature bytes.
    """
    import rsa
    from rsa import pkcs1
    from Crypt import CryptEd25519
    ## v3 = 88
    if len(privatekey) == 88:
        prv_key = base64.b64decode(privatekey)
        pub_key = CryptEd25519.publickey_unsafe(prv_key)
        sign = CryptEd25519.signature_unsafe(data, prv_key, pub_key)
        return sign

    if "BEGIN RSA PRIVATE KEY" not in privatekey:
        privatekey = "-----BEGIN RSA PRIVATE KEY-----\n%s\n-----END RSA PRIVATE KEY-----" % privatekey

    priv = rsa.PrivateKey.load_pkcs1(privatekey)
    sign = rsa.pkcs1.sign(data, priv, 'SHA-256')
    return sign
|
||||||
|
|
||||||
|
def verify(data, publickey, sign):
    """Verify *sign* over *data* with an onion-service public key.

    A 32-byte key is verified as ed25519 (onion v3); otherwise the key is
    parsed as a DER PKCS#1 RSA public key. Returns a truthy hash-method
    string on success, or False on failure (mirroring rsa.pkcs1.verify).
    """
    import rsa
    from rsa import pkcs1
    from Crypt import CryptEd25519

    if len(publickey) == 32:
        try:
            valid = CryptEd25519.checkvalid(sign, data, publickey)
            # checkvalid returns None on success (and raises on mismatch);
            # overwrite with 'SHA-256' so the return matches rsa.pkcs1.verify
            valid = 'SHA-256'
        except Exception as err:
            print(err)
            valid = False
        return valid

    pub = rsa.PublicKey.load_pkcs1(publickey, format="DER")
    try:
        valid = rsa.pkcs1.verify(data, sign, pub)
    except pkcs1.VerificationError:
        valid = False
    return valid
|
||||||
|
|
||||||
|
def privatekeyToPublickey(privatekey):
    """Derive the public key for an onion-service private key.

    An 88-char base64 key yields a 32-byte ed25519 public key (onion v3);
    otherwise the key is treated as PKCS#1 RSA and the public key is
    returned DER-encoded.
    """
    from Crypt import CryptEd25519
    import rsa
    from rsa import pkcs1

    if len(privatekey) == 88:
        prv_key = base64.b64decode(privatekey)
        pub_key = CryptEd25519.publickey_unsafe(prv_key)
        return pub_key

    if "BEGIN RSA PRIVATE KEY" not in privatekey:
        privatekey = "-----BEGIN RSA PRIVATE KEY-----\n%s\n-----END RSA PRIVATE KEY-----" % privatekey

    priv = rsa.PrivateKey.load_pkcs1(privatekey)
    pub = rsa.PublicKey(priv.n, priv.e)
    return pub.save_pkcs1("DER")
|
||||||
|
|
||||||
|
def publickeyToOnion(publickey):
    """Return the .onion hostname (without the ".onion" suffix) for a key.

    32-byte keys map to v3 addresses via ed25519; anything else uses the
    legacy v2 scheme (base32 of the truncated SHA-1 of the DER key).
    """
    from Crypt import CryptEd25519
    if len(publickey) == 32:
        # [:-6] strips the trailing ".onion"
        addr = CryptEd25519.publickey_to_onionaddress(publickey)[:-6]
        return addr
    return base64.b32encode(hashlib.sha1(publickey).digest()[:10]).lower().decode("ascii")
|
|
@ -0,0 +1,519 @@
|
||||||
|
import sqlite3
|
||||||
|
import json
|
||||||
|
import time
|
||||||
|
import logging
|
||||||
|
import re
|
||||||
|
import os
|
||||||
|
import atexit
|
||||||
|
import threading
|
||||||
|
import sys
|
||||||
|
import weakref
|
||||||
|
import errno
|
||||||
|
|
||||||
|
import gevent
|
||||||
|
|
||||||
|
from Debug import Debug
|
||||||
|
from .DbCursor import DbCursor
|
||||||
|
from util import SafeRe
|
||||||
|
from util import helper
|
||||||
|
from util import ThreadPool
|
||||||
|
from Config import config
|
||||||
|
|
||||||
|
# Worker pool for database operations, sized from config
thread_pool_db = ThreadPool.ThreadPool(config.threads_db)

next_db_id = 0   # Monotonic id assigned to each Db instance (for logging)
opened_dbs = []  # All currently open Db objects, maintained by connect/close
|
||||||
|
|
||||||
|
|
||||||
|
# Close idle databases to save some memory
def dbCleanup():
    # Runs forever as a gevent greenlet; iterates a copy of opened_dbs
    # because db.close() mutates the list
    while 1:
        time.sleep(60 * 5)
        for db in opened_dbs[:]:
            idle = time.time() - db.last_query_time
            if idle > 60 * 5 and db.close_idle:
                db.close("Cleanup")
|
||||||
|
|
||||||
|
|
||||||
|
def dbCommitCheck():
    """Background greenlet: flush pending commits of open databases every 5s."""
    while 1:
        time.sleep(5)
        # Copy the list: commits may close/reorder databases concurrently
        for db in opened_dbs[:]:
            if not db.need_commit:
                continue

            success = db.commit("Interval")
            if success:
                db.need_commit = False
            # Yield briefly so other greenlets can run between commits
            time.sleep(0.1)
|
||||||
|
|
||||||
|
|
||||||
|
def dbCloseAll():
    """Close every open database (used at process shutdown)."""
    for db in opened_dbs[:]:
        db.close("Close all")
|
||||||
|
|
||||||
|
|
||||||
|
# Start the background maintenance greenlets and register shutdown cleanup
gevent.spawn(dbCleanup)
gevent.spawn(dbCommitCheck)
atexit.register(dbCloseAll)
|
||||||
|
|
||||||
|
|
||||||
|
class DbTableError(Exception):
    """Database table error that remembers which table it concerns."""

    def __init__(self, message, table):
        super().__init__(message)
        self.table = table  # Name of the table the error relates to
|
||||||
|
|
||||||
|
|
||||||
|
class Db(object):
    """Gevent-friendly wrapper around a sqlite3 database.

    Manages a single shared connection (WAL mode, cross-thread), delayed and
    batched writes, schema-driven table creation (checkTables) and mapping of
    site json files into relational tables (updateJson). Open instances are
    tracked in the module-level `opened_dbs` list so the background greenlets
    can commit/close them.
    """

    def __init__(self, schema, db_path, close_idle=False):
        """Prepare the Db object; the sqlite connection is opened lazily.

        schema: dict describing db_name, version, tables and json file maps.
        db_path: path of the sqlite file (its directory bounds updateJson).
        close_idle: if True, the cleanup greenlet may close this db when idle.
        """
        global next_db_id
        self.db_path = db_path
        self.db_dir = os.path.dirname(db_path) + "/"
        self.schema = schema
        self.schema["version"] = self.schema.get("version", 1)
        self.conn = None  # sqlite3.Connection, created on first use
        self.cur = None  # Shared DbCursor, created on connect()
        self.cursors = weakref.WeakSet()  # Live raw sqlite cursors (for close())
        self.id = next_db_id
        next_db_id += 1
        self.progress_sleeping = False  # True while sqlite progress handler yields
        self.commiting = False  # True while a commit is in progress
        self.log = logging.getLogger("Db#%s:%s" % (self.id, schema["db_name"]))
        self.table_names = None
        self.collect_stats = False  # If True, execute() records per-query stats
        self.foreign_keys = False  # If True, PRAGMA foreign_keys is enabled on connect
        self.need_commit = False  # Set by update queries; cleared by dbCommitCheck
        self.query_stats = {}
        self.db_keyvalues = {}  # Cache of the internal keyvalue table (json_id=0)
        self.delayed_queue = []  # Pending (command, (args, kwargs)) writes
        self.delayed_queue_thread = None
        self.close_idle = close_idle
        self.last_query_time = time.time()
        self.last_sleep_time = time.time()
        self.num_execute_since_sleep = 0
        self.lock = ThreadPool.Lock()  # Serializes query execution
        self.connect_lock = ThreadPool.Lock()  # Serializes connect()/close()

    def __repr__(self):
        return "<Db#%s:%s close_idle:%s>" % (id(self), self.db_path, self.close_idle)

    def connect(self):
        """Open the sqlite connection (idempotent), creating the db dir if needed."""
        self.connect_lock.acquire(True)
        try:
            if self.conn:
                self.log.debug("Already connected, connection ignored")
                return

            if self not in opened_dbs:
                opened_dbs.append(self)
            s = time.time()
            try:  # Directory not exist yet
                os.makedirs(self.db_dir)
                self.log.debug("Created Db path: %s" % self.db_dir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise err
            if not os.path.isfile(self.db_path):
                self.log.debug("Db file not exist yet: %s" % self.db_path)
            self.conn = sqlite3.connect(self.db_path, isolation_level="DEFERRED", check_same_thread=False)
            self.conn.row_factory = sqlite3.Row
            # Progress handler periodically yields control to gevent during long queries
            self.conn.set_progress_handler(self.progress, 5000000)
            self.conn.execute('PRAGMA journal_mode=WAL')
            if self.foreign_keys:
                self.conn.execute("PRAGMA foreign_keys = ON")
            self.cur = self.getCursor()

            self.log.debug(
                "Connected to %s in %.3fs (opened: %s, sqlite version: %s)..." %
                (self.db_path, time.time() - s, len(opened_dbs), sqlite3.version)
            )
            self.log.debug("Connect by thread: %s" % threading.current_thread().ident)
            self.log.debug("Connect called by %s" % Debug.formatStack())
        finally:
            self.connect_lock.release()

    def getConn(self):
        """Return the sqlite connection, connecting first if necessary."""
        if not self.conn:
            self.connect()
        return self.conn

    def progress(self, *args, **kwargs):
        """Sqlite progress handler: briefly sleep so other greenlets can run."""
        self.progress_sleeping = True
        time.sleep(0.001)
        self.progress_sleeping = False

    # Execute query using dbcursor
    def execute(self, query, params=None):
        """Run a query through the shared cursor (connects lazily)."""
        if not self.conn:
            self.connect()
        return self.cur.execute(query, params)

    @thread_pool_db.wrap
    def commit(self, reason="Unknown"):
        """Commit pending writes on the db thread pool.

        Returns True on success, False when skipped (progress handler active,
        no connection, commit already running) or on error.
        """
        if self.progress_sleeping:
            self.log.debug("Commit ignored: Progress sleeping")
            return False

        if not self.conn:
            self.log.debug("Commit ignored: No connection")
            return False

        if self.commiting:
            self.log.debug("Commit ignored: Already commiting")
            return False

        try:
            s = time.time()
            self.commiting = True
            self.conn.commit()
            self.log.debug("Commited in %.3fs (reason: %s)" % (time.time() - s, reason))
            return True
        except Exception as err:
            # Commit can legitimately fail while statements are still running;
            # that case is only a warning (retried later by dbCommitCheck)
            if "SQL statements in progress" in str(err):
                self.log.warning("Commit delayed: %s (reason: %s)" % (Debug.formatException(err), reason))
            else:
                self.log.error("Commit error: %s (reason: %s)" % (Debug.formatException(err), reason))
            return False
        finally:
            self.commiting = False

    def insertOrUpdate(self, *args, **kwargs):
        """Proxy to DbCursor.insertOrUpdate on the shared cursor (connects lazily)."""
        if not self.conn:
            self.connect()
        return self.cur.insertOrUpdate(*args, **kwargs)

    def executeDelayed(self, *args, **kwargs):
        """Queue an execute() call to be run in ~1s by processDelayed()."""
        if not self.delayed_queue_thread:
            self.delayed_queue_thread = gevent.spawn_later(1, self.processDelayed)
        self.delayed_queue.append(("execute", (args, kwargs)))

    def insertOrUpdateDelayed(self, *args, **kwargs):
        """Queue an insertOrUpdate() call to be run in ~1s by processDelayed()."""
        if not self.delayed_queue:
            gevent.spawn_later(1, self.processDelayed)
        self.delayed_queue.append(("insertOrUpdate", (args, kwargs)))

    def processDelayed(self):
        """Flush the delayed write queue using a fresh cursor."""
        if not self.delayed_queue:
            self.log.debug("processDelayed aborted")
            return
        if not self.conn:
            self.connect()

        s = time.time()
        cur = self.getCursor()
        for command, params in self.delayed_queue:
            if command == "insertOrUpdate":
                cur.insertOrUpdate(*params[0], **params[1])
            else:
                cur.execute(*params[0], **params[1])

        if len(self.delayed_queue) > 10:
            self.log.debug("Processed %s delayed queue in %.3fs" % (len(self.delayed_queue), time.time() - s))
        self.delayed_queue = []
        self.delayed_queue_thread = None

    def close(self, reason="Unknown"):
        """Flush delayed writes, commit, wait for cursors and close the connection.

        Returns False when there is no open connection, True otherwise.
        """
        if not self.conn:
            return False
        self.connect_lock.acquire()
        s = time.time()
        if self.delayed_queue:
            self.processDelayed()
        if self in opened_dbs:
            opened_dbs.remove(self)
        self.need_commit = False
        self.commit("Closing: %s" % reason)
        self.log.debug("Close called by %s" % Debug.formatStack())
        # Give in-flight cursors up to ~1s (increasing backoff) to finish
        for i in range(5):
            if len(self.cursors) == 0:
                break
            self.log.debug("Pending cursors: %s" % len(self.cursors))
            time.sleep(0.1 * i)
        if len(self.cursors):
            self.log.debug("Killing cursors: %s" % len(self.cursors))
            self.conn.interrupt()

        if self.cur:
            self.cur.close()
        if self.conn:
            # Connection must be closed on the loop that created it
            ThreadPool.main_loop.call(self.conn.close)
        self.conn = None
        self.cur = None
        self.log.debug("%s closed (reason: %s) in %.3fs, opened: %s" % (self.db_path, reason, time.time() - s, len(opened_dbs)))
        self.connect_lock.release()
        return True

    # Gets a cursor object to database
    # Return: Cursor class
    def getCursor(self):
        """Return a new DbCursor bound to this Db (connects lazily)."""
        if not self.conn:
            self.connect()

        cur = DbCursor(self)
        return cur

    def getSharedCursor(self):
        """Return the shared cursor created at connect time (connects lazily)."""
        if not self.conn:
            self.connect()
        return self.cur

    # Get the table version
    # Return: Table version or None if not exist
    def getTableVersion(self, table_name):
        """Return the recorded schema version of a table (0 when unknown).

        Returns False when the internal keyvalue table does not exist yet.
        """
        if not self.db_keyvalues:  # Get db keyvalues
            try:
                res = self.execute("SELECT * FROM keyvalue WHERE json_id=0")  # json_id = 0 is internal keyvalues
            except sqlite3.OperationalError as err:  # Table not exist
                self.log.debug("Query table version error: %s" % err)
                return False

            for row in res:
                self.db_keyvalues[row["key"]] = row["value"]

        return self.db_keyvalues.get("table.%s.version" % table_name, 0)

    # Check Db tables
    # Return: <list> Changed table names
    def checkTables(self):
        """Create/upgrade internal and schema-defined tables.

        Raises DbTableError on a per-table failure. Returns the list of
        tables that were (re)built.
        """
        s = time.time()
        changed_tables = []

        cur = self.getSharedCursor()

        # Check internal tables
        # Check keyvalue table
        changed = cur.needTable("keyvalue", [
            ["keyvalue_id", "INTEGER PRIMARY KEY AUTOINCREMENT"],
            ["key", "TEXT"],
            ["value", "INTEGER"],
            ["json_id", "INTEGER"],
        ], [
            "CREATE UNIQUE INDEX key_id ON keyvalue(json_id, key)"
        ], version=self.schema["version"])
        if changed:
            changed_tables.append("keyvalue")

        # Create json table if no custom one defined
        if "json" not in self.schema.get("tables", {}):
            # The json table layout depends on the schema version:
            # v1: single path column; v2: directory + file_name; v3: + site
            if self.schema["version"] == 1:
                changed = cur.needTable("json", [
                    ["json_id", "INTEGER PRIMARY KEY AUTOINCREMENT"],
                    ["path", "VARCHAR(255)"]
                ], [
                    "CREATE UNIQUE INDEX path ON json(path)"
                ], version=self.schema["version"])
            elif self.schema["version"] == 2:
                changed = cur.needTable("json", [
                    ["json_id", "INTEGER PRIMARY KEY AUTOINCREMENT"],
                    ["directory", "VARCHAR(255)"],
                    ["file_name", "VARCHAR(255)"]
                ], [
                    "CREATE UNIQUE INDEX path ON json(directory, file_name)"
                ], version=self.schema["version"])
            elif self.schema["version"] == 3:
                changed = cur.needTable("json", [
                    ["json_id", "INTEGER PRIMARY KEY AUTOINCREMENT"],
                    ["site", "VARCHAR(255)"],
                    ["directory", "VARCHAR(255)"],
                    ["file_name", "VARCHAR(255)"]
                ], [
                    "CREATE UNIQUE INDEX path ON json(directory, site, file_name)"
                ], version=self.schema["version"])
            if changed:
                changed_tables.append("json")

        # Check schema tables
        for table_name, table_settings in self.schema.get("tables", {}).items():
            try:
                indexes = table_settings.get("indexes", [])
                version = table_settings.get("schema_changed", 0)
                changed = cur.needTable(
                    table_name, table_settings["cols"],
                    indexes, version=version
                )
                if changed:
                    changed_tables.append(table_name)
            except Exception as err:
                self.log.error("Error creating table %s: %s" % (table_name, Debug.formatException(err)))
                raise DbTableError(err, table_name)

        self.log.debug("Db check done in %.3fs, changed tables: %s" % (time.time() - s, changed_tables))
        if changed_tables:
            self.db_keyvalues = {}  # Refresh table version cache

        return changed_tables

    # Update json file to db
    # Return: True if matched
    def updateJson(self, file_path, file=None, cur=None):
        """Import a site json file into the database per the schema "maps".

        file: optional already-open file object; pass False to mark the file
        as deleted (its rows are removed). Returns False when the path is
        outside the db dir or matches no map, True otherwise.
        """
        if not file_path.startswith(self.db_dir):
            return False  # Not from the db dir: Skipping
        relative_path = file_path[len(self.db_dir):]  # File path realative to db file

        # Check if filename matches any of mappings in schema
        matched_maps = []
        for match, map_settings in self.schema["maps"].items():
            try:
                if SafeRe.match(match, relative_path):
                    matched_maps.append(map_settings)
            except SafeRe.UnsafePatternError as err:
                self.log.error(err)

        # No match found for the file
        if not matched_maps:
            return False

        # Load the json file
        try:
            if file is None:  # Open file is not file object passed
                file = open(file_path, "rb")

            if file is False:  # File deleted
                data = {}
            else:
                if file_path.endswith("json.gz"):
                    file = helper.limitedGzipFile(fileobj=file)

                if sys.version_info.major == 3 and sys.version_info.minor < 6:
                    data = json.loads(file.read().decode("utf8"))
                else:
                    data = json.load(file)
        except Exception as err:
            self.log.debug("Json file %s load error: %s" % (file_path, err))
            data = {}

        # No cursor specificed
        if not cur:
            cur = self.getSharedCursor()
            cur.logging = False

        # Row for current json file if required
        if not data or [dbmap for dbmap in matched_maps if "to_keyvalue" in dbmap or "to_table" in dbmap]:
            json_row = cur.getJsonRow(relative_path)

        # Check matched mappings in schema
        for dbmap in matched_maps:
            # Insert non-relational key values
            if dbmap.get("to_keyvalue"):
                # Get current values
                res = cur.execute("SELECT * FROM keyvalue WHERE json_id = ?", (json_row["json_id"],))
                current_keyvalue = {}
                current_keyvalue_id = {}
                for row in res:
                    current_keyvalue[row["key"]] = row["value"]
                    current_keyvalue_id[row["key"]] = row["keyvalue_id"]

                for key in dbmap["to_keyvalue"]:
                    if key not in current_keyvalue:  # Keyvalue not exist yet in the db
                        cur.execute(
                            "INSERT INTO keyvalue ?",
                            {"key": key, "value": data.get(key), "json_id": json_row["json_id"]}
                        )
                    elif data.get(key) != current_keyvalue[key]:  # Keyvalue different value
                        cur.execute(
                            "UPDATE keyvalue SET value = ? WHERE keyvalue_id = ?",
                            (data.get(key), current_keyvalue_id[key])
                        )

            # Insert data to json table for easier joins
            if dbmap.get("to_json_table"):
                directory, file_name = re.match("^(.*?)/*([^/]*)$", relative_path).groups()
                data_json_row = dict(cur.getJsonRow(directory + "/" + dbmap.get("file_name", file_name)))
                changed = False
                for key in dbmap["to_json_table"]:
                    if data.get(key) != data_json_row.get(key):
                        changed = True
                if changed:
                    # Add the custom col values
                    data_json_row.update({key: val for key, val in data.items() if key in dbmap["to_json_table"]})
                    cur.execute("INSERT OR REPLACE INTO json ?", data_json_row)

            # Insert data to tables
            for table_settings in dbmap.get("to_table", []):
                if isinstance(table_settings, dict):  # Custom settings
                    table_name = table_settings["table"]  # Table name to insert datas
                    node = table_settings.get("node", table_name)  # Node keyname in data json file
                    key_col = table_settings.get("key_col")  # Map dict key as this col
                    val_col = table_settings.get("val_col")  # Map dict value as this col
                    import_cols = table_settings.get("import_cols")
                    replaces = table_settings.get("replaces")
                else:  # Simple settings
                    table_name = table_settings
                    node = table_settings
                    key_col = None
                    val_col = None
                    import_cols = None
                    replaces = None

                # Fill import cols from table cols
                if not import_cols:
                    import_cols = set([item[0] for item in self.schema["tables"][table_name]["cols"]])

                # Old rows of this json file are replaced wholesale
                cur.execute("DELETE FROM %s WHERE json_id = ?" % table_name, (json_row["json_id"],))

                if node not in data:
                    continue

                if key_col:  # Map as dict
                    for key, val in data[node].items():
                        if val_col:  # Single value
                            cur.execute(
                                "INSERT OR REPLACE INTO %s ?" % table_name,
                                {key_col: key, val_col: val, "json_id": json_row["json_id"]}
                            )
                        else:  # Multi value
                            if type(val) is dict:  # Single row
                                row = val
                                if import_cols:
                                    row = {key: row[key] for key in row if key in import_cols}  # Filter row by import_cols
                                row[key_col] = key
                                # Replace in value if necessary
                                if replaces:
                                    for replace_key, replace in replaces.items():
                                        if replace_key in row:
                                            for replace_from, replace_to in replace.items():
                                                row[replace_key] = row[replace_key].replace(replace_from, replace_to)

                                row["json_id"] = json_row["json_id"]
                                cur.execute("INSERT OR REPLACE INTO %s ?" % table_name, row)
                            elif type(val) is list:  # Multi row
                                for row in val:
                                    row[key_col] = key
                                    row["json_id"] = json_row["json_id"]
                                    cur.execute("INSERT OR REPLACE INTO %s ?" % table_name, row)
                else:  # Map as list
                    for row in data[node]:
                        row["json_id"] = json_row["json_id"]
                        if import_cols:
                            row = {key: row[key] for key in row if key in import_cols}  # Filter row by import_cols
                        cur.execute("INSERT OR REPLACE INTO %s ?" % table_name, row)

        # Cleanup json row
        if not data:
            self.log.debug("Cleanup json row for %s" % file_path)
            cur.execute("DELETE FROM json WHERE json_id = %s" % json_row["json_id"])

        return True
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Ad-hoc smoke test / benchmark: rebuild a zerotalk database from its
    # json files and print per-query statistics.
    s = time.time()
    console_log = logging.StreamHandler()
    logging.getLogger('').setLevel(logging.DEBUG)
    logging.getLogger('').addHandler(console_log)
    console_log.setLevel(logging.DEBUG)
    dbjson = Db(json.load(open("zerotalk.schema.json")), "data/users/zerotalk.db")
    dbjson.collect_stats = True
    dbjson.checkTables()
    cur = dbjson.getCursor()
    cur.logging = False
    dbjson.updateJson("data/users/content.json", cur=cur)
    for user_dir in os.listdir("data/users"):
        if os.path.isdir("data/users/%s" % user_dir):
            dbjson.updateJson("data/users/%s/data.json" % user_dir, cur=cur)
            # print ".",
    cur.logging = True
    print("Done in %.3fs" % (time.time() - s))
    for query, stats in sorted(dbjson.query_stats.items()):
        print("-", query, stats)
|
|
@ -0,0 +1,246 @@
|
||||||
|
import time
|
||||||
|
import re
|
||||||
|
from util import helper
|
||||||
|
|
||||||
|
# Special sqlite cursor
|
||||||
|
|
||||||
|
|
||||||
|
class DbCursor:
    """Cursor wrapper that adds dict-style query parameters on top of sqlite3.

    parseQuery() lets callers write "SELECT * FROM t WHERE ?" with a dict of
    conditions, or "INSERT INTO t ?" with a dict of column values. All
    execution is serialized through the owning Db's lock and yields while the
    db is committing or its progress handler is sleeping.
    """

    def __init__(self, db):
        self.db = db
        self.logging = False  # If True, every executed query is logged

    def quoteValue(self, value):
        """Return *value* quoted for direct embedding in a sql string."""
        if type(value) is int:
            return str(value)
        else:
            return "'%s'" % value.replace("'", "''")

    def parseQuery(self, query, params):
        """Expand dict params into sql.

        - "?" in a SELECT/DELETE/UPDATE becomes a WHERE clause built from the
          dict (supports not__*, *__like, *>, *< keys and list values as IN).
        - "?" in other statements (INSERT) becomes "(cols) VALUES (?, ...)".
        - ":name" placeholders with list values are expanded into
          "(:name__0, :name__1, ...)".
        Returns the rewritten (query, params) pair.
        """
        query_type = query.split(" ", 1)[0].upper()
        if isinstance(params, dict) and "?" in query:  # Make easier select and insert by allowing dict params
            if query_type in ("SELECT", "DELETE", "UPDATE"):
                # Convert param dict to SELECT * FROM table WHERE key = ? AND key2 = ? format
                query_wheres = []
                values = []
                for key, value in params.items():
                    if type(value) is list:
                        if key.startswith("not__"):
                            field = key.replace("not__", "")
                            operator = "NOT IN"
                        else:
                            field = key
                            operator = "IN"
                        if len(value) > 100:
                            # Embed values in query to avoid "too many SQL variables" error
                            query_values = ",".join(map(helper.sqlquote, value))
                        else:
                            query_values = ",".join(["?"] * len(value))
                            values += value
                        query_wheres.append(
                            "%s %s (%s)" %
                            (field, operator, query_values)
                        )
                    else:
                        if key.startswith("not__"):
                            query_wheres.append(key.replace("not__", "") + " != ?")
                        elif key.endswith("__like"):
                            query_wheres.append(key.replace("__like", "") + " LIKE ?")
                        elif key.endswith(">"):
                            query_wheres.append(key.replace(">", "") + " > ?")
                        elif key.endswith("<"):
                            query_wheres.append(key.replace("<", "") + " < ?")
                        else:
                            query_wheres.append(key + " = ?")
                        values.append(value)
                wheres = " AND ".join(query_wheres)
                if wheres == "":
                    wheres = "1"  # Empty condition dict matches every row
                query = re.sub("(.*)[?]", "\\1 %s" % wheres, query)  # Replace the last ?
                params = values
            else:
                # Convert param dict to INSERT INTO table (key, key2) VALUES (?, ?) format
                keys = ", ".join(params.keys())
                values = ", ".join(['?' for key in params.keys()])
                keysvalues = "(%s) VALUES (%s)" % (keys, values)
                query = re.sub("(.*)[?]", "\\1%s" % keysvalues, query)  # Replace the last ?
                params = tuple(params.values())
        elif isinstance(params, dict) and ":" in query:
            new_params = dict()
            values = []
            for key, value in params.items():
                if type(value) is list:
                    # Expand a list value into numbered named placeholders
                    for idx, val in enumerate(value):
                        new_params[key + "__" + str(idx)] = val

                    new_names = [":" + key + "__" + str(idx) for idx in range(len(value))]
                    query = re.sub(r":" + re.escape(key) + r"([)\s]|$)", "(%s)%s" % (", ".join(new_names), r"\1"), query)
                else:
                    new_params[key] = value

            params = new_params
        return query, params

    def execute(self, query, params=None):
        """Execute one statement under the db lock.

        Returns the raw cursor for update-type queries (UPDATE/DELETE/INSERT/
        CREATE, also marking the db as needing commit), the result otherwise.
        """
        query = query.strip()
        # Wait while the db yields in its progress handler or commits
        while self.db.progress_sleeping or self.db.commiting:
            time.sleep(0.1)

        self.db.last_query_time = time.time()

        query, params = self.parseQuery(query, params)

        cursor = self.db.getConn().cursor()
        self.db.cursors.add(cursor)
        if self.db.lock.locked():
            self.db.log.debug("Locked for %.3fs" % (time.time() - self.db.lock.time_lock))

        try:
            s = time.time()
            self.db.lock.acquire(True)
            if query.upper().strip("; ") == "VACUUM":
                # VACUUM cannot run inside a transaction; flush first
                self.db.commit("vacuum called")
            if params:
                res = cursor.execute(query, params)
            else:
                res = cursor.execute(query)
        finally:
            self.db.lock.release()

        taken_query = time.time() - s
        if self.logging or taken_query > 1:
            if params:  # Query has parameters
                self.db.log.debug("Query: " + query + " " + str(params) + " (Done in %.4f)" % (time.time() - s))
            else:
                self.db.log.debug("Query: " + query + " (Done in %.4f)" % (time.time() - s))

        # Log query stats
        if self.db.collect_stats:
            if query not in self.db.query_stats:
                self.db.query_stats[query] = {"call": 0, "time": 0.0}
            self.db.query_stats[query]["call"] += 1
            self.db.query_stats[query]["time"] += time.time() - s

        query_type = query.split(" ", 1)[0].upper()
        is_update_query = query_type in ["UPDATE", "DELETE", "INSERT", "CREATE"]
        if not self.db.need_commit and is_update_query:
            self.db.need_commit = True

        if is_update_query:
            return cursor
        else:
            return res

    def executemany(self, query, params):
        """Execute one statement with many parameter sets under the db lock."""
        while self.db.progress_sleeping or self.db.commiting:
            time.sleep(0.1)

        self.db.last_query_time = time.time()

        s = time.time()
        cursor = self.db.getConn().cursor()
        self.db.cursors.add(cursor)

        try:
            self.db.lock.acquire(True)
            cursor.executemany(query, params)
        finally:
            self.db.lock.release()

        taken_query = time.time() - s
        if self.logging or taken_query > 0.1:
            self.db.log.debug("Execute many: %s (Done in %.4f)" % (query, taken_query))

        self.db.need_commit = True

        return cursor

    # Creates on updates a database row without incrementing the rowid
    def insertOrUpdate(self, table, query_sets, query_wheres, oninsert={}):
        """UPDATE the matching row, or INSERT it when no row matched.

        query_sets: columns to set; query_wheres: match conditions;
        oninsert: extra column values used only when inserting.
        NOTE: mutates query_sets (merges the where/oninsert values into it).
        """
        sql_sets = ["%s = :%s" % (key, key) for key in query_sets.keys()]
        sql_wheres = ["%s = :%s" % (key, key) for key in query_wheres.keys()]

        params = query_sets
        params.update(query_wheres)
        res = self.execute(
            "UPDATE %s SET %s WHERE %s" % (table, ", ".join(sql_sets), " AND ".join(sql_wheres)),
            params
        )
        if res.rowcount == 0:
            params.update(oninsert)  # Add insert-only fields
            self.execute("INSERT INTO %s ?" % table, params)

    # Create new table
    # Return: True on success
    def createTable(self, table, cols):
        """Drop and recreate *table* with the given [name, type] column list."""
        # TODO: Check current structure
        self.execute("DROP TABLE IF EXISTS %s" % table)
        col_definitions = []
        for col_name, col_type in cols:
            col_definitions.append("%s %s" % (col_name, col_type))

        self.execute("CREATE TABLE %s (%s)" % (table, ",".join(col_definitions)))
        return True

    # Create indexes on table
    # Return: True on success
    def createIndexes(self, table, indexes):
        """Run the given CREATE INDEX statements (non-CREATE entries are skipped)."""
        for index in indexes:
            if not index.strip().upper().startswith("CREATE"):
                self.db.log.error("Index command should start with CREATE: %s" % index)
                continue
            self.execute(index)

    # Create table if not exist
    # Return: True if updated
    def needTable(self, table, cols, indexes=None, version=1):
        """Rebuild *table* when its recorded version is older than *version*."""
        current_version = self.db.getTableVersion(table)
        if int(current_version) < int(version):  # Table need update or not extis
            self.db.log.debug("Table %s outdated...version: %s need: %s, rebuilding..." % (table, current_version, version))
            self.createTable(table, cols)
            if indexes:
                self.createIndexes(table, indexes)
            self.execute(
                "INSERT OR REPLACE INTO keyvalue ?",
                {"json_id": 0, "key": "table.%s.version" % table, "value": version}
            )
            return True
        else:  # Not changed
            return False

    # Get or create a row for json file
    # Return: The database row
    def getJsonRow(self, file_path):
        """Return the json table row for *file_path*, inserting it if missing.

        The row layout depends on the schema version (path / directory+file /
        site+directory+file).
        """
        directory, file_name = re.match("^(.*?)/*([^/]*)$", file_path).groups()
        if self.db.schema["version"] == 1:
            # One path field
            res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"path": file_path})
            row = res.fetchone()
            if not row:  # No row yet, create it
                self.execute("INSERT INTO json ?", {"path": file_path})
                res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"path": file_path})
                row = res.fetchone()
        elif self.db.schema["version"] == 2:
            # Separate directory, file_name (easier join)
            res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"directory": directory, "file_name": file_name})
            row = res.fetchone()
            if not row:  # No row yet, create it
                self.execute("INSERT INTO json ?", {"directory": directory, "file_name": file_name})
                res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"directory": directory, "file_name": file_name})
                row = res.fetchone()
        elif self.db.schema["version"] == 3:
            # Separate site, directory, file_name (for merger sites)
            site_address, directory = re.match("^([^/]*)/(.*)$", directory).groups()
            res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"site": site_address, "directory": directory, "file_name": file_name})
            row = res.fetchone()
            if not row:  # No row yet, create it
                self.execute("INSERT INTO json ?", {"site": site_address, "directory": directory, "file_name": file_name})
                res = self.execute("SELECT * FROM json WHERE ? LIMIT 1", {"site": site_address, "directory": directory, "file_name": file_name})
                row = res.fetchone()
        else:
            raise Exception("Dbschema version %s not supported" % self.db.schema.get("version"))
        return row

    def close(self):
        """No-op: the underlying sqlite cursors are managed by Db.close()."""
        pass
|
|
@ -0,0 +1,46 @@
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
# Parse and modify sql queries
|
||||||
|
# Parse and modify sql queries
class DbQuery:
    """Lightweight parser for simple SELECT statements.

    Splits a query into its main clauses (SELECT/FROM/WHERE/ORDER BY/LIMIT)
    so the parts, selected field aliases and WHERE conditions can be inspected
    or modified, then reassembled with str().
    """

    def __init__(self, query):
        self.setQuery(query.strip())

    # Split main parts of query
    def parseParts(self, query):
        """Return {clause_keyword: clause_text} for the query's main clauses."""
        tokens = re.split("(SELECT|FROM|WHERE|ORDER BY|LIMIT)", query)
        tokens = [token for token in tokens if token]  # Drop empty fragments
        tokens = [token.strip() for token in tokens]  # Trim surrounding whitespace
        # Pair every keyword (even positions) with the clause text following it
        return dict(zip(tokens[0::2], tokens[1::2]))

    # Parse selected fields SELECT ... FROM
    def parseFields(self, query_select):
        """Return {alias: expression} for every "expr AS alias" in the SELECT list."""
        aliased = re.findall("([^,]+) AS ([^,]+)", query_select)
        result = {}
        for expression, alias in aliased:
            result[alias] = expression.strip()
        return result

    # Parse query conditions WHERE ...
    def parseWheres(self, query_where):
        """Split the WHERE clause on " AND " into a list of conditions."""
        if not query_where:
            return []
        if " AND " in query_where:
            return query_where.split(" AND ")
        return [query_where]

    # Set the query
    def setQuery(self, query):
        """Parse *query* and populate self.parts, self.fields and self.wheres."""
        self.parts = self.parseParts(query)
        self.fields = self.parseFields(self.parts["SELECT"])
        self.wheres = self.parseWheres(self.parts.get("WHERE", ""))

    # Convert query back to string
    def __str__(self):
        """Reassemble the (possibly modified) query, one clause per line."""
        rendered = []
        for clause in ("SELECT", "FROM", "WHERE", "ORDER BY", "LIMIT"):
            if clause == "WHERE" and self.wheres:
                # WHERE is rebuilt from self.wheres so edits to it take effect
                rendered.extend(["WHERE", " AND ".join(self.wheres)])
            elif clause in self.parts:
                rendered.extend([clause, self.parts[clause]])
        return "\n".join(rendered)
|
|
@ -0,0 +1,186 @@
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
from Config import config
|
||||||
|
|
||||||
|
|
||||||
|
# Non fatal exception
|
||||||
|
# Non fatal exception
class Notify(Exception):
    """Non-fatal exception carrying a human-readable message.

    The message may also come from a class attribute (see createNotifyType,
    which builds subclasses with a preset "message"), so the constructor
    argument is optional and only stored when truthy.
    """

    def __init__(self, message=None):
        if message:
            self.message = message

    def __str__(self):
        return self.message
|
||||||
|
|
||||||
|
|
||||||
|
# Gevent greenlet.kill accept Exception type
|
||||||
|
# Gevent greenlet.kill accept Exception type
def createNotifyType(message):
    """Return a new Notify subclass whose class-level message is *message*.

    gevent's greenlet.kill() takes an exception *type*, so a dedicated
    subclass is created to carry the message.
    """
    notify_class = type("Notify", (Notify, ), {"message": message})
    return notify_class
|
||||||
|
|
||||||
|
|
||||||
|
def formatExceptionMessage(err):
|
||||||
|
err_type = err.__class__.__name__
|
||||||
|
if err.args:
|
||||||
|
err_message = err.args[-1]
|
||||||
|
else:
|
||||||
|
err_message = err.__str__()
|
||||||
|
return "%s: %s" % (err_type, err_message)
|
||||||
|
|
||||||
|
|
||||||
|
# Directories of installed Python libraries (site/dist-packages plus the
# stdlib dir), normalized to "/" separators — used to shorten traceback paths
python_lib_dirs = [path.replace("\\", "/") for path in sys.path if re.sub(r".*[\\/]", "", path) in ("site-packages", "dist-packages")]
python_lib_dirs.append(os.path.dirname(os.__file__).replace("\\", "/"))  # TODO: check if returns the correct path for PyPy

# Project root directory (two levels above this file), normalized to "/"
root_dir = os.path.realpath(os.path.dirname(__file__) + "/../../")
root_dir = root_dir.replace("\\", "/")
|
||||||
|
|
||||||
|
|
||||||
|
def formatTraceback(items, limit=None, fold_builtin=True):
    """Format traceback entries into compact human-readable strings.

    items: list of [path, line] pairs (from traceback.extract_tb or
    inspect.stack). Paths inside the stdlib/site-packages are shortened to
    "<module>/...", gevent internals to "<gevent>/...", project files are
    made relative to root_dir. Consecutive builtin frames are folded into
    "..." unless fold_builtin is False. limit truncates the output.
    Returns the list of formatted entries.
    """
    back = []
    i = 0
    prev_file_title = ""
    is_prev_builtin = False

    for path, line in items:
        i += 1
        is_last = i == len(items)
        path = path.replace("\\", "/")

        if path.startswith("src/gevent/"):
            file_title = "<gevent>/" + path[len("src/gevent/"):]
            is_builtin = True
            is_skippable_builtin = False
        elif path in ("<frozen importlib._bootstrap>", "<frozen importlib._bootstrap_external>"):
            file_title = "(importlib)"
            is_builtin = True
            is_skippable_builtin = True
        else:
            is_skippable_builtin = False
            for base in python_lib_dirs:
                if path.startswith(base + "/"):
                    file_title = path[len(base + "/"):]
                    module_name, *tail = file_title.split("/")
                    if module_name.endswith(".py"):
                        module_name = module_name[:-3]
                    file_title = "/".join(["<%s>" % module_name] + tail)
                    is_builtin = True
                    break
            else:
                is_builtin = False
                for base in (root_dir + "/src", root_dir + "/plugins", root_dir):
                    if path.startswith(base + "/"):
                        file_title = path[len(base + "/"):]
                        break
                else:
                    # For unknown paths, do our best to hide absolute path
                    file_title = path
                    for needle in ("/zeronet/", "/core/"):
                        if needle in file_title.lower():
                            file_title = "?/" + file_title[file_title.lower().rindex(needle) + len(needle):]

        # Path compression: A/AB/ABC/X/Y.py -> ABC/X/Y.py
        # E.g.: in 'Db/DbCursor.py' the directory part is unnecessary
        if not file_title.startswith("/"):
            prev_part = ""
            # BUGFIX: use a dedicated loop variable here; the original reused
            # `i`, clobbering the outer item counter and breaking is_last and
            # the limit check on subsequent iterations.
            part_i = 0
            for part_i, part in enumerate(file_title.split("/") + [""]):
                if not part.startswith(prev_part):
                    break
                prev_part = part
            file_title = "/".join(file_title.split("/")[part_i - 1:])

        if is_skippable_builtin and fold_builtin:
            pass  # Drop importlib frames entirely
        elif is_builtin and is_prev_builtin and not is_last and fold_builtin:
            if back[-1] != "...":
                back.append("...")  # Fold consecutive builtin frames
        else:
            if file_title == prev_file_title:
                back.append("%s" % line)  # Same file as previous: line only
            else:
                back.append("%s line %s" % (file_title, line))

        prev_file_title = file_title
        is_prev_builtin = is_builtin

        if limit and i >= limit:
            back.append("...")
            break
    return back
|
||||||
|
|
||||||
|
|
||||||
|
def formatException(err=None, format="text"):
    """Format the current (or passed) exception with a compact traceback.

    err may be: a Notify instance (returned as-is, no traceback), a
    (type, value, tb) tuple from sys.exc_info(), a message/exception
    object, or None (then the latest sys.exc_info() is used).
    format: "text" (default) or "html".
    """
    import traceback
    # BUGFIX: isinstance instead of `type(err) == Notify`, so the throwaway
    # subclasses produced by createNotifyType() are also short-circuited.
    if isinstance(err, Notify):
        return err
    elif type(err) == tuple and err and err[0] is not None:  # Passed traceback info
        exc_type, exc_obj, exc_tb = err
        err = None
    else:  # No traceback info passed, get latest
        exc_type, exc_obj, exc_tb = sys.exc_info()

    if not err:
        # BUGFIX: probe the exception object; the original tested the falsy
        # `err` variable (hasattr(err, "message")), which was always False.
        if hasattr(exc_obj, "message"):
            err = exc_obj.message
        else:
            err = exc_obj

    tb = formatTraceback([[frame[0], frame[1]] for frame in traceback.extract_tb(exc_tb)])
    if format == "html":
        return "%s: %s<br><small class='multiline'>%s</small>" % (repr(err), err, " > ".join(tb))
    else:
        return "%s: %s in %s" % (exc_type.__name__, err, " > ".join(tb))
|
||||||
|
|
||||||
|
|
||||||
|
def formatStack(limit=None):
    """Return the current call stack as a compact one-line string."""
    import inspect
    frames = inspect.stack()[1:]  # Skip formatStack's own frame
    entries = [[frame[1], frame[2]] for frame in frames]
    return " > ".join(formatTraceback(entries, limit=limit))
|
||||||
|
|
||||||
|
|
||||||
|
# Test if gevent eventloop blocks
|
||||||
|
import logging
|
||||||
|
import gevent
|
||||||
|
import time
|
||||||
|
|
||||||
|
|
||||||
|
num_block = 0  # Count of detected event-loop blocks (read externally for stats)


def testBlock():
    """Watchdog loop detecting when the gevent event loop is blocked.

    Sleeps 1s per iteration; if noticeably more wall time elapsed than
    requested, some greenlet blocked the loop — log it and bump num_block.
    Runs forever (spawned as a background greenlet at import time).
    """
    global num_block
    logging.debug("Gevent block checker started")
    last_time = time.time()
    while 1:
        time.sleep(1)
        if time.time() - last_time > 1.1:
            # Report the overshoot beyond the requested 1s sleep
            logging.debug("Gevent block detected: %.3fs" % (time.time() - last_time - 1))
            num_block += 1
        last_time = time.time()
|
||||||
|
|
||||||
|
|
||||||
|
gevent.spawn(testBlock)  # Background checker; runs for the whole process lifetime
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Manual smoke test: exercise formatException() on several error sources
    try:
        print(1 / 0)
    except Exception as err:
        print(type(err).__name__)
        print("1/0 error: %s" % formatException(err))

    def loadJson():
        # Deliberately invalid JSON to produce a nested-call traceback
        json.loads("Errr")

    import json
    try:
        loadJson()
    except Exception as err:
        print(err)
        print("Json load error: %s" % formatException(err))

    try:
        raise Notify("nothing...")
    except Exception as err:
        print("Notify: %s" % formatException(err))

    loadJson()  # Left unhandled on purpose: shows the default traceback
|
|
@ -0,0 +1,129 @@
|
||||||
|
import sys
|
||||||
|
import logging
|
||||||
|
import signal
|
||||||
|
import importlib
|
||||||
|
|
||||||
|
import gevent
|
||||||
|
import gevent.hub
|
||||||
|
|
||||||
|
from Config import config
|
||||||
|
from . import Debug
|
||||||
|
|
||||||
|
last_error = None
|
||||||
|
|
||||||
|
thread_shutdown = None  # Greenlet running shutdownThread (guards against double shutdown)


def shutdownThread():
    """Stop the file server and UI server (if started), then exit the process.

    `main` is imported lazily to avoid a circular import at module load.
    Exits with code 0 whether the stop calls succeed or fail.
    """
    import main
    try:
        if "file_server" in dir(main):
            thread = gevent.spawn(main.file_server.stop)
            thread.join(timeout=60)  # Bounded wait so a stuck server can't hang shutdown
        if "ui_server" in dir(main):
            thread = gevent.spawn(main.ui_server.stop)
            thread.join(timeout=10)
    except Exception as err:
        print("Error in shutdown thread: %s" % err)
        sys.exit(0)
    else:
        sys.exit(0)
|
||||||
|
|
||||||
|
|
||||||
|
def shutdown(reason="Unknown"):
    """Kick off a graceful shutdown exactly once (idempotent)."""
    global thread_shutdown
    logging.info("Shutting down (reason: %s)..." % reason)
    try:
        if thread_shutdown:
            return  # A shutdown greenlet is already running
        thread_shutdown = gevent.spawn(shutdownThread)
    except Exception as err:
        # If we can't even spawn the cleanup greenlet, exit immediately
        print("Proper shutdown error: %s" % err)
        sys.exit(0)
|
||||||
|
|
||||||
|
# Store last error, ignore notify, allow manual error logging
def handleError(*args, **kwargs):
    """sys.excepthook replacement that remembers the last error (for /Debug).

    Called either by the interpreter with (type, value, traceback), or
    manually with no args — then the current sys.exc_info() is used and
    nothing is printed (silent mode). KeyboardInterrupt triggers a graceful
    shutdown; Notify exceptions are ignored entirely.
    """
    global last_error
    if not args:  # Manual called
        args = sys.exc_info()
        silent = True
    else:
        silent = False
    # Remember everything except Notify for later inspection
    if args[0].__name__ != "Notify":
        last_error = args

    if args[0].__name__ == "KeyboardInterrupt":
        shutdown("Keyboard interrupt")
    elif not silent and args[0].__name__ != "Notify":
        logging.exception("Unhandled exception")
        if "greenlet.py" not in args[2].tb_frame.f_code.co_filename:  # Don't display error twice
            sys.__excepthook__(*args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
# Ignore notify errors
def handleErrorNotify(*args, **kwargs):
    """sys.excepthook replacement that silences Notify exceptions.

    KeyboardInterrupt triggers a graceful shutdown; anything else (except
    Notify) is logged and forwarded to the default excepthook.
    """
    err_name = args[0].__name__
    if err_name == "KeyboardInterrupt":
        shutdown("Keyboard interrupt")
    elif err_name != "Notify":
        logging.error("Unhandled exception: %s" % Debug.formatException(args))
        sys.__excepthook__(*args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
if config.debug:  # Keep last error for /Debug
    sys.excepthook = handleError
else:
    sys.excepthook = handleErrorNotify


# Override default error handler to allow silent killing / custom logging
if "handle_error" in dir(gevent.hub.Hub):
    # Modern gevent: keep a reference to the stock handler, hook installed below
    gevent.hub.Hub._original_handle_error = gevent.hub.Hub.handle_error
else:
    # Old gevent without a hub-level hook: patch the Greenlet class instead
    logging.debug("gevent.hub.Hub.handle_error not found using old gevent hooks")
    OriginalGreenlet = gevent.Greenlet

    class ErrorhookedGreenlet(OriginalGreenlet):
        # Route greenlet errors through our sys.excepthook
        def _report_error(self, exc_info):
            sys.excepthook(exc_info[0], exc_info[1], exc_info[2])

    gevent.Greenlet = gevent.greenlet.Greenlet = ErrorhookedGreenlet
    importlib.reload(gevent)  # Make the patched class visible to re-importers
|
||||||
|
|
||||||
|
def handleGreenletError(context, type, value, tb):
    """gevent Hub error handler: forward real errors to sys.excepthook.

    Signature matches gevent's Hub.handle_error(context, type, value, tb);
    the parameter `type` intentionally shadows the builtin to keep that
    contract.
    """
    if context.__class__ is tuple and context[0].__class__.__name__ == "ThreadPool":
        # Exceptions in ThreadPool will be handled in the main Thread
        return None

    if isinstance(value, str):
        # Cython can raise errors where the value is a plain string
        # e.g., AttributeError, "_semaphore.Semaphore has no attr", <traceback>
        value = type(value)  # Rebuild a proper exception instance from the text

    if not issubclass(type, gevent.get_hub().NOT_ERROR):
        # NOT_ERROR covers expected terminations (e.g. GreenletExit) — skip them
        sys.excepthook(type, value, tb)
|
||||||
|
|
||||||
|
gevent.get_hub().handle_error = handleGreenletError  # Install the hub-level hook

try:
    # Translate SIGTERM into the same graceful shutdown path as Ctrl-C
    signal.signal(signal.SIGTERM, lambda signum, stack_frame: shutdown("SIGTERM"))
except Exception as err:
    # Some environments (e.g. non-main threads) disallow signal handlers
    logging.debug("Error setting up SIGTERM watcher: %s" % err)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Manual test: verify that killing a greenlet with Notify stays silent
    import time
    from gevent import monkey
    monkey.patch_all(thread=False, ssl=False)
    from . import Debug

    def sleeper(num):
        print("started", num)
        time.sleep(3)
        raise Exception("Error")
        print("stopped", num)  # NOTE(review): unreachable — placed after the raise
    thread1 = gevent.spawn(sleeper, 1)
    thread2 = gevent.spawn(sleeper, 2)
    time.sleep(1)
    print("killing...")
    thread1.kill(exception=Debug.Notify("Worker stopped"))
    #thread2.throw(Debug.Notify("Throw"))
    print("killed")
    gevent.joinall([thread1,thread2])
|
|
@ -0,0 +1,24 @@
|
||||||
|
import time
|
||||||
|
import logging
|
||||||
|
|
||||||
|
import gevent.lock
|
||||||
|
|
||||||
|
from Debug import Debug
|
||||||
|
|
||||||
|
|
||||||
|
class DebugLock:
    """Semaphore wrapper that logs whenever acquiring takes noticeably long."""

    def __init__(self, log_after=0.01, name="Lock"):
        self.name = name
        self.log_after = log_after  # Seconds of waiting before we log
        self.lock = gevent.lock.Semaphore(1)
        # Releasing needs no instrumentation: delegate directly
        self.release = self.lock.release

    def acquire(self, *args, **kwargs):
        """Acquire the underlying semaphore; log slow waits with caller stack."""
        started = time.time()
        result = self.lock.acquire(*args, **kwargs)
        waited = time.time() - started
        if waited >= self.log_after:
            message = "%s: Waited %.3fs after called by %s" % (
                self.name, waited, Debug.formatStack()
            )
            logging.debug(message)
        return result
|
@ -0,0 +1,135 @@
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import re
|
||||||
|
import logging
|
||||||
|
import time
|
||||||
|
import functools
|
||||||
|
|
||||||
|
from Config import config
|
||||||
|
from util import helper
|
||||||
|
|
||||||
|
|
||||||
|
# Find files with extension in path
def findfiles(path, find_ext):
    """Yield "/"-normalized paths of files under *path* with an ext in *find_ext*.

    Directories are visited in case-insensitive order with the root itself
    last; previously generated "all.*" merge outputs are skipped.
    """
    def sorter(walk_a, walk_b):
        rel_a = walk_a[0].replace(path, "")
        rel_b = walk_b[0].replace(path, "")
        if rel_a == "":
            return 1  # Root dir sorts last
        if rel_b == "":
            return -1
        return helper.cmp(rel_a.lower(), rel_b.lower())

    walk_entries = sorted(os.walk(path, topdown=False), key=functools.cmp_to_key(sorter))
    for root, dirs, files in walk_entries:
        for name in sorted(files):
            if name.startswith("all."):
                continue  # Skip previously merged outputs
            if name.split(".")[-1] not in find_ext:
                continue
            yield (root + "/" + name).replace("\\", "/")
|
||||||
|
|
||||||
|
|
||||||
|
# Try to find coffeescript compiler in path
def findCoffeescriptCompiler():
    """Locate the `coffee` executable on PATH.

    Returns the shell-quoted compiler command line ("... --no-header -p"),
    or False when no compiler could be found.
    """
    coffeescript_compiler = None
    try:
        import distutils.spawn
        coffeescript_compiler = helper.shellquote(distutils.spawn.find_executable("coffee")) + " --no-header -p"
    except Exception:
        # BUGFIX: was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt. find_executable() returning None makes
        # shellquote raise TypeError — treated here as "not found".
        pass
    if coffeescript_compiler:
        return coffeescript_compiler
    else:
        return False
|
||||||
|
|
||||||
|
|
||||||
|
# Generates: all.js: merge *.js, compile coffeescript, all.css: merge *.css, vendor prefix features
def merge(merged_path):
    """Rebuild a merged asset file (all.js / all.css) if any source changed.

    Scans the directory of *merged_path* for source files (adding .coffee
    when merging .js), compares mtimes against the existing merged file,
    compiles changed coffeescript (reusing previously compiled parts when
    possible), concatenates everything with "/* ---- name ---- */" markers
    and writes the result. Returns False if a coffeescript compiler is
    needed but missing; otherwise returns None.
    """
    merged_path = merged_path.replace("\\", "/")
    merge_dir = os.path.dirname(merged_path)
    s = time.time()
    ext = merged_path.split(".")[-1]
    if ext == "js":  # If merging .js find .coffee too
        find_ext = ["js", "coffee"]
    else:
        find_ext = [ext]

    # If exist check the other files modification date
    if os.path.isfile(merged_path):
        merged_mtime = os.path.getmtime(merged_path)
    else:
        merged_mtime = 0

    changed = {}
    for file_path in findfiles(merge_dir, find_ext):
        # 1s slack avoids rebuild loops from filesystem mtime granularity
        if os.path.getmtime(file_path) > merged_mtime + 1:
            changed[file_path] = True
    if not changed:
        return  # Assets not changed, nothing to do

    old_parts = {}
    if os.path.isfile(merged_path):  # Find old parts to avoid unnecessary recompile
        merged_old = open(merged_path, "rb").read()
        # Each part is delimited by "/* ---- relative/path ---- */"
        for match in re.findall(rb"(/\* ---- (.*?) ---- \*/(.*?)(?=/\* ----|$))", merged_old, re.DOTALL):
            old_parts[match[1].decode()] = match[2].strip(b"\n\r")

    logging.debug("Merging %s (changed: %s, old parts: %s)" % (merged_path, changed, len(old_parts)))
    # Merge files
    parts = []
    s_total = time.time()
    for file_path in findfiles(merge_dir, find_ext):
        file_relative_path = file_path.replace(merge_dir + "/", "")
        parts.append(b"\n/* ---- %s ---- */\n\n" % file_relative_path.encode("utf8"))
        if file_path.endswith(".coffee"):  # Compile coffee script
            if file_path in changed or file_relative_path not in old_parts:  # Only recompile if changed or it's not compiled before
                if config.coffeescript_compiler is None:
                    config.coffeescript_compiler = findCoffeescriptCompiler()
                if not config.coffeescript_compiler:
                    logging.error("No coffeescript compiler defined, skipping compiling %s" % merged_path)
                    return False  # No coffeescript compiler, skip this file

                # Replace / with os separators and escape it
                file_path_escaped = helper.shellquote(file_path.replace("/", os.path.sep))

                if "%s" in config.coffeescript_compiler:  # Replace %s with coffeescript file
                    command = config.coffeescript_compiler.replace("%s", file_path_escaped)
                else:  # Put coffeescript file to end
                    command = config.coffeescript_compiler + " " + file_path_escaped

                # Start compiling
                s = time.time()
                compiler = subprocess.Popen(command, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
                out = compiler.stdout.read()
                compiler.wait()
                logging.debug("Running: %s (Done in %.2fs)" % (command, time.time() - s))

                # Check errors
                if out and out.startswith(b"("):  # No error found
                    parts.append(out)
                else:  # Put error message in place of source code
                    error = out
                    logging.error("%s Compile error: %s" % (file_relative_path, error))
                    # Make the error safe to embed in a JS alert() literal
                    error_escaped = re.escape(error).replace(b"\n", b"\\n").replace(br"\\n", br"\n")
                    parts.append(
                        b"alert('%s compile error: %s');" %
                        (file_relative_path.encode(), error_escaped)
                    )
            else:  # Not changed use the old_part
                parts.append(old_parts[file_relative_path])
        else:  # Add to parts
            parts.append(open(file_path, "rb").read())

    merged = b"\n".join(parts)
    if ext == "css":  # Vendor prefix css
        from lib.cssvendor import cssvendor
        merged = cssvendor.prefix(merged)
    merged = merged.replace(b"\r", b"")  # Normalize line endings
    open(merged_path, "wb").write(merged)
    logging.debug("Merged %s (%.2fs)" % (merged_path, time.time() - s_total))
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Manual test: merge the assets of a sample site with a local compiler
    logging.getLogger().setLevel(logging.DEBUG)
    os.chdir("..")
    config.coffeescript_compiler = r'type "%s" | tools\coffee-node\bin\node.exe tools\coffee-node\bin\coffee --no-header -s -p'
    merge("data/12Hw8rTgzrNo4DSh2AkqwPRqDyTticwJyH/js/all.js")
|
|
@ -0,0 +1,69 @@
|
||||||
|
import logging
|
||||||
|
import time
|
||||||
|
import os
|
||||||
|
|
||||||
|
from Config import config
|
||||||
|
|
||||||
|
# Autoreload only makes sense for the main action running in debug mode
if config.debug and config.action == "main":
    try:
        import watchdog
        import watchdog.observers
        import watchdog.events
        logging.debug("Watchdog fs listener detected, source code autoreload enabled")
        enabled = True
    except Exception as err:
        # watchdog is optional: fall back to no autoreload
        logging.debug("Watchdog fs listener could not be loaded: %s" % err)
        enabled = False
else:
    enabled = False
|
||||||
|
|
||||||
|
|
||||||
|
class DebugReloader:
    """Watch source directories and fire registered callbacks on code changes.

    Does nothing unless the module-level `enabled` flag is True (watchdog
    available and debug mode active).
    """

    def __init__(self, paths=None):
        if not paths:
            paths = ["src", "plugins", config.data_dir + "/__plugins__"]
        self.log = logging.getLogger("DebugReloader")
        # BUGFIX: attribute renamed from the misspelled `last_chaged`
        # (internal debounce timestamp, not referenced outside this class)
        self.last_changed = 0
        self.callbacks = []
        if enabled:
            self.observer = watchdog.observers.Observer()
            event_handler = watchdog.events.FileSystemEventHandler()
            # Route every relevant fs event type to the same handler
            event_handler.on_modified = event_handler.on_deleted = self.onChanged
            event_handler.on_created = event_handler.on_moved = self.onChanged
            for path in paths:
                if not os.path.isdir(path):
                    continue
                self.log.debug("Adding autoreload: %s" % path)
                self.observer.schedule(event_handler, path, recursive=True)
            self.observer.start()

    def addCallback(self, f):
        """Register *f* to be invoked after a source file changes."""
        self.callbacks.append(f)

    def onChanged(self, evt):
        """Filesystem event handler: filter, debounce, then fire callbacks."""
        path = evt.src_path
        ext = path.rsplit(".", 1)[-1]
        # Only react to code/config files, skip tests, debounce to once/sec
        if ext not in ["py", "json"] or "Test" in path or time.time() - self.last_changed < 1.0:
            return False
        self.last_changed = time.time()
        if os.path.isfile(path):
            time_modified = os.path.getmtime(path)
        else:
            time_modified = 0
        self.log.debug("File changed: %s reloading source code (modified %.3fs ago)" % (evt, time.time() - time_modified))
        if time.time() - time_modified > 5:  # Probably it's just an attribute change, ignore it
            return False

        time.sleep(0.1)  # Wait for lock release
        for callback in self.callbacks:
            try:
                callback()
            except Exception as err:
                # One failing callback must not block the others
                self.log.exception(err)

    def stop(self):
        """Stop the watchdog observer (no-op when autoreload is disabled)."""
        if enabled:
            self.observer.stop()
            self.log.debug("Stopped autoreload observer")
|
||||||
|
|
||||||
|
watcher = DebugReloader()  # Module-level singleton; inert unless autoreload is enabled
|
|
@ -0,0 +1,448 @@
|
||||||
|
# Included modules
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
import json
|
||||||
|
import collections
|
||||||
|
import itertools
|
||||||
|
|
||||||
|
# Third party modules
|
||||||
|
import gevent
|
||||||
|
|
||||||
|
from Debug import Debug
|
||||||
|
from Config import config
|
||||||
|
from util import RateLimit
|
||||||
|
from util import Msgpack
|
||||||
|
from util import helper
|
||||||
|
from Plugin import PluginManager
|
||||||
|
from contextlib import closing
|
||||||
|
|
||||||
|
FILE_BUFF = 1024 * 512
|
||||||
|
|
||||||
|
|
||||||
|
class RequestError(Exception):
    """Raised on invalid or failed file requests; reported back to the peer."""
    pass
|
||||||
|
|
||||||
|
|
||||||
|
# Incoming requests
|
||||||
|
@PluginManager.acceptPlugins
|
||||||
|
class FileRequest(object):
|
||||||
|
__slots__ = ("server", "connection", "req_id", "sites", "log", "responded")
|
||||||
|
|
||||||
|
def __init__(self, server, connection):
    """Bind this request handler to its file server and peer connection."""
    self.server = server
    self.connection = connection

    self.req_id = None  # Set by route() for the request being handled
    self.sites = server.sites
    self.log = server.log
    self.responded = False  # Responded to the request
|
||||||
|
|
||||||
|
def send(self, msg, streaming=False):
    """Forward *msg* to the peer unless the connection is already closed."""
    if self.connection.closed:
        return
    self.connection.send(msg, streaming)
|
||||||
|
|
||||||
|
def sendRawfile(self, file, read_bytes):
    """Stream raw file content to the peer unless the connection is closed."""
    if self.connection.closed:
        return
    self.connection.sendRawfile(file, read_bytes)
|
||||||
|
|
||||||
|
def response(self, msg, streaming=False):
    """Send one response for the current req_id; subsequent calls are no-ops."""
    if self.responded:
        if config.verbose:
            self.log.debug("Req id %s already responded" % self.req_id)
        return
    # Non-dict payloads are wrapped as {"body": msg}
    payload = msg if isinstance(msg, dict) else {"body": msg}
    payload["cmd"] = "response"
    payload["to"] = self.req_id
    self.responded = True
    self.send(payload, streaming=streaming)
|
||||||
|
|
||||||
|
# Route file requests
def route(self, cmd, req_id, params):
    """Dispatch an incoming peer command to the matching action* handler.

    Enforces onion site-locking, rate-limits "update" commands, throttles
    CPU-hungry connections for non-IO commands, and tracks per-connection
    CPU time. Returns False on a site-lock violation.
    """
    self.req_id = req_id
    # Don't allow other sites than locked
    if "site" in params and self.connection.target_onion:
        valid_sites = self.connection.getValidSites()
        if params["site"] not in valid_sites and valid_sites != ["global"]:
            self.response({"error": "Invalid site"})
            self.connection.log(
                "Site lock violation: %s not in %s, target onion: %s" %
                (params["site"], valid_sites, self.connection.target_onion)
            )
            self.connection.badAction(5)
            return False

    if cmd == "update":
        event = "%s update %s %s" % (self.connection.id, params["site"], params["inner_path"])
        # If called more than once within 15 sec only keep the last update
        RateLimit.callAsync(event, max(self.connection.bad_actions, 15), self.actionUpdate, params)
    else:
        func_name = "action" + cmd[0].upper() + cmd[1:]
        func = getattr(self, func_name, None)
        if cmd not in ["getFile", "streamFile"]:  # Skip IO bound functions
            if self.connection.cpu_time > 0.5:
                self.log.debug(
                    "Delay %s %s, cpu_time used by connection: %.3fs" %
                    (self.connection.ip, cmd, self.connection.cpu_time)
                )
                # Penalty sleep proportional to CPU time already consumed
                time.sleep(self.connection.cpu_time)
                if self.connection.cpu_time > 5:
                    self.connection.close("Cpu time: %.3fs" % self.connection.cpu_time)
        s = time.time()
        if func:
            func(params)
        else:
            self.actionUnknown(cmd, params)

        if cmd not in ["getFile", "streamFile"]:
            taken = time.time() - s
            # Exclude time spent sending the response from CPU accounting
            taken_sent = self.connection.last_sent_time - self.connection.last_send_time
            self.connection.cpu_time += taken - taken_sent
|
||||||
|
|
||||||
|
# Update a site file request
def actionUpdate(self, params):
    """Handle a content.json update pushed by a peer.

    Validates the site, downloads the body from the peer if it was not
    included, verifies the signed content and either saves + republishes it,
    registers the peer (unchanged content), or reports the validation error.
    Only the "updaed" typo in the download-failure error message is fixed;
    all logic is kept as-is.
    """
    site = self.sites.get(params["site"])
    if not site or not site.isServing():  # Site unknown or not serving
        self.response({"error": "Unknown site"})
        # NOTE(review): both calls kept from the original (net +6 bad actions)
        self.connection.badAction(1)
        self.connection.badAction(5)
        return False

    inner_path = params.get("inner_path", "")
    current_content_modified = site.content_manager.contents.get(inner_path, {}).get("modified", 0)
    body = params["body"]

    if not inner_path.endswith("content.json"):
        self.response({"error": "Only content.json update allowed"})
        self.connection.badAction(5)
        return

    should_validate_content = True
    if "modified" in params and params["modified"] <= current_content_modified:
        should_validate_content = False
        valid = None  # Same or earlier content as we have
    elif not body:  # No body sent, we have to download it first
        site.log.debug("Missing body from update for file %s, downloading ..." % inner_path)
        peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="update")  # Add or get peer
        try:
            body = peer.getFile(site.address, inner_path).read()
        except Exception as err:
            site.log.debug("Can't download updated file %s: %s" % (inner_path, err))
            # BUGFIX: corrected typo in peer-facing error message ("updaed")
            self.response({"error": "File invalid update: Can't download updated file"})
            self.connection.badAction(5)
            return

    if should_validate_content:
        try:
            content = json.loads(body.decode())
        except Exception as err:
            site.log.debug("Update for %s is invalid JSON: %s" % (inner_path, err))
            self.response({"error": "File invalid JSON"})
            self.connection.badAction(5)
            return

        file_uri = "%s/%s:%s" % (site.address, inner_path, content["modified"])

        if self.server.files_parsing.get(file_uri):  # Check if we already working on it
            valid = None  # Same file
        else:
            try:
                valid = site.content_manager.verifyFile(inner_path, content)
            except Exception as err:
                site.log.debug("Update for %s is invalid: %s" % (inner_path, err))
                error = err
                valid = False

    if valid is True:  # Valid and changed
        site.log.info("Update for %s looks valid, saving..." % inner_path)
        self.server.files_parsing[file_uri] = True
        site.storage.write(inner_path, body)
        del params["body"]

        site.onFileDone(inner_path)  # Trigger filedone

        if inner_path.endswith("content.json"):  # Download every changed file from peer
            peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="update")  # Add or get peer
            # On complete publish to other peers
            diffs = params.get("diffs", {})
            site.onComplete.once(lambda: site.publish(inner_path=inner_path, diffs=diffs), "publish_%s" % inner_path)

            # Load new content file and download changed files in new thread
            def downloader():
                site.downloadContent(inner_path, peer=peer, diffs=params.get("diffs", {}))
                del self.server.files_parsing[file_uri]

            gevent.spawn(downloader)
        else:
            del self.server.files_parsing[file_uri]

        self.response({"ok": "Thanks, file %s updated!" % inner_path})
        self.connection.goodAction()

    elif valid is None:  # Not changed
        peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="update old")  # Add or get peer
        if peer:
            if not peer.connection:
                peer.connect(self.connection)  # Assign current connection to peer
            if inner_path in site.content_manager.contents:
                peer.last_content_json_update = site.content_manager.contents[inner_path]["modified"]
            if config.verbose:
                site.log.debug(
                    "Same version, adding new peer for locked files: %s, tasks: %s" %
                    (peer.key, len(site.worker_manager.tasks))
                )
            for task in site.worker_manager.tasks:  # New peer add to every ongoing task
                if task["peers"] and not task["optional_hash_id"]:
                    # Download file from this peer too if its peer locked
                    site.needFile(task["inner_path"], peer=peer, update=True, blocking=False)

        self.response({"ok": "File not changed"})
        self.connection.badAction()

    else:  # Invalid sign or sha hash
        self.response({"error": "File %s invalid: %s" % (inner_path, error)})
        self.connection.badAction(5)
|
||||||
|
|
||||||
|
def isReadable(self, site, inner_path, file, pos):
    # Hook for plugins to veto reads at a given position; default: always readable
    return True
|
||||||
|
|
||||||
|
# Send file content request
def handleGetFile(self, params, streaming=False):
    """Serve one chunk of a site file to the peer.

    params: "site", "inner_path", "location" (byte offset), optional
    "read_bytes" (default FILE_BUFF) and "file_size" (cross-checked).
    With streaming=True the metadata response is followed by the raw byte
    stream; otherwise the chunk is embedded via a Msgpack.FilePart.
    Returns a stats dict on success, False on read errors.
    """
    site = self.sites.get(params["site"])
    if not site or not site.isServing():  # Site unknown or not serving
        self.response({"error": "Unknown site"})
        self.connection.badAction(5)
        return False
    try:
        file_path = site.storage.getPath(params["inner_path"])
        if streaming:
            file_obj = site.storage.open(params["inner_path"])
        else:
            file_obj = Msgpack.FilePart(file_path, "rb")

        with file_obj as file:
            file.seek(params["location"])
            read_bytes = params.get("read_bytes", FILE_BUFF)
            file_size = os.fstat(file.fileno()).st_size

            if file_size > read_bytes:  # Check if file is readable at current position (for big files)
                if not self.isReadable(site, params["inner_path"], file, params["location"]):
                    raise RequestError("File not readable at position: %s" % params["location"])
            else:
                # Small file: the peer's claimed size must match exactly
                if params.get("file_size") and params["file_size"] != file_size:
                    self.connection.badAction(2)
                    raise RequestError("File size does not match: %sB != %sB" % (params["file_size"], file_size))

            if not streaming:
                # FilePart serializes only this many bytes into the response
                file.read_bytes = read_bytes

            if params["location"] > file_size:
                self.connection.badAction(5)
                raise RequestError("Bad file location")

            if streaming:
                back = {
                    "size": file_size,
                    "location": min(file.tell() + read_bytes, file_size),
                    "stream_bytes": min(read_bytes, file_size - params["location"])
                }
                self.response(back)
                self.sendRawfile(file, read_bytes=read_bytes)
            else:
                back = {
                    "body": file,
                    "size": file_size,
                    "location": min(file.tell() + file.read_bytes, file_size)
                }
                self.response(back, streaming=True)

            bytes_sent = min(read_bytes, file_size - params["location"])  # Number of bytes we going to send
            site.settings["bytes_sent"] = site.settings.get("bytes_sent", 0) + bytes_sent
            if config.debug_socket:
                self.log.debug("File %s at position %s sent %s bytes" % (file_path, params["location"], bytes_sent))

            # Add peer to site if not added before
            connected_peer = site.addPeer(self.connection.ip, self.connection.port, source="request")
            if connected_peer:  # Just added
                connected_peer.connect(self.connection)  # Assign current connection to peer

            return {"bytes_sent": bytes_sent, "file_size": file_size, "location": params["location"]}

    except RequestError as err:
        self.log.debug("GetFile %s %s %s request error: %s" % (self.connection, params["site"], params["inner_path"], Debug.formatException(err)))
        self.response({"error": "File read error: %s" % err})
    except OSError as err:
        if config.verbose:
            self.log.debug("GetFile read error: %s" % Debug.formatException(err))
        self.response({"error": "File read error"})
        return False
    except Exception as err:
        self.log.error("GetFile exception: %s" % Debug.formatException(err))
        self.response({"error": "File read exception"})
        return False
|
||||||
|
|
||||||
|
def actionGetFile(self, params):
    """Handle a "getFile" command: serve the requested file part in the response body."""
    return self.handleGetFile(params)
|
||||||
|
|
||||||
|
def actionStreamFile(self, params):
    """Handle a "streamFile" command: like "getFile", but with streaming enabled."""
    return self.handleGetFile(params, streaming=True)
|
||||||
|
|
||||||
|
# Peer exchange request
def actionPex(self, params):
    """
    Exchange peer lists with the requesting peer.

    Registers the peers sent in params (plain ipv4/ipv6 and onion ones) for
    the site, then responds with up to params["need"] connectable peers that
    were not in the received list.
    """
    site = self.sites.get(params["site"])
    if not site or not site.isServing():  # Site unknown or not serving
        self.response({"error": "Unknown site"})
        self.connection.badAction(5)
        return False

    got_peer_keys = []  # "ip:port" keys received from the requester; excluded from the reply
    added = 0  # Number of peers newly registered during this exchange

    # Add requester peer to site
    connected_peer = site.addPeer(self.connection.ip, self.connection.port, source="request")

    if connected_peer:  # It was not registered before
        added += 1
        connected_peer.connect(self.connection)  # Assign current connection to peer

    # Add sent peers to site
    for packed_address in itertools.chain(params.get("peers", []), params.get("peers_ipv6", [])):
        address = helper.unpackAddress(packed_address)
        got_peer_keys.append("%s:%s" % address)
        if site.addPeer(*address, source="pex"):
            added += 1

    # Add sent onion peers to site
    for packed_address in params.get("peers_onion", []):
        address = helper.unpackOnionAddress(packed_address)
        got_peer_keys.append("%s:%s" % address)
        if site.addPeer(*address, source="pex"):
            added += 1

    # Send back peers that is not in the sent list and connectable (not port 0)
    packed_peers = helper.packPeers(site.getConnectablePeers(params["need"], ignore=got_peer_keys, allow_private=False))

    if added:
        site.worker_manager.onPeers()
        if config.verbose:
            self.log.debug(
                "Added %s peers to %s using pex, sending back %s" %
                (added, site, {key: len(val) for key, val in packed_peers.items()})
            )

    back = {
        "peers": packed_peers["ipv4"],
        "peers_ipv6": packed_peers["ipv6"],
        "peers_onion": packed_peers["onion"]
    }

    self.response(back)
|
||||||
|
|
||||||
|
# Get modified content.json files since
def actionListModified(self, params):
    """Respond with the content.json files modified after params["since"]."""
    site = self.sites.get(params["site"])
    if not site or not site.isServing():  # Unknown or disabled site
        self.response({"error": "Unknown site"})
        self.connection.badAction(5)
        return False

    changed = site.content_manager.listModified(params["since"])

    # Register the requester as a peer of the site if it is not known yet
    new_peer = site.addPeer(self.connection.ip, self.connection.port, source="request")
    if new_peer:  # Freshly added: attach this connection to it
        new_peer.connect(self.connection)

    self.response({"modified_files": changed})
|
||||||
|
|
||||||
|
def actionGetHashfield(self, params):
    """Send our optional-file hashfield for the requested site as raw bytes."""
    site = self.sites.get(params["site"])
    if not site or not site.isServing():  # Unknown or disabled site
        self.response({"error": "Unknown site"})
        self.connection.badAction(5)
        return False

    # Make sure the requester is registered as a peer of the site
    requester = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, source="request")
    if not requester.connection:  # Newly added peer: attach this connection
        requester.connect(self.connection)

    # Remember when we sent our hashfield, so it is not re-sent while unchanged
    requester.time_my_hashfield_sent = time.time()

    self.response({"hashfield_raw": site.content_manager.hashfield.tobytes()})
|
||||||
|
|
||||||
|
def findHashIds(self, site, hash_ids, limit=100):
    """Collect packed peer addresses for the given optional-file hash ids.

    Returns a mapping: ip_type -> hash_id -> list of packed peer addresses
    (at most 20 per hash id).
    """
    result = collections.defaultdict(lambda: collections.defaultdict(list))
    found = site.worker_manager.findOptionalHashIds(hash_ids, limit=limit)

    for hash_id, peers in found.items():
        for peer in peers:
            bucket = result[self.server.getIpType(peer.ip)][hash_id]
            if len(bucket) < 20:  # Cap the number of returned peers per hash id
                bucket.append(peer.packMyAddress())
    return result
|
||||||
|
|
||||||
|
def actionFindHashIds(self, params):
    """
    Find peers that have the optional files identified by params["hash_ids"].

    Replies with packed peer addresses grouped by ip type, plus the subset
    of the requested hash ids that we have locally ("my").
    """
    site = self.sites.get(params["site"])
    s = time.time()  # For measuring the lookup duration (verbose log below)
    if not site or not site.isServing():  # Site unknown or not serving
        self.response({"error": "Unknown site"})
        self.connection.badAction(5)
        return False

    # Rate-limit expensive/repeated queries from the same peer for the same site
    event_key = "%s_findHashIds_%s_%s" % (self.connection.ip, params["site"], len(params["hash_ids"]))
    if self.connection.cpu_time > 0.5 or not RateLimit.isAllowed(event_key, 60 * 5):
        # Costly connection or rate limit hit: throttle and use a smaller result limit
        time.sleep(0.1)
        back = self.findHashIds(site, params["hash_ids"], limit=10)
    else:
        back = self.findHashIds(site, params["hash_ids"])
    RateLimit.called(event_key)

    # Check which of the requested hash ids we have ourselves
    my_hashes = []
    my_hashfield_set = set(site.content_manager.hashfield)
    for hash_id in params["hash_ids"]:
        if hash_id in my_hashfield_set:
            my_hashes.append(hash_id)

    if config.verbose:
        self.log.debug(
            "Found: %s for %s hashids in %.3fs" %
            ({key: len(val) for key, val in back.items()}, len(params["hash_ids"]), time.time() - s)
        )
    self.response({"peers": back["ipv4"], "peers_onion": back["onion"], "peers_ipv6": back["ipv6"], "my": my_hashes})
|
||||||
|
|
||||||
|
def actionSetHashfield(self, params):
    """Store the requester's optional-file hashfield sent as raw bytes."""
    site = self.sites.get(params["site"])
    if not site or not site.isServing():  # Unknown or disabled site
        self.response({"error": "Unknown site"})
        self.connection.badAction(5)
        return False

    # Add or get peer
    sender = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, connection=self.connection, source="request")
    if not sender.connection:
        sender.connect(self.connection)
    sender.hashfield.replaceFromBytes(params["hashfield_raw"])
    self.response({"ok": "Updated"})
|
||||||
|
|
||||||
|
# Send a simple Pong! answer
def actionPing(self, params):
    """Reply to a keep-alive ping with a raw b"Pong!" body."""
    self.response(b"Pong!")
|
||||||
|
|
||||||
|
# Check requested port of the other peer
def actionCheckport(self, params):
    """
    Test whether the requester's port (params["port"]) is reachable from here.

    Attempts a TCP connect back to the requester's IP on the given port and
    responds with "open"/"closed" plus the external IP we observed.
    """
    if self.server.getIpType(self.connection.ip) == "ipv6":
        # AF_INET6 socket addresses are (host, port, flowinfo, scopeid)
        sock_address = (self.connection.ip, params["port"], 0, 0)
    else:
        sock_address = (self.connection.ip, params["port"])

    with closing(helper.createSocket(self.connection.ip)) as sock:
        sock.settimeout(5)
        if sock.connect_ex(sock_address) == 0:  # 0 return value means the connect succeeded
            self.response({"status": "open", "ip_external": self.connection.ip})
        else:
            self.response({"status": "closed", "ip_external": self.connection.ip})
|
||||||
|
|
||||||
|
# Unknown command
def actionUnknown(self, cmd, params):
    """Fallback handler: report an unrecognized command and penalize the peer."""
    error_msg = "Unknown command: %s" % cmd
    self.response({"error": error_msg})
    self.connection.badAction(5)
|
|
@ -0,0 +1,788 @@
|
||||||
|
import logging
|
||||||
|
import time
|
||||||
|
import random
|
||||||
|
import socket
|
||||||
|
import sys
|
||||||
|
import weakref
|
||||||
|
|
||||||
|
import gevent
|
||||||
|
import gevent.pool
|
||||||
|
from gevent.server import StreamServer
|
||||||
|
|
||||||
|
import util
|
||||||
|
from util import helper
|
||||||
|
from Config import config
|
||||||
|
from .FileRequest import FileRequest
|
||||||
|
from Peer import PeerPortchecker
|
||||||
|
from Site import SiteManager
|
||||||
|
from Connection import ConnectionServer
|
||||||
|
from Plugin import PluginManager
|
||||||
|
from Debug import Debug
|
||||||
|
|
||||||
|
log = logging.getLogger("FileServer")
|
||||||
|
|
||||||
|
class FakeThread(object):
    """Placeholder thread handle that always reports itself as not finished.

    Used where a greenlet-like object is expected before the real one has
    been spawned.
    """

    def __init__(self):
        pass

    def ready(self):
        # A fake thread never completes
        return False
|
||||||
|
|
||||||
|
@PluginManager.acceptPlugins
|
||||||
|
class FileServer(ConnectionServer):
|
||||||
|
|
||||||
|
def __init__(self, ip=config.fileserver_ip, port=config.fileserver_port, ip_type=config.fileserver_ip_type):
    """Set up the file server: greenlet pools, ip/port selection and the
    underlying ConnectionServer."""
    self.site_manager = SiteManager.site_manager
    self.portchecker = PeerPortchecker.PeerPortchecker(self)
    self.ip_type = ip_type
    self.ip_external_list = []  # External IPs discovered by portCheck()

    # This is wrong:
    # self.log = logging.getLogger("FileServer")
    # The value of self.log will be overwritten in ConnectionServer.__init__()

    self.recheck_port = True  # Request a port check on the next recheckPort() call

    self.active_mode_thread_pool = gevent.pool.Pool(None)
    self.site_pool = gevent.pool.Pool(None)

    self.update_pool = gevent.pool.Pool(10)  # At most 10 concurrent site updates
    self.update_start_time = 0
    self.update_sites_task_next_nr = 1  # Sequence number for updateSites() log lines

    # Weak values: finished update greenlets drop out automatically
    self.update_threads = weakref.WeakValueDictionary()

    self.passive_mode = None
    self.active_mode = None
    self.active_mode_threads = {}

    self.supported_ip_types = ["ipv4"]  # Outgoing ip_type support
    if self.getIpType(ip) == "ipv6" or self.isIpv6Supported():
        self.supported_ip_types.append("ipv6")

    # Expand the "*" wildcard in the bind address for the chosen ip type
    if ip_type == "ipv6" or (ip_type == "dual" and "ipv6" in self.supported_ip_types):
        ip = ip.replace("*", "::")
    else:
        ip = ip.replace("*", "0.0.0.0")

    if config.tor == "always":
        port = config.tor_hs_port
        config.fileserver_port = port
    elif port == 0:  # Use random port
        port_range_from, port_range_to = list(map(int, config.fileserver_port_range.split("-")))
        port = self.getRandomPort(ip, port_range_from, port_range_to)
        config.fileserver_port = port
        if not port:
            raise Exception("Can't find bindable port")
        if not config.tor == "always":
            config.saveValue("fileserver_port", port)  # Save random port value for next restart
            config.arguments.fileserver_port = port

    ConnectionServer.__init__(self, ip, port, self.handleRequest)
    log.debug("Supported IP types: %s" % self.supported_ip_types)

    self.managed_pools["active_mode_thread"] = self.active_mode_thread_pool
    self.managed_pools["update"] = self.update_pool
    self.managed_pools["site"] = self.site_pool

    if ip_type == "dual" and ip == "::":
        # Also bind to ipv4 addres in dual mode
        try:
            log.debug("Binding proxy to %s:%s" % ("::", self.port))
            self.stream_server_proxy = StreamServer(
                ("0.0.0.0", self.port), self.handleIncomingConnection, spawn=self.pool, backlog=100
            )
        except Exception as err:
            log.info("StreamServer proxy create error: %s" % Debug.formatException(err))

    self.port_opened = {}  # ip_type -> True/False/None, filled by portCheck()

    self.last_request = time.time()
    self.files_parsing = {}
    self.ui_server = None  # Set externally; used for websocket notifications
|
||||||
|
|
||||||
|
def getSites(self):
    """Return the current site dict from the site manager.

    The result is also mirrored into self.sites for backward compatibility
    with plugins. Never. Ever. Use self.sites directly.
    TODO: fix plugins
    """
    current = self.site_manager.list()
    self.sites = current
    return current
|
||||||
|
|
||||||
|
def getSite(self, address):
    """Look up a site by address; returns None when unknown."""
    return self.getSites().get(address)
|
||||||
|
|
||||||
|
def getSiteAddresses(self):
    """Return site addresses ordered by last modification time (newest first).

    Addresses are returned instead of site objects so iteration does not
    keep deleted sites alive on the caller's stack.
    """
    sites_by_modified = sorted(
        list(self.getSites().values()),
        key=lambda site: site.settings.get("modified", 0),
        reverse=True
    )
    return [site.address for site in sites_by_modified]
|
||||||
|
|
||||||
|
def getRandomPort(self, ip, port_range_from, port_range_to):
    """Find a bindable random port within the given inclusive range.

    Returns the port number, or False when no free port was found within
    100 attempts.
    """
    log.info("Getting random port in range %s-%s..." % (port_range_from, port_range_to))
    attempted = set()
    for _ in range(100):
        candidate = random.randint(port_range_from, port_range_to)
        if candidate in attempted:
            continue  # Already probed this port
        attempted.add(candidate)
        probe = helper.createSocket(ip)
        try:
            probe.bind((ip, candidate))
            bindable = True
        except Exception as err:
            log.warning("Error binding to port %s: %s" % (candidate, err))
            bindable = False
        probe.close()
        if bindable:
            log.info("Found unused random port: %s" % candidate)
            return candidate
        self.sleep(0.1)  # Back off briefly before trying another port
    return False
|
||||||
|
|
||||||
|
def isIpv6Supported(self):
    """
    Best-effort check whether outgoing IPv6 connectivity is available.

    Connects a UDP socket towards a test address (no packet is actually
    sent for a UDP connect) and inspects the local address the OS picked.
    """
    if config.tor == "always":
        # With Tor handling all traffic we consider ipv6 reachable
        return True
    # Test if we can connect to ipv6 address
    ipv6_testip = "fcec:ae97:8902:d810:6c92:ec67:efb2:3ec5"
    try:
        sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        sock.connect((ipv6_testip, 80))
        local_ipv6 = sock.getsockname()[0]
        if local_ipv6 == "::1":
            # Only loopback was selected: no usable IPv6 route
            log.debug("IPv6 not supported, no local IPv6 address")
            return False
        else:
            log.debug("IPv6 supported on IP %s" % local_ipv6)
            return True
    except socket.error as err:
        log.warning("IPv6 not supported: %s" % err)
        return False
    except Exception as err:
        log.error("IPv6 check error: %s" % err)
        return False
|
||||||
|
|
||||||
|
def listenProxy(self):
    """Run the ipv4 proxy StreamServer forever (used in dual-stack mode).

    "Address already in use" (errno 98) is a common/expected condition and
    is only logged at debug level; any other failure is logged at info.
    """
    try:
        self.stream_server_proxy.serve_forever()
    except Exception as err:
        # Bug fix: not every exception type carries an .errno attribute, so
        # read it defensively instead of risking an AttributeError inside
        # the error handler itself.
        if getattr(err, "errno", None) == 98:  # Address already in use error
            log.debug("StreamServer proxy listen error: %s" % err)
        else:
            log.info("StreamServer proxy listen error: %s" % err)
|
||||||
|
|
||||||
|
# Handle request to fileserver
def handleRequest(self, connection, message):
    """Dispatch one incoming fileserver message to a FileRequest handler."""
    if config.verbose:
        if "params" in message:
            log.debug(
                "FileRequest: %s %s %s %s" %
                (str(connection), message["cmd"], message["params"].get("site"), message["params"].get("inner_path"))
            )
        else:
            log.debug("FileRequest: %s %s" % (str(connection), message["cmd"]))
    req = FileRequest(self, connection)
    req.route(message["cmd"], message.get("req_id"), message.get("params"))
    if not connection.is_private_ip:
        # A request arriving from a non-private address shows we are reachable
        self.setInternetStatus(True)
|
||||||
|
|
||||||
|
def onInternetOnline(self):
    """React to the Internet connection coming back.

    Invalidates site update times over the (randomly widened) outage
    window, requests a port re-check and kicks off a site update pass.
    """
    log.info("Internet online")
    outage_start = (
        self.internet_offline_since
        - self.internet_outage_threshold
        - random.randint(60 * 5, 60 * 10)
    )
    self.invalidateUpdateTime((outage_start, time.time()))
    self.recheck_port = True
    self.spawn(self.updateSites)
|
||||||
|
|
||||||
|
# Reload the FileRequest class to prevent restarts in debug mode
def reload(self):
    """Re-import FileRequest from disk so code changes apply without a restart."""
    global FileRequest
    # Bug fix: the `imp` module was deprecated since Python 3.4 and removed
    # in Python 3.12; importlib.util provides the supported equivalent of
    # imp.load_source().
    import importlib.util
    spec = importlib.util.spec_from_file_location("FileRequest", "src/File/FileRequest.py")
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    FileRequest = module.FileRequest
|
||||||
|
|
||||||
|
def portCheck(self):
    """
    Determine whether our fileserver port is reachable over ipv4/ipv6.

    Returns a dict {"ipv4": opened, "ipv6": opened} where opened is
    True/False or None when the check was skipped. Also updates
    self.port_opened, self.ip_external_list and the peer blacklist.
    """
    if self.isOfflineMode():
        log.info("Offline mode: port check disabled")
        res = {"ipv4": None, "ipv6": None}
        self.port_opened = res
        return res

    if config.ip_external:
        # External IPs are configured explicitly: trust them, no probing
        for ip_external in config.ip_external:
            SiteManager.peer_blacklist.append((ip_external, self.port))  # Add myself to peer blacklist

        ip_external_types = set([self.getIpType(ip) for ip in config.ip_external])
        res = {
            "ipv4": "ipv4" in ip_external_types,
            "ipv6": "ipv6" in ip_external_types
        }
        self.ip_external_list = config.ip_external
        self.port_opened.update(res)
        log.info("Server port opened based on configuration ipv4: %s, ipv6: %s" % (res["ipv4"], res["ipv6"]))
        return res

    self.port_opened = {}
    if self.ui_server:
        self.ui_server.updateWebsocket()

    # Run the ipv6 check concurrently with the ipv4 one below
    if "ipv6" in self.supported_ip_types:
        res_ipv6_thread = self.spawn(self.portchecker.portCheck, self.port, "ipv6")
    else:
        res_ipv6_thread = None

    res_ipv4 = self.portchecker.portCheck(self.port, "ipv4")
    if not res_ipv4["opened"] and config.tor != "always":
        # Try to open the port (e.g. via UPnP) and re-check once
        if self.portchecker.portOpen(self.port):
            res_ipv4 = self.portchecker.portCheck(self.port, "ipv4")

    if res_ipv6_thread is None:
        res_ipv6 = {"ip": None, "opened": None}
    else:
        res_ipv6 = res_ipv6_thread.get()
        # Sanity check: the reported external ip must really be an ipv6 address
        if res_ipv6["opened"] and not self.getIpType(res_ipv6["ip"]) == "ipv6":
            log.info("Invalid IPv6 address from port check: %s" % res_ipv6["ip"])
            res_ipv6["opened"] = False

    self.ip_external_list = []
    for res_ip in [res_ipv4, res_ipv6]:
        if res_ip["ip"] and res_ip["ip"] not in self.ip_external_list:
            self.ip_external_list.append(res_ip["ip"])
            SiteManager.peer_blacklist.append((res_ip["ip"], self.port))

    log.info("Server port opened ipv4: %s, ipv6: %s" % (res_ipv4["opened"], res_ipv6["opened"]))

    res = {"ipv4": res_ipv4["opened"], "ipv6": res_ipv6["opened"]}

    # Add external IPs from local interfaces
    interface_ips = helper.getInterfaceIps("ipv4")
    if "ipv6" in self.supported_ip_types:
        interface_ips += helper.getInterfaceIps("ipv6")
    for ip in interface_ips:
        if not helper.isPrivateIp(ip) and ip not in self.ip_external_list:
            self.ip_external_list.append(ip)
            res[self.getIpType(ip)] = True  # We have opened port if we have external ip
            SiteManager.peer_blacklist.append((ip, self.port))
            log.debug("External ip found on interfaces: %s" % ip)

    self.port_opened.update(res)

    if self.ui_server:
        self.ui_server.updateWebsocket()

    return res
|
||||||
|
|
||||||
|
@util.Noparallel(queue=True)
def recheckPort(self):
    """Run portCheck() once when a re-check has been requested, then clear the flag."""
    if not self.recheck_port:
        return
    self.portCheck()
    self.recheck_port = False
|
||||||
|
|
||||||
|
# Returns False if Internet is immediately available
# Returns True if we've spent some time waiting for Internet
# Returns None if FileServer is stopping or the Offline mode is enabled
@util.Noparallel()
def waitForInternetOnline(self):
    """Block until the Internet connection is considered online (see return
    value contract in the comments above)."""
    if self.isOfflineMode() or self.stopping:
        return None

    if self.isInternetOnline():
        return False

    while not self.isInternetOnline():
        self.sleep(30)
        if self.isOfflineMode() or self.stopping:
            return None
        if self.isInternetOnline():
            break
        if len(self.update_pool) == 0:
            # No update is running that could generate traffic, so actively
            # probe the network by updating a random site.
            log.info("Internet connection seems to be broken. Running an update for a random site to check if we are able to connect to any peer.")
            thread = self.thread_pool.spawn(self.updateRandomSite)
            thread.join()

    self.recheckPort()
    return True
|
||||||
|
|
||||||
|
def updateRandomSite(self, site_addresses=None, force=False):
    """Run an update for one randomly chosen site and wait for it to finish.

    site_addresses: optional candidate list (defaults to all known sites).
    force: accepted for caller compatibility; the chosen site is updated
    unconditionally either way.
    """
    if not site_addresses:
        site_addresses = self.getSiteAddresses()

    # Bug fix: check for emptiness BEFORE sampling — random.sample() raises
    # ValueError when asked for more items than the population contains,
    # so the original order (sample first, then length check) could crash
    # when no sites are known.
    if len(site_addresses) < 1:
        return

    address = random.sample(site_addresses, 1)[0]
    site = self.getSite(address)

    if not site:
        return

    log.info("Randomly chosen site: %s", site.address_short)

    self.spawnUpdateSite(site).join()
|
||||||
|
|
||||||
|
def updateSite(self, site, check_files=False, verify_files=False):
    """Run site.update2(), logging which mode (update/check/verify) is used."""
    if not site:
        return
    mode = 'verify' if verify_files else ('check' if check_files else 'update')
    log.info("running <%s> for %s" % (mode, site.address_short))
    site.update2(check_files=check_files, verify_files=verify_files)
|
||||||
|
|
||||||
|
def spawnUpdateSite(self, site, check_files=False, verify_files=False):
    """Spawn updateSite() on the update pool and track it in update_threads.

    A FakeThread placeholder is registered first so concurrent lookups see
    the site as "being updated" before the real greenlet exists. The local
    variable keeps the placeholder alive (update_threads holds weak values).
    """
    placeholder = FakeThread()
    self.update_threads[site.address] = placeholder
    greenlet = self.update_pool.spawn(
        self.updateSite, site, check_files=check_files, verify_files=verify_files
    )
    self.update_threads[site.address] = greenlet
    return greenlet
|
||||||
|
|
||||||
|
def lookupInUpdatePool(self, site_address):
    """Return the still-running update thread for the address, or None."""
    thread = self.update_threads.get(site_address, None)
    if thread and not thread.ready():
        return thread
    return None
|
||||||
|
|
||||||
|
def siteIsInUpdatePool(self, site_address):
    """True when an update for the site is currently running."""
    thread = self.lookupInUpdatePool(site_address)
    return thread is not None
|
||||||
|
|
||||||
|
def invalidateUpdateTime(self, invalid_interval):
    """Invalidate cached update times falling into invalid_interval for every site."""
    for address in self.getSiteAddresses():
        site = self.getSite(address)
        if not site:  # The site may have been deleted while iterating
            continue
        site.invalidateUpdateTime(invalid_interval)
|
||||||
|
|
||||||
|
def isSiteUpdateTimeValid(self, site_address):
    """True when the site exists and its last update is still considered fresh."""
    site = self.getSite(site_address)
    return bool(site) and site.isUpdateTimeValid()
|
||||||
|
|
||||||
|
def updateSites(self):
    """Run one update pass over all sites whose update time is no longer valid."""
    task_nr = self.update_sites_task_next_nr
    self.update_sites_task_next_nr += 1

    task_description = "updateSites [#%d]" % task_nr
    log.info("%s: started", task_description)

    # Don't wait port opening on first startup. Do the instant check now.
    if len(self.getSites()) <= 2:
        for address, site in list(self.getSites().items()):
            self.updateSite(site, check_files=True)

    self.recheckPort()

    all_site_addresses = self.getSiteAddresses()
    site_addresses = [
        address for address in all_site_addresses
        if not self.isSiteUpdateTimeValid(address)
    ]

    log.info("%s: chosen %d sites (of %d)", task_description, len(site_addresses), len(all_site_addresses))

    sites_processed = 0
    sites_skipped = 0
    start_time = time.time()
    self.update_start_time = start_time
    progress_print_time = time.time()  # For throttled once-a-minute progress logging

    # Check sites integrity
    for site_address in site_addresses:
        site = None
        self.sleep(1)  # Pace the updates so we don't flood the network
        self.waitForInternetOnline()

        while self.isActiveMode() and self.shouldThrottleNewConnections():
            self.sleep(1)

        if not self.isActiveMode():
            break

        # Re-check the site just before updating: it may have been updated
        # or deleted while we were sleeping/throttled.
        site = self.getSite(site_address)
        if not site or site.isUpdateTimeValid() or self.siteIsInUpdatePool(site_address):
            sites_skipped += 1
            continue

        sites_processed += 1
        thread = self.spawnUpdateSite(site)

        if not self.isActiveMode():
            break

        # Log progress at most once per minute
        if time.time() - progress_print_time > 60:
            progress_print_time = time.time()
            time_spent = time.time() - start_time
            time_per_site = time_spent / float(sites_processed)
            sites_left = len(site_addresses) - sites_processed
            time_left = time_per_site * sites_left
            log.info("%s: DONE: %d sites in %.2fs (%.2fs per site); SKIPPED: %d sites; LEFT: %d sites in %.2fs",
                task_description,
                sites_processed,
                time_spent,
                time_per_site,
                sites_skipped,
                sites_left,
                time_left
            )

    if not self.isActiveMode():
        log.info("%s: stopped", task_description)
    else:
        log.info("%s: finished in %.2fs", task_description, time.time() - start_time)
|
||||||
|
|
||||||
|
def peekSiteForVerification(self):
    """Pick a random site whose periodic file check/verification is overdue.

    Returns (address, mode) where mode names the expired verification kind,
    or (None, None) when no site currently needs it.
    """
    check_interval = 60 * 60 * 24         # File check interval: one day
    verify_interval = 60 * 60 * 24 * 10   # Full verification interval: ten days
    addresses = self.getSiteAddresses()
    random.shuffle(addresses)
    for address in addresses:
        site = self.getSite(address)
        if not site:
            continue
        expired_mode = site.isFileVerificationExpired(check_interval, verify_interval)
        if expired_mode:
            return (address, expired_mode)
    return (None, None)
|
||||||
|
|
||||||
|
|
||||||
|
def sitesVerificationThread(self):
    """Background loop: run file check/verification for sites whose
    verification interval has expired (see peekSiteForVerification())."""
    log.info("sitesVerificationThread started")
    short_timeout = 20
    long_timeout = 120

    # Let the server settle before the first verification round
    self.sleep(long_timeout)

    while self.isActiveMode():
        site = None
        self.sleep(short_timeout)
        self.waitForInternetOnline()

        while self.isActiveMode() and self.shouldThrottleNewConnections():
            self.sleep(1)

        if not self.isActiveMode():
            break

        site_address, mode = self.peekSiteForVerification()
        if not site_address:
            self.sleep(long_timeout)  # Nothing is overdue: back off longer
            continue

        # Wait for a possibly running update of the same site to finish first
        while self.siteIsInUpdatePool(site_address) and self.isActiveMode():
            self.sleep(1)

        if not self.isActiveMode():
            break

        site = self.getSite(site_address)
        if not site:  # Deleted in the meantime
            continue

        # Translate the expired mode into updateSite() flags
        if mode == "verify":
            check_files = False
            verify_files = True
        elif mode == "check":
            check_files = True
            verify_files = False
        else:
            continue

        thread = self.spawnUpdateSite(site,
            check_files=check_files, verify_files=verify_files)

    log.info("sitesVerificationThread stopped")
|
||||||
|
|
||||||
|
def sitesMaintenanceThread(self, mode="full"):
    """Background loop running site.runPeriodicMaintenance() over all sites.

    mode "full" walks the whole site list each cycle; mode "short" restarts
    from the beginning of the list once the cycle exceeds its time limit
    (rationale in the comment inside the loop).
    """
    log.info("sitesMaintenanceThread(%s) started" % mode)

    startup = True  # First cycle after start: passed to runPeriodicMaintenance()

    short_timeout = 2
    min_long_timeout = 10
    max_long_timeout = 60 * 10
    long_timeout = min_long_timeout  # Adaptive idle delay between cycles
    short_cycle_time_limit = 60 * 2

    while self.isActiveMode():
        self.sleep(long_timeout)

        while self.isActiveMode() and self.shouldThrottleNewConnections():
            self.sleep(1)

        if not self.isActiveMode():
            break

        start_time = time.time()

        log.debug(
            "Starting <%s> maintenance cycle: connections=%s, internet=%s",
            mode,
            len(self.connections), self.isInternetOnline()
        )
        start_time = time.time()

        site_addresses = self.getSiteAddresses()

        sites_processed = 0

        for site_address in site_addresses:
            if not self.isActiveMode():
                break

            site = self.getSite(site_address)
            if not site:  # Deleted while iterating
                continue

            log.debug("Running maintenance for site: %s", site.address_short)

            done = site.runPeriodicMaintenance(startup=startup)
            site = None  # Drop the reference so the site object is not kept alive across sleep
            if done:
                sites_processed += 1
                self.sleep(short_timeout)

            # If we host hundreds of sites, the full maintenance cycle may take very
            # long time, especially on startup ( > 1 hour).
            # This means we are not able to run the maintenance procedure for active
            # sites frequently enough using just a single maintenance thread.
            # So we run 2 maintenance threads:
            #  * One running full cycles.
            #  * And one running short cycles for the most active sites.
            # When the short cycle runs out of the time limit, it restarts
            # from the beginning of the site list.
            if mode == "short" and time.time() - start_time > short_cycle_time_limit:
                break

        log.debug("<%s> maintenance cycle finished in %.2fs. Total sites: %d. Processed sites: %d. Timeout: %d",
            mode,
            time.time() - start_time,
            len(site_addresses),
            sites_processed,
            long_timeout
        )

        # Adapt the idle delay: shrink while there is work, grow while idle
        if sites_processed:
            long_timeout = max(int(long_timeout / 2), min_long_timeout)
        else:
            long_timeout = min(long_timeout + 1, max_long_timeout)

        site_addresses = None
        startup = False
    log.info("sitesMaintenanceThread(%s) stopped" % mode)
|
||||||
|
|
||||||
|
def keepAliveThread(self):
    """Generate minimal network activity while idle so that real Internet
    outages can be told apart from plain inactivity."""
    # This thread is mostly useless on a system under load, since it never does
    # any works, if we have active traffic.
    #
    # We should initiate some network activity to detect the Internet outage
    # and avoid false positives. We normally have some network activity
    # initiated by various parts on the application as well as network peers.
    # So it's not a problem.
    #
    # However, if it actually happens that we have no network traffic for
    # some time (say, we host just a couple of inactive sites, and no peers
    # are interested in connecting to them), we initiate some traffic by
    # performing the update for a random site. It's way better than just
    # silly pinging a random peer for no profit.
    log.info("keepAliveThread started")
    while self.isActiveMode():
        self.waitForInternetOnline()

        threshold = self.internet_outage_threshold / 2.0

        self.sleep(threshold / 2.0)

        while self.isActiveMode() and self.shouldThrottleNewConnections():
            self.sleep(1)

        if not self.isActiveMode():
            break

        last_activity_time = max(
            self.last_successful_internet_activity_time,
            self.last_outgoing_internet_activity_time)
        now = time.time()
        if not len(self.getSites()):
            continue  # No sites hosted: nothing to update
        if last_activity_time > now - threshold:
            continue  # Recent traffic seen: no probe needed
        if len(self.update_pool) != 0:
            continue  # An update is already running and will generate traffic

        log.info("No network activity for %.2fs. Running an update for a random site.",
            now - last_activity_time
        )
        self.update_pool.spawn(self.updateRandomSite, force=True)
    log.info("keepAliveThread stopped")
|
||||||
|
|
||||||
|
# Periodic reloading of tracker files
def reloadTrackerFilesThread(self):
    """Reload the tracker list from disk every 10 minutes while active.

    TODO: this should probably be more sophisticated — check whether the
    files actually changed, and do it more often.
    """
    log.info("reloadTrackerFilesThread started")
    reload_interval = 60 * 10
    while self.isActiveMode():
        self.sleep(reload_interval)
        if self.isActiveMode():
            config.loadTrackersFile()
        else:
            break
    log.info("reloadTrackerFilesThread stopped")
|
||||||
|
|
||||||
|
# Detects if computer back from wakeup
def wakeupWatcherThread(self):
    """Watch for system sleep/wakeup and local IP changes; when detected,
    invalidate update times and start a site update pass."""
    log.info("wakeupWatcherThread started")
    last_time = time.time()
    last_my_ips = socket.gethostbyname_ex('')[2]  # presumably the local host's IP list — TODO confirm for empty hostname
    while self.isActiveMode():
        self.sleep(30)
        if not self.isActiveMode():
            break
        is_time_changed = time.time() - max(self.last_request, last_time) > 60 * 3
        if is_time_changed:
            # If taken more than 3 minute then the computer was in sleep mode
            log.info(
                "Wakeup detected: time warp from %0.f to %0.f (%0.f sleep seconds), acting like startup..." %
                (last_time, time.time(), time.time() - last_time)
            )

        my_ips = socket.gethostbyname_ex('')[2]
        is_ip_changed = my_ips != last_my_ips
        if is_ip_changed:
            log.info("IP change detected from %s to %s" % (last_my_ips, my_ips))

        if is_time_changed or is_ip_changed:
            # Invalidate update times back to a randomly widened window before
            # the sleep started, then act like a startup.
            invalid_interval=(
                last_time - self.internet_outage_threshold - random.randint(60 * 5, 60 * 10),
                time.time()
            )
            self.invalidateUpdateTime(invalid_interval)
            self.recheck_port = True
            self.spawn(self.updateSites)

        last_time = time.time()
        last_my_ips = my_ips
    log.info("wakeupWatcherThread stopped")
|
||||||
|
|
||||||
|
    def setOfflineMode(self, offline_mode):
        # Delegate the flag change to the base class, then reconcile the
        # active-mode background threads with the new offline state.
        ConnectionServer.setOfflineMode(self, offline_mode)
        self.setupActiveMode()
|
||||||
|
|
||||||
|
def setPassiveMode(self, passive_mode):
|
||||||
|
if self.passive_mode == passive_mode:
|
||||||
|
return
|
||||||
|
self.passive_mode = passive_mode
|
||||||
|
if self.passive_mode:
|
||||||
|
log.info("passive mode is ON");
|
||||||
|
else:
|
||||||
|
log.info("passive mode is OFF");
|
||||||
|
self.setupActiveMode()
|
||||||
|
|
||||||
|
    def isPassiveMode(self):
        # True when the periodic update/maintenance machinery is disabled.
        return self.passive_mode
|
||||||
|
|
||||||
|
def setupActiveMode(self):
|
||||||
|
active_mode = (not self.passive_mode) and (not self.isOfflineMode())
|
||||||
|
if self.active_mode == active_mode:
|
||||||
|
return
|
||||||
|
self.active_mode = active_mode
|
||||||
|
if self.active_mode:
|
||||||
|
log.info("active mode is ON");
|
||||||
|
self.enterActiveMode();
|
||||||
|
else:
|
||||||
|
log.info("active mode is OFF");
|
||||||
|
self.leaveActiveMode();
|
||||||
|
|
||||||
|
def killActiveModeThreads(self):
|
||||||
|
for key, thread in list(self.active_mode_threads.items()):
|
||||||
|
if thread:
|
||||||
|
if not thread.ready():
|
||||||
|
log.info("killing %s" % key)
|
||||||
|
gevent.kill(thread)
|
||||||
|
del self.active_mode_threads[key]
|
||||||
|
|
||||||
|
    def leaveActiveMode(self):
        # Counterpart of enterActiveMode(). Intentionally a no-op: the
        # background greenlets poll isActiveMode() and exit by themselves.
        pass
|
||||||
|
|
||||||
|
def enterActiveMode(self):
|
||||||
|
self.killActiveModeThreads()
|
||||||
|
x = self.active_mode_threads
|
||||||
|
p = self.active_mode_thread_pool
|
||||||
|
x["thread_keep_alive"] = p.spawn(self.keepAliveThread)
|
||||||
|
x["thread_wakeup_watcher"] = p.spawn(self.wakeupWatcherThread)
|
||||||
|
x["thread_sites_verification"] = p.spawn(self.sitesVerificationThread)
|
||||||
|
x["thread_reload_tracker_files"] = p.spawn(self.reloadTrackerFilesThread)
|
||||||
|
x["thread_sites_maintenance_full"] = p.spawn(self.sitesMaintenanceThread, mode="full")
|
||||||
|
x["thread_sites_maintenance_short"] = p.spawn(self.sitesMaintenanceThread, mode="short")
|
||||||
|
x["thread_initial_site_updater"] = p.spawn(self.updateSites)
|
||||||
|
|
||||||
|
# Returns True, if an active mode thread should keep going,
|
||||||
|
# i.e active mode is enabled and the server not going to shutdown
|
||||||
|
def isActiveMode(self):
|
||||||
|
self.setupActiveMode()
|
||||||
|
if not self.active_mode:
|
||||||
|
return False
|
||||||
|
if not self.running:
|
||||||
|
return False
|
||||||
|
if self.stopping:
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
    # Bind and start serving sites
    # If passive_mode is False, FileServer starts the full-featured file serving:
    # * Checks for updates at startup.
    # * Checks site's integrity.
    # * Runs periodic update checks.
    # * Watches for internet being up or down and for computer to wake up and runs update checks.
    # If passive_mode is True, all the mentioned activity is disabled.
    def start(self, passive_mode=False, check_sites=None, check_connections=True):
        """Start the file server; blocks in ConnectionServer.listen() until stopped.

        Returns False immediately if a shutdown is already in progress.
        """
        # Backward compatibility for a misnamed argument:
        if check_sites is not None:
            passive_mode = not check_sites

        if self.stopping:
            return False

        ConnectionServer.start(self, check_connections=check_connections)

        try:
            self.stream_server.start()
        except Exception as err:
            # Non-fatal: log and keep going (the main listener may still work)
            log.error("Error listening on: %s:%s: %s" % (self.ip, self.port, err))

        if config.debug:
            # Auto reload FileRequest on change
            from Debug import DebugReloader
            DebugReloader.watcher.addCallback(self.reload)

        # XXX: for initializing self.sites
        # Remove this line when self.sites gets completely unused
        self.getSites()

        self.setPassiveMode(passive_mode)

        # Blocks until the server is shut down
        ConnectionServer.listen(self)

        log.info("Stopped.")
|
||||||
|
|
||||||
|
    def stop(self, ui_websocket=None):
        """Shut the server down, closing the UPnP port mapping if we opened one."""
        if self.running and self.portchecker.upnp_port_opened:
            log.debug('Closing port %d' % self.port)
            try:
                self.portchecker.portClose(self.port)
                log.info('Closed port via upnp.')
            except Exception as err:
                # Best-effort: a stale mapping is harmless
                log.info("Failed at attempt to use upnp to close port: %s" % err)

        return ConnectionServer.stop(self, ui_websocket=ui_websocket)
|
|
@ -0,0 +1,2 @@
|
||||||
|
from .FileServer import FileServer
|
||||||
|
from .FileRequest import FileRequest
|
|
@ -0,0 +1,529 @@
|
||||||
|
import logging
|
||||||
|
import time
|
||||||
|
import sys
|
||||||
|
import itertools
|
||||||
|
import collections
|
||||||
|
|
||||||
|
import gevent
|
||||||
|
|
||||||
|
import io
|
||||||
|
from Debug import Debug
|
||||||
|
from Config import config
|
||||||
|
from util import helper
|
||||||
|
from .PeerHashfield import PeerHashfield
|
||||||
|
from Plugin import PluginManager
|
||||||
|
|
||||||
|
if config.use_tempfiles:
|
||||||
|
import tempfile
|
||||||
|
|
||||||
|
|
||||||
|
# Communicate remote peers
|
||||||
|
@PluginManager.acceptPlugins
|
||||||
|
class Peer(object):
|
||||||
|
    def __init__(self, ip, port, site=None, connection_server=None):
        """A remote peer for a site.

        ip/port: peer address; site: owning Site (may be None);
        connection_server: explicit ConnectionServer override (see getConnectionServer).
        """
        self.ip = ip
        self.port = port
        self.site = site
        self.key = "%s:%s" % (ip, port)  # Identity used in peer dictionaries

        self.ip_type = None  # Cached by updateCachedState()

        self.removed = False  # Set by remove(); most requests short-circuit on it

        self.log_level = logging.DEBUG
        self.connection_error_log_level = logging.DEBUG

        self.connection = None
        self.connection_server = connection_server
        self.has_hashfield = False  # Lazy hashfield object not created yet
        self.time_hashfield = None  # Last time peer's hashfield downloaded
        self.time_my_hashfield_sent = None  # Last time my hashfield sent to peer
        self.time_found = time.time()  # Time of last found in the torrent tracker
        self.time_response = 0  # Time of last successful response from peer
        self.time_added = time.time()
        self.last_ping = None  # Last response time for ping
        self.last_pex = 0  # Last query/response time for pex
        self.is_tracker_connection = False  # Tracker connection instead of normal peer
        self.reputation = 0  # More likely to connect if larger
        self.last_content_json_update = 0.0  # Modify date of last received content.json
        self.protected = 0  # Timestamp until which the connection is shielded from cleanup
        self.reachable = None  # Cached by updateCachedState(); None = not computed yet

        self.connection_error = 0  # Series of connection error
        self.hash_failed = 0  # Number of bad files from peer
        self.download_bytes = 0  # Bytes downloaded
        self.download_time = 0  # Time spent to download

        # Commands that temporarily protect the connection (see markProtected)
        self.protectedRequests = ["getFile", "streamFile", "update", "listModified"]
|
||||||
|
|
||||||
|
    def __getattr__(self, key):
        # Lazily create the (relatively heavy) PeerHashfield on first access;
        # most peers never need one, so it is not built in __init__.
        if key == "hashfield":
            self.has_hashfield = True
            self.hashfield = PeerHashfield()
            return self.hashfield
        else:
            # Raise appropriately formatted attribute error
            return object.__getattribute__(self, key)
|
||||||
|
|
||||||
|
def log(self, text, log_level = None):
|
||||||
|
if log_level is None:
|
||||||
|
log_level = self.log_level
|
||||||
|
if log_level <= logging.DEBUG:
|
||||||
|
if not config.verbose:
|
||||||
|
return # Only log if we are in debug mode
|
||||||
|
|
||||||
|
logger = None
|
||||||
|
|
||||||
|
if self.site:
|
||||||
|
logger = self.site.log
|
||||||
|
else:
|
||||||
|
logger = logging.getLogger()
|
||||||
|
|
||||||
|
logger.log(log_level, "%s:%s %s" % (self.ip, self.port, text))
|
||||||
|
|
||||||
|
# Protect connection from being closed by site.cleanupPeers()
|
||||||
|
def markProtected(self, interval=60*2):
|
||||||
|
self.protected = max(self.protected, time.time() + interval)
|
||||||
|
|
||||||
|
def isProtected(self):
|
||||||
|
if self.protected > 0:
|
||||||
|
if self.protected < time.time():
|
||||||
|
self.protected = 0
|
||||||
|
return self.protected > 0
|
||||||
|
|
||||||
|
def isTtlExpired(self, ttl):
|
||||||
|
last_activity = max(self.time_found, self.time_response)
|
||||||
|
return (time.time() - last_activity) > ttl
|
||||||
|
|
||||||
|
# Since 0.8.0
|
||||||
|
def isConnected(self):
|
||||||
|
if self.connection and not self.connection.connected:
|
||||||
|
self.connection = None
|
||||||
|
return self.connection and self.connection.connected
|
||||||
|
|
||||||
|
# Peer proved to to be connectable recently
|
||||||
|
# Since 0.8.0
|
||||||
|
def isConnectable(self):
|
||||||
|
if self.connection_error >= 1: # The last connection attempt failed
|
||||||
|
return False
|
||||||
|
if time.time() - self.time_response > 60 * 60 * 2: # Last successful response more than 2 hours ago
|
||||||
|
return False
|
||||||
|
return self.isReachable()
|
||||||
|
|
||||||
|
# Since 0.8.0
|
||||||
|
def isReachable(self):
|
||||||
|
if self.reachable is None:
|
||||||
|
self.updateCachedState()
|
||||||
|
return self.reachable
|
||||||
|
|
||||||
|
# Since 0.8.0
|
||||||
|
def getIpType(self):
|
||||||
|
if not self.ip_type:
|
||||||
|
self.updateCachedState()
|
||||||
|
return self.ip_type
|
||||||
|
|
||||||
|
# We cache some ConnectionServer-related state for better performance.
|
||||||
|
# This kind of state currently doesn't change during a program session,
|
||||||
|
# and it's safe to read and cache it just once. But future versions
|
||||||
|
# may bring more pieces of dynamic configuration. So we update the state
|
||||||
|
# on each peer.found().
|
||||||
|
def updateCachedState(self):
|
||||||
|
connection_server = self.getConnectionServer()
|
||||||
|
if not self.port or self.port == 1: # Port 1 considered as "no open port"
|
||||||
|
self.reachable = False
|
||||||
|
else:
|
||||||
|
self.reachable = connection_server.isIpReachable(self.ip)
|
||||||
|
self.ip_type = connection_server.getIpType(self.ip)
|
||||||
|
|
||||||
|
|
||||||
|
# FIXME:
|
||||||
|
# This should probably be changed.
|
||||||
|
# When creating a peer object, the caller must provide either `connection_server`,
|
||||||
|
# or `site`, so Peer object is able to use `site.connection_server`.
|
||||||
|
def getConnectionServer(self):
|
||||||
|
if self.connection_server:
|
||||||
|
connection_server = self.connection_server
|
||||||
|
elif self.site:
|
||||||
|
connection_server = self.site.connection_server
|
||||||
|
else:
|
||||||
|
import main
|
||||||
|
connection_server = main.file_server
|
||||||
|
return connection_server
|
||||||
|
|
||||||
|
    # Connect to host
    def connect(self, connection=None):
        """Obtain a connection to this peer.

        connection: an already-established Connection to adopt; otherwise one
        is fetched from the pool or newly created.
        Returns the Connection or None on failure.
        """
        # Clamp reputation into [-10, 10]
        if self.reputation < -10:
            self.reputation = -10
        if self.reputation > 10:
            self.reputation = 10

        if self.connection:
            self.log("Getting connection (Closing %s)..." % self.connection)
            self.connection.close("Connection change")
        else:
            self.log("Getting connection (reputation: %s)..." % self.reputation)

        if connection:  # Connection specified
            self.log("Assigning connection %s" % connection)
            self.connection = connection
            self.connection.sites += 1
        else:  # Try to find from connection pool or create new connection
            self.connection = None

            try:
                connection_server = self.getConnectionServer()
                self.connection = connection_server.getConnection(self.ip, self.port, site=self.site, is_tracker_connection=self.is_tracker_connection)
                if self.connection and self.connection.connected:
                    self.reputation += 1  # Successful connect raises reputation
                    self.connection.sites += 1
            except Exception as err:
                self.onConnectionError("Getting connection error")
                self.log("Getting connection error: %s (connection_error: %s, hash_failed: %s)" %
                         (Debug.formatException(err), self.connection_error, self.hash_failed),
                         log_level=self.connection_error_log_level)
                self.connection = None
        return self.connection
|
||||||
|
|
||||||
|
def disconnect(self, reason="Unknown"):
|
||||||
|
if self.connection:
|
||||||
|
self.connection.close(reason)
|
||||||
|
self.connection = None
|
||||||
|
|
||||||
|
# Check if we have connection to peer
|
||||||
|
def findConnection(self):
|
||||||
|
if self.connection and self.connection.connected: # We have connection to peer
|
||||||
|
return self.connection
|
||||||
|
else: # Try to find from other sites connections
|
||||||
|
self.connection = self.getConnectionServer().getConnection(self.ip, self.port, create=False, site=self.site)
|
||||||
|
if self.connection:
|
||||||
|
self.connection.sites += 1
|
||||||
|
return self.connection
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
if self.site:
|
||||||
|
return "Peer:%-12s of %s" % (self.ip, self.site.address_short)
|
||||||
|
else:
|
||||||
|
return "Peer:%-12s" % self.ip
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return "<%s>" % self.__str__()
|
||||||
|
|
||||||
|
def packMyAddress(self):
|
||||||
|
if self.ip.endswith(".onion"):
|
||||||
|
return helper.packOnionAddress(self.ip, self.port)
|
||||||
|
else:
|
||||||
|
return helper.packAddress(self.ip, self.port)
|
||||||
|
|
||||||
|
# Found a peer from a source
|
||||||
|
def found(self, source="other"):
|
||||||
|
if self.reputation < 5:
|
||||||
|
if source == "tracker":
|
||||||
|
if self.ip.endswith(".onion"):
|
||||||
|
self.reputation += 1
|
||||||
|
else:
|
||||||
|
self.reputation += 2
|
||||||
|
elif source == "local":
|
||||||
|
self.reputation += 20
|
||||||
|
|
||||||
|
if source in ("tracker", "local"):
|
||||||
|
self.site.peers_recent.appendleft(self)
|
||||||
|
self.time_found = time.time()
|
||||||
|
self.updateCachedState()
|
||||||
|
|
||||||
|
# Send a command to peer and return response value
|
||||||
|
def request(self, cmd, params={}, stream_to=None):
|
||||||
|
if self.removed:
|
||||||
|
return False
|
||||||
|
|
||||||
|
if not self.connection or self.connection.closed:
|
||||||
|
self.connect()
|
||||||
|
if not self.connection:
|
||||||
|
self.onConnectionError("Reconnect error")
|
||||||
|
return None # Connection failed
|
||||||
|
|
||||||
|
self.log("Send request: %s %s %s %s" % (params.get("site", ""), cmd, params.get("inner_path", ""), params.get("location", "")))
|
||||||
|
|
||||||
|
for retry in range(1, 4): # Retry 3 times
|
||||||
|
try:
|
||||||
|
if cmd in self.protectedRequests:
|
||||||
|
self.markProtected()
|
||||||
|
if not self.connection:
|
||||||
|
raise Exception("No connection found")
|
||||||
|
res = self.connection.request(cmd, params, stream_to)
|
||||||
|
if not res:
|
||||||
|
raise Exception("Send error")
|
||||||
|
if "error" in res:
|
||||||
|
self.log("%s error: %s" % (cmd, res["error"]))
|
||||||
|
self.onConnectionError("Response error")
|
||||||
|
break
|
||||||
|
else: # Successful request, reset connection error num
|
||||||
|
self.connection_error = 0
|
||||||
|
self.time_response = time.time()
|
||||||
|
if res:
|
||||||
|
return res
|
||||||
|
else:
|
||||||
|
raise Exception("Invalid response: %s" % res)
|
||||||
|
except Exception as err:
|
||||||
|
if type(err).__name__ == "Notify": # Greenlet killed by worker
|
||||||
|
self.log("Peer worker got killed: %s, aborting cmd: %s" % (err.message, cmd))
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
self.onConnectionError("Request error")
|
||||||
|
self.log(
|
||||||
|
"%s (connection_error: %s, hash_failed: %s, retry: %s)" %
|
||||||
|
(Debug.formatException(err), self.connection_error, self.hash_failed, retry)
|
||||||
|
)
|
||||||
|
time.sleep(1 * retry)
|
||||||
|
self.connect()
|
||||||
|
return None # Failed after 4 retry
|
||||||
|
|
||||||
|
    # Get a file content from peer
    def getFile(self, site, inner_path, file_size=None, pos_from=0, pos_to=None, streaming=False):
        """Download (part of) a file from the peer in chunks.

        site: site address string; inner_path: file path within the site;
        pos_from/pos_to: optional byte range; streaming: force streamFile.
        Returns a buffer seeked to position 0, or False on error.
        """
        if self.removed:
            return False

        # Bigger chunks for big files to cut down round-trips
        if file_size and file_size > 5 * 1024 * 1024:
            max_read_size = 1024 * 1024
        else:
            max_read_size = 512 * 1024

        if pos_to:
            read_bytes = min(max_read_size, pos_to - pos_from)
        else:
            read_bytes = max_read_size

        location = pos_from

        # Spill large downloads to disk when tempfiles are enabled
        if config.use_tempfiles:
            buff = tempfile.SpooledTemporaryFile(max_size=16 * 1024, mode='w+b')
        else:
            buff = io.BytesIO()

        s = time.time()
        while True:  # Read in smaller parts
            if config.stream_downloads or read_bytes > 256 * 1024 or streaming:
                # Stream the body straight into buff (avoids an in-memory copy)
                res = self.request("streamFile", {"site": site, "inner_path": inner_path, "location": location, "read_bytes": read_bytes, "file_size": file_size}, stream_to=buff)
                if not res or "location" not in res:  # Error
                    return False
            else:
                self.log("Send: %s" % inner_path)
                res = self.request("getFile", {"site": site, "inner_path": inner_path, "location": location, "read_bytes": read_bytes, "file_size": file_size})
                if not res or "location" not in res:  # Error
                    return False
                self.log("Recv: %s" % inner_path)
                buff.write(res["body"])
                res["body"] = None  # Save memory

            if res["location"] == res["size"] or res["location"] == pos_to:  # End of file
                break
            else:
                location = res["location"]
                if pos_to:
                    read_bytes = min(max_read_size, pos_to - location)

        if pos_to:
            recv = pos_to - pos_from
        else:
            recv = res["location"]

        self.download_bytes += recv
        self.download_time += (time.time() - s)
        if self.site:
            # Per-site received-bytes accounting
            self.site.settings["bytes_recv"] = self.site.settings.get("bytes_recv", 0) + recv
        self.log("Downloaded: %s, pos: %s, read_bytes: %s" % (inner_path, buff.tell(), read_bytes))
        buff.seek(0)
        return buff
|
||||||
|
|
||||||
|
    # Send a ping request
    def ping(self, timeout=10.0, tryes=3):
        """Measure the round-trip time to the peer.

        Returns the response time in seconds, None on failure, or False if
        the peer has been removed.
        """
        if self.removed:
            return False

        response_time = None
        # NOTE(review): range(1, tryes) performs only tryes-1 attempts (2 by
        # default) despite the comment below — confirm whether that is intended.
        for retry in range(1, tryes):  # Retry 3 times
            s = time.time()
            with gevent.Timeout(timeout, False):  # False: suppress the timeout instead of raising
                res = self.request("ping")

                if res and "body" in res and res["body"] == b"Pong!":
                    response_time = time.time() - s
                    break  # All fine, exit from for loop
            # Timeout reached or bad response
            # NOTE(review): if the very first attempt times out before
            # request() returns, `res` stays unbound on later reads — confirm.
            self.onConnectionError("Ping timeout")
            self.connect()  # Try a fresh connection before the next attempt
            time.sleep(1)

        if response_time:
            self.log("Ping: %.3f" % response_time)
        else:
            self.log("Ping failed")
        self.last_ping = response_time
        return response_time
|
||||||
|
|
||||||
|
# Request peer exchange from peer
|
||||||
|
def pex(self, site=None, need_num=5, request_interval=60*2):
|
||||||
|
if self.removed:
|
||||||
|
return False
|
||||||
|
|
||||||
|
if not site:
|
||||||
|
site = self.site # If no site defined request peers for this site
|
||||||
|
|
||||||
|
if self.last_pex + request_interval >= time.time():
|
||||||
|
return False
|
||||||
|
|
||||||
|
self.last_pex = time.time()
|
||||||
|
|
||||||
|
# give back 5 connectible peers
|
||||||
|
packed_peers = helper.packPeers(self.site.getConnectablePeers(5, allow_private=False))
|
||||||
|
request = {"site": site.address, "peers": packed_peers["ipv4"], "need": need_num}
|
||||||
|
if packed_peers["onion"]:
|
||||||
|
request["peers_onion"] = packed_peers["onion"]
|
||||||
|
if packed_peers["ipv6"]:
|
||||||
|
request["peers_ipv6"] = packed_peers["ipv6"]
|
||||||
|
res = self.request("pex", request)
|
||||||
|
self.last_pex = time.time()
|
||||||
|
if not res or "error" in res:
|
||||||
|
return False
|
||||||
|
added = 0
|
||||||
|
|
||||||
|
# Remove unsupported peer types
|
||||||
|
if "peers_ipv6" in res and self.connection and "ipv6" not in self.connection.server.supported_ip_types:
|
||||||
|
del res["peers_ipv6"]
|
||||||
|
|
||||||
|
if "peers_onion" in res and self.connection and "onion" not in self.connection.server.supported_ip_types:
|
||||||
|
del res["peers_onion"]
|
||||||
|
|
||||||
|
# Add IPv4 + IPv6
|
||||||
|
for peer in itertools.chain(res.get("peers", []), res.get("peers_ipv6", [])):
|
||||||
|
address = helper.unpackAddress(peer)
|
||||||
|
if site.addPeer(*address, source="pex"):
|
||||||
|
added += 1
|
||||||
|
|
||||||
|
# Add Onion
|
||||||
|
for peer in res.get("peers_onion", []):
|
||||||
|
address = helper.unpackOnionAddress(peer)
|
||||||
|
if site.addPeer(*address, source="pex"):
|
||||||
|
added += 1
|
||||||
|
|
||||||
|
if added:
|
||||||
|
self.log("Added peers using pex: %s" % added)
|
||||||
|
|
||||||
|
return added
|
||||||
|
|
||||||
|
# List modified files since the date
|
||||||
|
# Return: {inner_path: modification date,...}
|
||||||
|
def listModified(self, since):
|
||||||
|
if self.removed:
|
||||||
|
return False
|
||||||
|
return self.request("listModified", {"since": since, "site": self.site.address})
|
||||||
|
|
||||||
|
def updateHashfield(self, force=False):
|
||||||
|
if self.removed:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Don't update hashfield again in 5 min
|
||||||
|
if self.time_hashfield and time.time() - self.time_hashfield < 5 * 60 and not force:
|
||||||
|
return False
|
||||||
|
|
||||||
|
self.time_hashfield = time.time()
|
||||||
|
res = self.request("getHashfield", {"site": self.site.address})
|
||||||
|
if not res or "error" in res or "hashfield_raw" not in res:
|
||||||
|
return False
|
||||||
|
self.hashfield.replaceFromBytes(res["hashfield_raw"])
|
||||||
|
|
||||||
|
return self.hashfield
|
||||||
|
|
||||||
|
# Find peers for hashids
|
||||||
|
# Return: {hash1: ["ip:port", "ip:port",...],...}
|
||||||
|
def findHashIds(self, hash_ids):
|
||||||
|
if self.removed:
|
||||||
|
return False
|
||||||
|
|
||||||
|
res = self.request("findHashIds", {"site": self.site.address, "hash_ids": hash_ids})
|
||||||
|
if not res or "error" in res or type(res) is not dict:
|
||||||
|
return False
|
||||||
|
|
||||||
|
back = collections.defaultdict(list)
|
||||||
|
|
||||||
|
for ip_type in ["ipv4", "ipv6", "onion"]:
|
||||||
|
if ip_type == "ipv4":
|
||||||
|
key = "peers"
|
||||||
|
else:
|
||||||
|
key = "peers_%s" % ip_type
|
||||||
|
for hash, peers in list(res.get(key, {}).items())[0:30]:
|
||||||
|
if ip_type == "onion":
|
||||||
|
unpacker_func = helper.unpackOnionAddress
|
||||||
|
else:
|
||||||
|
unpacker_func = helper.unpackAddress
|
||||||
|
|
||||||
|
back[hash] += list(map(unpacker_func, peers))
|
||||||
|
|
||||||
|
for hash in res.get("my", []):
|
||||||
|
if self.connection:
|
||||||
|
back[hash].append((self.connection.ip, self.connection.port))
|
||||||
|
else:
|
||||||
|
back[hash].append((self.ip, self.port))
|
||||||
|
|
||||||
|
return back
|
||||||
|
|
||||||
|
# Send my hashfield to peer
|
||||||
|
# Return: True if sent
|
||||||
|
def sendMyHashfield(self):
|
||||||
|
if self.connection and self.connection.handshake.get("rev", 0) < 510:
|
||||||
|
return False # Not supported
|
||||||
|
if self.time_my_hashfield_sent and self.site.content_manager.hashfield.time_changed <= self.time_my_hashfield_sent:
|
||||||
|
return False # Peer already has the latest hashfield
|
||||||
|
|
||||||
|
res = self.request("setHashfield", {"site": self.site.address, "hashfield_raw": self.site.content_manager.hashfield.tobytes()})
|
||||||
|
if not res or "error" in res:
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
self.time_my_hashfield_sent = time.time()
|
||||||
|
return True
|
||||||
|
|
||||||
|
def publish(self, address, inner_path, body, modified, diffs=[]):
|
||||||
|
if self.removed:
|
||||||
|
return False
|
||||||
|
|
||||||
|
if len(body) > 10 * 1024 and self.connection and self.connection.handshake.get("rev", 0) >= 4095:
|
||||||
|
# To save bw we don't push big content.json to peers
|
||||||
|
body = b""
|
||||||
|
|
||||||
|
return self.request("update", {
|
||||||
|
"site": address,
|
||||||
|
"inner_path": inner_path,
|
||||||
|
"body": body,
|
||||||
|
"modified": modified,
|
||||||
|
"diffs": diffs
|
||||||
|
})
|
||||||
|
|
||||||
|
# Stop and remove from site
|
||||||
|
def remove(self, reason="Removing"):
|
||||||
|
self.removed = True
|
||||||
|
self.log("Removing peer with reason: <%s>. Connection error: %s, Hash failed: %s" % (reason, self.connection_error, self.hash_failed))
|
||||||
|
if self.site:
|
||||||
|
self.site.deregisterPeer(self)
|
||||||
|
# No way: self.site = None
|
||||||
|
# We don't assign None to self.site here because it leads to random exceptions in various threads,
|
||||||
|
# that hold references to the peer and still believe it belongs to the site.
|
||||||
|
|
||||||
|
self.disconnect(reason)
|
||||||
|
|
||||||
|
# - EVENTS -
|
||||||
|
|
||||||
|
# On connection error
|
||||||
|
def onConnectionError(self, reason="Unknown"):
|
||||||
|
if not self.getConnectionServer().isInternetOnline():
|
||||||
|
return
|
||||||
|
self.connection_error += 1
|
||||||
|
if self.site and len(self.site.peers) > 200:
|
||||||
|
limit = 3
|
||||||
|
else:
|
||||||
|
limit = 6
|
||||||
|
self.reputation -= 1
|
||||||
|
if self.connection_error >= limit: # Dead peer
|
||||||
|
self.remove("Connection error limit reached: %s. Provided message: %s" % (limit, reason))
|
||||||
|
|
||||||
|
    # Done working with peer
    def onWorkerDone(self):
        # Intentionally a no-op extension point; the class accepts plugins
        # (@PluginManager.acceptPlugins) which presumably override this hook.
        pass
|
|
@ -0,0 +1,75 @@
|
||||||
|
import array
|
||||||
|
import time
|
||||||
|
|
||||||
|
|
||||||
|
class PeerHashfield(object):
    """Compact set of 16-bit hash ids a peer claims to have (optional-file tracking).

    A hash id is the first 4 hex digits of a file hash, stored in an
    unsigned-short array for a tiny memory footprint.
    """
    # __slots__ also names the delegated special methods assigned in
    # createStorage(); looking them up through the slot descriptor is what
    # makes len()/iter() on instances work despite __slots__.
    __slots__ = ("storage", "time_changed", "append", "remove", "tobytes", "frombytes", "__len__", "__iter__")

    def __init__(self):
        self.storage = self.createStorage()
        self.time_changed = time.time()

    def createStorage(self):
        """Create a fresh backing array and re-bind the delegated fast paths."""
        storage = array.array("H")
        # Delegate hot-path operations straight to the array methods for speed
        self.append = storage.append
        self.remove = storage.remove
        self.tobytes = storage.tobytes
        self.frombytes = storage.frombytes
        self.__len__ = storage.__len__
        self.__iter__ = storage.__iter__
        return storage

    def getHashId(self, hash):
        """Map a hex hash string to its 16-bit hash id (first 4 hex digits)."""
        return int(hash[0:4], 16)

    def appendHashId(self, hash_id):
        """Add hash_id if absent. Return True when it was added."""
        if hash_id in self.storage:
            return False
        self.storage.append(hash_id)
        self.time_changed = time.time()
        return True

    def appendHash(self, hash):
        """Add the hash id of a hex hash string. Return True when added."""
        # DRY: previously duplicated appendHashId's body with the conversion inline
        return self.appendHashId(self.getHashId(hash))

    def removeHashId(self, hash_id):
        """Remove hash_id if present. Return True when it was removed."""
        if hash_id not in self.storage:
            return False
        self.storage.remove(hash_id)
        self.time_changed = time.time()
        return True

    def removeHash(self, hash):
        """Remove the hash id of a hex hash string. Return True when removed."""
        return self.removeHashId(self.getHashId(hash))

    def hasHash(self, hash):
        """True when the hash id of the given hex hash string is stored."""
        return self.getHashId(hash) in self.storage

    def replaceFromBytes(self, hashfield_raw):
        """Replace the whole field with serialized bytes (from tobytes())."""
        self.storage = self.createStorage()
        self.storage.frombytes(hashfield_raw)
        self.time_changed = time.time()
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Micro-benchmark: time bulk insertion and repeated lookup.
    field = PeerHashfield()
    started = time.time()
    for hash_id in range(10000):
        field.appendHashId(hash_id)
    print(time.time() - started)
    started = time.time()
    for _ in range(10000):
        field.hasHash("AABB")
    print(time.time() - started)
|
|
@ -0,0 +1,189 @@
|
||||||
|
import logging
|
||||||
|
import urllib.request
|
||||||
|
import urllib.parse
|
||||||
|
import re
|
||||||
|
import time
|
||||||
|
|
||||||
|
from Debug import Debug
|
||||||
|
from util import UpnpPunch
|
||||||
|
|
||||||
|
|
||||||
|
class PeerPortchecker(object):
    """Opens our port via UPnP and verifies external reachability."""
    # External checker services per address family, tried in order until
    # one succeeds (method names resolved via getattr in portCheck).
    checker_functions = {
        "ipv4": ["checkIpfingerprints", "checkCanyouseeme"],
        "ipv6": ["checkMyaddr", "checkIpv6scanner"]
    }
    def __init__(self, file_server):
        # file_server: the FileServer whose port we open/check; used to see
        # whether any external incoming connection actually arrived.
        self.log = logging.getLogger("PeerPortchecker")
        self.upnp_port_opened = False  # True after a successful UPnP mapping
        self.file_server = file_server
|
||||||
|
|
||||||
|
def requestUrl(self, url, post_data=None):
|
||||||
|
if type(post_data) is dict:
|
||||||
|
post_data = urllib.parse.urlencode(post_data).encode("utf8")
|
||||||
|
req = urllib.request.Request(url, post_data)
|
||||||
|
req.add_header("Referer", url)
|
||||||
|
req.add_header("User-Agent", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11")
|
||||||
|
req.add_header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
|
||||||
|
return urllib.request.urlopen(req, timeout=20.0)
|
||||||
|
|
||||||
|
def portOpen(self, port):
|
||||||
|
self.log.info("Trying to open port using UpnpPunch...")
|
||||||
|
|
||||||
|
try:
|
||||||
|
UpnpPunch.ask_to_open_port(port, 'ZeroNet', retries=3, protos=["TCP"])
|
||||||
|
self.upnp_port_opened = True
|
||||||
|
except Exception as err:
|
||||||
|
self.log.warning("UpnpPunch run error: %s" % Debug.formatException(err))
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
    def portClose(self, port):
        # Remove the UPnP TCP mapping created by portOpen().
        return UpnpPunch.ask_to_close_port(port, protos=["TCP"])
|
||||||
|
|
||||||
|
def portCheck(self, port, ip_type="ipv4"):
|
||||||
|
checker_functions = self.checker_functions[ip_type]
|
||||||
|
|
||||||
|
for func_name in checker_functions:
|
||||||
|
func = getattr(self, func_name)
|
||||||
|
s = time.time()
|
||||||
|
try:
|
||||||
|
res = func(port)
|
||||||
|
if res:
|
||||||
|
self.log.info(
|
||||||
|
"Checked port %s (%s) using %s result: %s in %.3fs" %
|
||||||
|
(port, ip_type, func_name, res, time.time() - s)
|
||||||
|
)
|
||||||
|
time.sleep(0.1)
|
||||||
|
if res["opened"] and not self.file_server.had_external_incoming:
|
||||||
|
res["opened"] = False
|
||||||
|
self.log.warning("Port %s:%s looks opened, but no incoming connection" % (res["ip"], port))
|
||||||
|
break
|
||||||
|
except Exception as err:
|
||||||
|
self.log.warning(
|
||||||
|
"%s check error: %s in %.3fs" %
|
||||||
|
(func_name, Debug.formatException(err), time.time() - s)
|
||||||
|
)
|
||||||
|
res = {"ip": None, "opened": False}
|
||||||
|
|
||||||
|
return res
|
||||||
|
|
||||||
|
def checkCanyouseeme(self, port):
|
||||||
|
data = urllib.request.urlopen("https://www.canyouseeme.org/", b"ip=1.1.1.1&port=%s" % str(port).encode("ascii"), timeout=20.0).read().decode("utf8")
|
||||||
|
|
||||||
|
message = re.match(r'.*<p style="padding-left:15px">(.*?)</p>', data, re.DOTALL).group(1)
|
||||||
|
message = re.sub(r"<.*?>", "", message.replace("<br>", " ").replace(" ", " ")) # Strip http tags
|
||||||
|
|
||||||
|
match = re.match(r".*service on (.*?) on", message)
|
||||||
|
if match:
|
||||||
|
ip = match.group(1)
|
||||||
|
else:
|
||||||
|
raise Exception("Invalid response: %s" % message)
|
||||||
|
|
||||||
|
if "Success" in message:
|
||||||
|
return {"ip": ip, "opened": True}
|
||||||
|
elif "Error" in message:
|
||||||
|
return {"ip": ip, "opened": False}
|
||||||
|
else:
|
||||||
|
raise Exception("Invalid response: %s" % message)
|
||||||
|
|
||||||
|
def checkIpfingerprints(self, port):
|
||||||
|
data = self.requestUrl("https://www.ipfingerprints.com/portscan.php").read().decode("utf8")
|
||||||
|
ip = re.match(r'.*name="remoteHost".*?value="(.*?)"', data, re.DOTALL).group(1)
|
||||||
|
|
||||||
|
post_data = {
|
||||||
|
"remoteHost": ip, "start_port": port, "end_port": port,
|
||||||
|
"normalScan": "Yes", "scan_type": "connect2", "ping_type": "none"
|
||||||
|
}
|
||||||
|
message = self.requestUrl("https://www.ipfingerprints.com/scripts/getPortsInfo.php", post_data).read().decode("utf8")
|
||||||
|
|
||||||
|
if "open" in message:
|
||||||
|
return {"ip": ip, "opened": True}
|
||||||
|
elif "filtered" in message or "closed" in message:
|
||||||
|
return {"ip": ip, "opened": False}
|
||||||
|
else:
|
||||||
|
raise Exception("Invalid response: %s" % message)
|
||||||
|
|
||||||
|
def checkMyaddr(self, port):
|
||||||
|
url = "http://ipv6.my-addr.com/online-ipv6-port-scan.php"
|
||||||
|
|
||||||
|
data = self.requestUrl(url).read().decode("utf8")
|
||||||
|
|
||||||
|
ip = re.match(r'.*Your IP address is:[ ]*([0-9\.:a-z]+)', data.replace(" ", ""), re.DOTALL).group(1)
|
||||||
|
|
||||||
|
post_data = {"addr": ip, "ports_selected": "", "ports_list": port}
|
||||||
|
data = self.requestUrl(url, post_data).read().decode("utf8")
|
||||||
|
|
||||||
|
message = re.match(r".*<table class='table_font_16'>(.*?)</table>", data, re.DOTALL).group(1)
|
||||||
|
|
||||||
|
if "ok.png" in message:
|
||||||
|
return {"ip": ip, "opened": True}
|
||||||
|
elif "fail.png" in message:
|
||||||
|
return {"ip": ip, "opened": False}
|
||||||
|
else:
|
||||||
|
raise Exception("Invalid response: %s" % message)
|
||||||
|
|
||||||
|
def checkIpv6scanner(self, port):
|
||||||
|
url = "http://www.ipv6scanner.com/cgi-bin/main.py"
|
||||||
|
|
||||||
|
data = self.requestUrl(url).read().decode("utf8")
|
||||||
|
|
||||||
|
ip = re.match(r'.*Your IP address is[ ]*([0-9\.:a-z]+)', data.replace(" ", ""), re.DOTALL).group(1)
|
||||||
|
|
||||||
|
post_data = {"host": ip, "scanType": "1", "port": port, "protocol": "tcp", "authorized": "yes"}
|
||||||
|
data = self.requestUrl(url, post_data).read().decode("utf8")
|
||||||
|
|
||||||
|
message = re.match(r".*<table id='scantable'>(.*?)</table>", data, re.DOTALL).group(1)
|
||||||
|
message_text = re.sub("<.*?>", " ", message.replace("<br>", " ").replace(" ", " ").strip()) # Strip http tags
|
||||||
|
|
||||||
|
if "OPEN" in message_text:
|
||||||
|
return {"ip": ip, "opened": True}
|
||||||
|
elif "CLOSED" in message_text or "FILTERED" in message_text:
|
||||||
|
return {"ip": ip, "opened": False}
|
||||||
|
else:
|
||||||
|
raise Exception("Invalid response: %s" % message_text)
|
||||||
|
|
||||||
|
def checkPortchecker(self, port): # Not working: Forbidden
|
||||||
|
data = self.requestUrl("https://portchecker.co").read().decode("utf8")
|
||||||
|
csrf = re.match(r'.*name="_csrf" value="(.*?)"', data, re.DOTALL).group(1)
|
||||||
|
|
||||||
|
data = self.requestUrl("https://portchecker.co", {"port": port, "_csrf": csrf}).read().decode("utf8")
|
||||||
|
message = re.match(r'.*<div id="results-wrapper">(.*?)</div>', data, re.DOTALL).group(1)
|
||||||
|
message = re.sub(r"<.*?>", "", message.replace("<br>", " ").replace(" ", " ").strip()) # Strip http tags
|
||||||
|
|
||||||
|
match = re.match(r".*targetIP.*?value=\"(.*?)\"", data, re.DOTALL)
|
||||||
|
if match:
|
||||||
|
ip = match.group(1)
|
||||||
|
else:
|
||||||
|
raise Exception("Invalid response: %s" % message)
|
||||||
|
|
||||||
|
if "open" in message:
|
||||||
|
return {"ip": ip, "opened": True}
|
||||||
|
elif "closed" in message:
|
||||||
|
return {"ip": ip, "opened": False}
|
||||||
|
else:
|
||||||
|
raise Exception("Invalid response: %s" % message)
|
||||||
|
|
||||||
|
def checkSubnetonline(self, port): # Not working: Invalid response
|
||||||
|
url = "https://www.subnetonline.com/pages/ipv6-network-tools/online-ipv6-port-scanner.php"
|
||||||
|
|
||||||
|
data = self.requestUrl(url).read().decode("utf8")
|
||||||
|
|
||||||
|
ip = re.match(r'.*Your IP is.*?name="host".*?value="(.*?)"', data, re.DOTALL).group(1)
|
||||||
|
token = re.match(r'.*name="token".*?value="(.*?)"', data, re.DOTALL).group(1)
|
||||||
|
|
||||||
|
post_data = {"host": ip, "port": port, "allow": "on", "token": token, "submit": "Scanning.."}
|
||||||
|
data = self.requestUrl(url, post_data).read().decode("utf8")
|
||||||
|
|
||||||
|
print(post_data, data)
|
||||||
|
|
||||||
|
message = re.match(r".*<div class='formfield'>(.*?)</div>", data, re.DOTALL).group(1)
|
||||||
|
message = re.sub(r"<.*?>", "", message.replace("<br>", " ").replace(" ", " ").strip()) # Strip http tags
|
||||||
|
|
||||||
|
if "online" in message:
|
||||||
|
return {"ip": ip, "opened": True}
|
||||||
|
elif "closed" in message:
|
||||||
|
return {"ip": ip, "opened": False}
|
||||||
|
else:
|
||||||
|
raise Exception("Invalid response: %s" % message)
|
|
@ -0,0 +1,2 @@
|
||||||
|
from .Peer import Peer
|
||||||
|
from .PeerHashfield import PeerHashfield
|
|
@ -0,0 +1,287 @@
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import shutil
|
||||||
|
import time
|
||||||
|
from collections import defaultdict
|
||||||
|
|
||||||
|
import importlib
|
||||||
|
import json
|
||||||
|
|
||||||
|
from Debug import Debug
|
||||||
|
from Config import config
|
||||||
|
import plugins
|
||||||
|
|
||||||
|
|
||||||
|
class PluginManager:
    """Discovers, loads, reloads and applies plugins.

    Plugins register themselves against a target class name; when the target
    class is created via the acceptPlugins decorator, the registered plugin
    classes are mixed into its inheritance chain.
    """

    def __init__(self):
        self.log = logging.getLogger("PluginManager")
        self.path_plugins = os.path.abspath(os.path.dirname(plugins.__file__))
        self.path_installed_plugins = config.data_dir + "/__plugins__"
        self.plugins = defaultdict(list)  # Registered plugins (key: class name, value: list of plugins for class)
        self.subclass_order = {}  # Record the load order of the plugins, to keep it after reload
        self.pluggable = {}  # Classes that accept plugins (key: class name, value: original class)
        self.plugin_names = []  # Loaded plugin names
        self.plugins_updated = {}  # List of updated plugins since restart
        self.plugins_rev = {}  # Installed plugins revision numbers
        self.after_load = []  # Execute functions after loaded plugins
        self.function_flags = {}  # Flag function for permissions
        self.reloading = False  # True while reloadPlugins() is running
        self.config_path = config.data_dir + "/plugins.json"
        self.loadConfig()

        self.config.setdefault("builtin", {})

        # Make the plugins directory importable by module name
        sys.path.append(os.path.join(os.getcwd(), self.path_plugins))
        self.migratePlugins()

        if config.debug:  # Auto reload Plugins on file change
            from Debug import DebugReloader
            DebugReloader.watcher.addCallback(self.reloadPlugins)

    def loadConfig(self):
        """Load per-plugin enable/disable config from plugins.json.

        Falls back to an empty config on missing file or parse error.
        """
        if os.path.isfile(self.config_path):
            try:
                # NOTE(review): file handle opened without close/with — relies
                # on GC to close it; confirm this is acceptable here
                self.config = json.load(open(self.config_path, encoding="utf8"))
            except Exception as err:
                self.log.error("Error loading %s: %s" % (self.config_path, err))
                self.config = {}
        else:
            self.config = {}

    def saveConfig(self):
        """Write the current plugin config back to plugins.json."""
        # NOTE(review): file handle is never explicitly closed/flushed — confirm
        f = open(self.config_path, "w", encoding="utf8")
        json.dump(self.config, f, ensure_ascii=False, sort_keys=True, indent=2)

    def migratePlugins(self):
        """Remove deprecated/renamed builtin plugin directories."""
        for dir_name in os.listdir(self.path_plugins):
            if dir_name == "Mute":
                self.log.info("Deleting deprecated/renamed plugin: %s" % dir_name)
                shutil.rmtree("%s/%s" % (self.path_plugins, dir_name))

    # -- Load / Unload --

    def listPlugins(self, list_disabled=False):
        """Return metadata dicts for builtin (and installed) plugins.

        A "disabled-" directory prefix disables a plugin by default; the
        plugins.json config can override that per plugin.
        """
        plugins = []
        for dir_name in sorted(os.listdir(self.path_plugins)):
            dir_path = os.path.join(self.path_plugins, dir_name)
            plugin_name = dir_name.replace("disabled-", "")
            if dir_name.startswith("disabled"):
                is_enabled = False
            else:
                is_enabled = True

            # Config can override the directory-name-based default
            plugin_config = self.config["builtin"].get(plugin_name, {})
            if "enabled" in plugin_config:
                is_enabled = plugin_config["enabled"]

            if dir_name == "__pycache__" or not os.path.isdir(dir_path):
                continue  # skip
            if dir_name.startswith("Debug") and not config.debug:
                continue  # Only load in debug mode if module name starts with Debug
            if not is_enabled and not list_disabled:
                continue  # Dont load if disabled

            plugin = {}
            plugin["source"] = "builtin"
            plugin["name"] = plugin_name
            plugin["dir_name"] = dir_name
            plugin["dir_path"] = dir_path
            plugin["inner_path"] = plugin_name
            plugin["enabled"] = is_enabled
            plugin["rev"] = config.rev
            plugin["loaded"] = plugin_name in self.plugin_names
            plugins.append(plugin)

        plugins += self.listInstalledPlugins(list_disabled)
        return plugins

    def listInstalledPlugins(self, list_disabled=False):
        """Return metadata dicts for site-installed plugins from the config.

        Config layout: {site_address: {plugin_inner_path: {enabled, rev, ...}}};
        the special "builtin" key is handled by listPlugins().
        """
        plugins = []

        for address, site_plugins in sorted(self.config.items()):
            if address == "builtin":
                continue
            for plugin_inner_path, plugin_config in sorted(site_plugins.items()):
                is_enabled = plugin_config.get("enabled", False)
                if not is_enabled and not list_disabled:
                    continue
                plugin_name = os.path.basename(plugin_inner_path)

                dir_path = "%s/%s/%s" % (self.path_installed_plugins, address, plugin_inner_path)

                plugin = {}
                plugin["source"] = address
                plugin["name"] = plugin_name
                plugin["dir_name"] = plugin_name
                plugin["dir_path"] = dir_path
                plugin["inner_path"] = plugin_inner_path
                plugin["enabled"] = is_enabled
                plugin["rev"] = plugin_config.get("rev", 0)
                plugin["loaded"] = plugin_name in self.plugin_names
                plugins.append(plugin)

        return plugins

    # Load all plugin
    def loadPlugins(self):
        """Import every enabled plugin module; run after_load callbacks.

        Returns True when every plugin imported without error.
        """
        all_loaded = True
        s = time.time()
        for plugin in self.listPlugins():
            self.log.debug("Loading plugin: %s (%s)" % (plugin["name"], plugin["source"]))
            if plugin["source"] != "builtin":
                self.plugins_rev[plugin["name"]] = plugin["rev"]
                site_plugin_dir = os.path.dirname(plugin["dir_path"])
                if site_plugin_dir not in sys.path:
                    sys.path.append(site_plugin_dir)
            try:
                sys.modules[plugin["name"]] = __import__(plugin["dir_name"])
            except Exception as err:
                self.log.error("Plugin %s load error: %s" % (plugin["name"], Debug.formatException(err)))
                all_loaded = False
            if plugin["name"] not in self.plugin_names:
                self.plugin_names.append(plugin["name"])

        self.log.debug("Plugins loaded in %.3fs" % (time.time() - s))
        for func in self.after_load:
            func()
        return all_loaded

    # Reload all plugins
    def reloadPlugins(self):
        """Hot-reload all plugin modules and re-patch live classes/instances.

        Re-imports plugin modules, rebuilds the plugined classes, then patches
        both existing instances (via gc) and class references held in modules.
        """
        self.reloading = True
        self.after_load = []
        self.plugins_before = self.plugins
        self.plugins = defaultdict(list)  # Reset registered plugins
        for module_name, module in list(sys.modules.items()):
            if not module or not getattr(module, "__file__", None):
                continue
            # Only touch modules that live in a plugin directory
            if self.path_plugins not in module.__file__ and self.path_installed_plugins not in module.__file__:
                continue

            if "allow_reload" in dir(module) and not module.allow_reload:  # Reload disabled
                # Re-add non-reloadable plugins
                for class_name, classes in self.plugins_before.items():
                    for c in classes:
                        if c.__module__ != module.__name__:
                            continue
                        self.plugins[class_name].append(c)
            else:
                try:
                    importlib.reload(module)
                except Exception as err:
                    self.log.error("Plugin %s reload error: %s" % (module_name, Debug.formatException(err)))

        self.loadPlugins()  # Load new plugins

        # Change current classes in memory
        import gc
        patched = {}
        for class_name, classes in self.plugins.items():
            classes = classes[:]  # Copy the current plugins
            classes.reverse()
            base_class = self.pluggable[class_name]  # Original class
            classes.append(base_class)  # Add the class itself to end of inheritance line
            plugined_class = type(class_name, tuple(classes), dict())  # Create the plugined class
            for obj in gc.get_objects():
                if type(obj).__name__ == class_name:
                    obj.__class__ = plugined_class
                    patched[class_name] = patched.get(class_name, 0) + 1
        self.log.debug("Patched objects: %s" % patched)

        # Change classes in modules
        patched = {}
        for class_name, classes in self.plugins.items():
            for module_name, module in list(sys.modules.items()):
                if class_name in dir(module):
                    if "__class__" not in dir(getattr(module, class_name)):  # Not a class
                        continue
                    base_class = self.pluggable[class_name]
                    classes = self.plugins[class_name][:]
                    classes.reverse()
                    classes.append(base_class)
                    plugined_class = type(class_name, tuple(classes), dict())
                    setattr(module, class_name, plugined_class)
                    patched[class_name] = patched.get(class_name, 0) + 1

        self.log.debug("Patched modules: %s" % patched)
        self.reloading = False
|
||||||
|
|
||||||
|
|
||||||
|
plugin_manager = PluginManager()  # Singleton
|
||||||
|
|
||||||
|
# -- Decorators --
|
||||||
|
|
||||||
|
# Accept plugin to class decorator
|
||||||
|
|
||||||
|
|
||||||
|
def acceptPlugins(base_class):
    """Class decorator: return *base_class* extended with its registered plugins.

    When plugins were registered for this class name (via registerTo), builds a
    new class whose bases are [last_plugin, ..., first_plugin, base_class], so
    later-registered plugins override earlier ones and all override the base.
    Without plugins the original class is returned unchanged.
    """
    class_name = base_class.__name__
    plugin_manager.pluggable[class_name] = base_class
    if class_name in plugin_manager.plugins:  # Has plugins
        classes = plugin_manager.plugins[class_name][:]  # Copy the current plugins

        # Restore the subclass order after reload
        if class_name in plugin_manager.subclass_order:
            classes = sorted(
                classes,
                key=lambda key:
                    plugin_manager.subclass_order[class_name].index(str(key))
                    if str(key) in plugin_manager.subclass_order[class_name]
                    else 9999  # Unknown (new) plugins sort last
            )
        plugin_manager.subclass_order[class_name] = list(map(str, classes))

        classes.reverse()
        classes.append(base_class)  # Add the class itself to end of inheritance line
        plugined_class = type(class_name, tuple(classes), dict())  # Create the plugined class
        plugin_manager.log.debug("New class accepts plugins: %s (Loaded plugins: %s)" % (class_name, classes))
    else:  # No plugins just use the original
        plugined_class = base_class
    return plugined_class
|
||||||
|
|
||||||
|
|
||||||
|
# Register plugin to class name decorator
|
||||||
|
def registerTo(class_name):
    """Return a class decorator that registers the decorated class as a plugin
    for the class named *class_name*.

    The plugin takes effect when the target class is (re)built via
    acceptPlugins. In debug mode (outside of a reload), registration is refused
    if instances of the target class already exist in memory, because plugins
    applied afterwards would not affect them.
    """
    if config.debug and not plugin_manager.reloading:
        import gc
        for obj in gc.get_objects():
            if type(obj).__name__ == class_name:
                # Fix: removed an unreachable `break` that followed this raise
                raise Exception("Class %s instances already present in memory" % class_name)

    plugin_manager.log.debug("New plugin registered to: %s" % class_name)
    if class_name not in plugin_manager.plugins:
        plugin_manager.plugins[class_name] = []

    def classDecorator(self):
        # Append the decorated class to the plugin list and return it unchanged
        plugin_manager.plugins[class_name].append(self)
        return self
    return classDecorator
|
||||||
|
|
||||||
|
|
||||||
|
def afterLoad(func):
    """Decorator: schedule *func* to run after all plugins have been loaded."""
    plugin_manager.after_load.append(func)
    return func
|
||||||
|
|
||||||
|
|
||||||
|
# - Example usage -
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Demo of the plugin system: RequestPlugin is registered for the class
    # name "Request"; when Request is then defined with @acceptPlugins, the
    # plugin is mixed in, so route("MainPage") dispatches to actionMainPage.
    @registerTo("Request")
    class RequestPlugin(object):

        def actionMainPage(self, path):
            return "Hello MainPage!"

    @acceptPlugins
    class Request(object):

        def route(self, path):
            # Dispatch to a method named "action" + path, if present
            func = getattr(self, "action" + path, None)
            if func:
                return func(path)
            else:
                # NOTE(review): this returns a tuple ("Can't route to", path),
                # not a formatted string — presumably fine for the demo print
                return "Can't route to", path

    print(Request().route("MainPage"))
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,360 @@
|
||||||
|
import random
|
||||||
|
import time
|
||||||
|
import hashlib
|
||||||
|
import logging
|
||||||
|
import re
|
||||||
|
import collections
|
||||||
|
|
||||||
|
import gevent
|
||||||
|
|
||||||
|
from Plugin import PluginManager
|
||||||
|
from Config import config
|
||||||
|
from Debug import Debug
|
||||||
|
from util import helper
|
||||||
|
from greenlet import GreenletExit
|
||||||
|
import util
|
||||||
|
from util import CircularIterator
|
||||||
|
|
||||||
|
|
||||||
|
class AnnounceError(Exception):
    """Raised when announcing to a tracker cannot be completed."""
|
||||||
|
|
||||||
|
global_stats = collections.defaultdict(lambda: collections.defaultdict(int))
|
||||||
|
|
||||||
|
|
||||||
|
@PluginManager.acceptPlugins
class SiteAnnouncer(object):
    """Announces a site to trackers and exchanges peers (PEX) for it."""

    def __init__(self, site):
        self.site = site
        self.log = logging.getLogger("Site:%s SiteAnnouncer" % self.site.address_short)

        self.stats = {}  # Per-tracker announce statistics for this site
        self.fileserver_port = config.fileserver_port
        self.peer_id = self.site.connection_server.peer_id
        self.tracker_circular_iterator = CircularIterator()  # Round-robin over trackers
        self.time_last_announce = 0
        self.supported_tracker_count = 0  # Cached len(getSupportedTrackers())

    # Returns the site's connection server
    # Since 0.8.0
    @property
    def connection_server(self):
        return self.site.connection_server

    def getTrackers(self):
        """Return the configured tracker address list."""
        return config.trackers

    def getSupportedTrackers(self):
        """Return trackers this node can actually reach.

        Filters out .onion trackers when Tor is disabled, trackers with
        unparseable addresses, and ipv6 trackers when ipv6 is unsupported.
        """
        trackers = self.getTrackers()

        if not self.connection_server.tor_manager.enabled:
            trackers = [tracker for tracker in trackers if ".onion" not in tracker]

        trackers = [tracker for tracker in trackers if self.getAddressParts(tracker)]  # Remove trackers with unknown address

        if "ipv6" not in self.connection_server.supported_ip_types:
            trackers = [tracker for tracker in trackers if self.connection_server.getIpType(self.getAddressParts(tracker)["ip"]) != "ipv6"]

        return trackers

    # Returns a cached value of len(self.getSupportedTrackers()), which can be
    # inaccurate.
    # To be used from Site for estimating available tracker count.
    def getSupportedTrackerCount(self):
        return self.supported_tracker_count

    def shouldTrackerBeTemporarilyIgnored(self, tracker, mode, force):
        """Return True when *tracker* should be skipped in this round.

        Unresponsive trackers are throttled: each consecutive error adds a
        30s delay (capped at 10 minutes). *force* bypasses the throttle.
        """
        if not tracker:
            return True

        if force:
            return False

        now = time.time()

        # Throttle accessing unresponsive trackers
        tracker_stats = global_stats[tracker]
        delay = min(30 * tracker_stats["num_error"], 60 * 10)
        time_announce_allowed = tracker_stats["time_request"] + delay
        if now < time_announce_allowed:
            return True

        return False

    def getAnnouncingTrackers(self, mode, force):
        """Pick the trackers to announce to for this round.

        In "update"/"more" modes, select up to 2 trackers (the next one in the
        round-robin sequence plus one random); otherwise all non-throttled ones.
        """
        trackers = self.getSupportedTrackers()

        self.supported_tracker_count = len(trackers)

        if trackers and (mode == "update" or mode == "more"):

            # Choose just 2 trackers to announce to

            trackers_announcing = []

            # One is the next in sequence

            self.tracker_circular_iterator.resetSuccessiveCount()
            while 1:
                tracker = self.tracker_circular_iterator.next(trackers)
                if not self.shouldTrackerBeTemporarilyIgnored(tracker, mode, force):
                    trackers_announcing.append(tracker)
                    break
                # Stop after a full pass with no eligible tracker
                if self.tracker_circular_iterator.isWrapped():
                    break

            # And one is just random

            shuffled_trackers = random.sample(trackers, len(trackers))
            for tracker in shuffled_trackers:
                if tracker in trackers_announcing:
                    continue
                if not self.shouldTrackerBeTemporarilyIgnored(tracker, mode, force):
                    trackers_announcing.append(tracker)
                    break
        else:
            trackers_announcing = [
                tracker for tracker in trackers
                if not self.shouldTrackerBeTemporarilyIgnored(tracker, mode, force)
            ]

        return trackers_announcing

    def getOpenedServiceTypes(self):
        """Return address types ("ipv4", "ipv6", "onion") peers can reach us on."""
        back = []
        # Type of addresses they can reach me
        if config.trackers_proxy == "disable" and config.tor != "always":
            for ip_type, opened in list(self.connection_server.port_opened.items()):
                if opened:
                    back.append(ip_type)
        if self.connection_server.tor_manager.start_onions:
            back.append("onion")
        return back

    @util.Noparallel()
    def announce(self, force=False, mode="start", pex=True):
        """Announce the site to the chosen trackers and optionally run PEX.

        Rate-limited to one announce per 30 seconds unless *force* is set.
        """
        if not self.site.isServing():
            return

        if time.time() - self.time_last_announce < 30 and not force:
            return  # No reannouncing within 30 secs

        self.log.debug("announce: force=%s, mode=%s, pex=%s" % (force, mode, pex))

        self.fileserver_port = config.fileserver_port
        self.time_last_announce = time.time()

        trackers = self.getAnnouncingTrackers(mode, force)
        self.log.debug("Chosen trackers: %s" % trackers)
        self.announceToTrackers(trackers, force=force, mode=mode)

        if pex:
            self.announcePex()

    def getTrackerHandler(self, protocol):
        """Return the announce handler for *protocol*.

        Base implementation knows no protocols; plugins override this.
        """
        return None

    def getAddressParts(self, tracker):
        """Parse "protocol://host[:port]" into a dict, or None if invalid.

        Returns {"protocol", "address", "ip", "port"}; port defaults to
        443 for https-like protocols and 80 otherwise.
        """
        if "://" not in tracker or not re.match("^[A-Za-z0-9:/\\.#-]+$", tracker):
            return None
        protocol, address = tracker.split("://", 1)
        if ":" in address:
            ip, port = address.rsplit(":", 1)
        else:
            ip = address
            if protocol.startswith("https"):
                port = 443
            else:
                port = 80
        back = {}
        back["protocol"] = protocol
        back["address"] = address
        back["ip"] = ip
        back["port"] = port
        return back

    def announceTracker(self, tracker, mode="start", num_want=10):
        """Announce to one tracker and add the returned peers to the site.

        Returns the elapsed seconds on success, False on error, None when the
        handler skipped the announce.

        NOTE(review): the *num_want* parameter is unconditionally overwritten
        below (10 for "update" mode, 30 otherwise) — callers passing num_want
        have no effect; confirm whether that is intended.
        """
        s = time.time()
        address_parts = self.getAddressParts(tracker)
        if not address_parts:
            self.log.warning("Tracker %s error: Invalid address" % tracker)
            return False

        if tracker not in self.stats:
            self.stats[tracker] = {"status": "", "num_request": 0, "num_success": 0, "num_error": 0, "time_request": 0, "time_last_error": 0}

        last_status = self.stats[tracker]["status"]
        self.stats[tracker]["status"] = "announcing"
        self.stats[tracker]["time_request"] = time.time()
        global_stats[tracker]["time_request"] = time.time()
        if config.verbose:
            self.log.debug("Tracker announcing to %s (mode: %s)" % (tracker, mode))
        if mode == "update":
            num_want = 10
        else:
            num_want = 30

        handler = self.getTrackerHandler(address_parts["protocol"])
        error = None
        try:
            if handler:
                peers = handler(address_parts["address"], mode=mode, num_want=num_want)
            else:
                raise AnnounceError("Unknown protocol: %s" % address_parts["protocol"])
        except Exception as err:
            self.log.warning("Tracker %s announce failed: %s in mode %s" % (tracker, Debug.formatException(err), mode))
            error = err

        if error:
            self.stats[tracker]["status"] = "error"
            self.stats[tracker]["time_status"] = time.time()
            self.stats[tracker]["last_error"] = str(error)
            self.stats[tracker]["time_last_error"] = time.time()
            # Only count errors while we believe we have internet access
            if self.connection_server.has_internet:
                self.stats[tracker]["num_error"] += 1
            self.stats[tracker]["num_request"] += 1
            global_stats[tracker]["num_request"] += 1
            if self.connection_server.has_internet:
                global_stats[tracker]["num_error"] += 1
            self.updateWebsocket(tracker="error")
            return False

        if peers is None:  # Announce skipped
            self.stats[tracker]["time_status"] = time.time()
            self.stats[tracker]["status"] = last_status
            return None

        self.stats[tracker]["status"] = "announced"
        self.stats[tracker]["time_status"] = time.time()
        self.stats[tracker]["num_success"] += 1
        self.stats[tracker]["num_request"] += 1
        global_stats[tracker]["num_request"] += 1
        global_stats[tracker]["num_error"] = 0  # Success resets the error throttle

        if peers is True:  # Announce success, but no peers returned
            return time.time() - s

        # Adding peers
        added = 0
        for peer in peers:
            if peer["port"] == 1:  # Some trackers does not accept port 0, so we send port 1 as not-connectable
                peer["port"] = 0
            if not peer["port"]:
                continue  # Dont add peers with port 0
            if self.site.addPeer(peer["addr"], peer["port"], source="tracker"):
                added += 1

        if added:
            self.site.worker_manager.onPeers()
            self.site.updateWebsocket(peers_added=added)

        if config.verbose:
            self.log.debug(
                "Tracker result: %s://%s (found %s peers, new: %s, total: %s)" %
                (address_parts["protocol"], address_parts["address"], len(peers), added, len(self.site.peers))
            )
        return time.time() - s

    def announceToTrackers(self, trackers, force=False, mode="start"):
        """Announce to *trackers* in parallel greenlets and log the outcome.

        On total failure (every tracker errored) in non-"start" modes, schedules
        a retry announce 5 seconds later.
        """
        errors = []
        slow = []
        s = time.time()
        threads = []
        num_announced = 0

        for tracker in trackers:  # Start announce threads
            thread = self.site.greenlet_manager.spawn(self.announceTracker, tracker, mode=mode)
            threads.append(thread)
            thread.tracker = tracker

        time.sleep(0.01)
        self.updateWebsocket(trackers="announcing")

        # NOTE(review): timeout here is 20s, but still-running threads are
        # labelled "30s+" below — confirm which figure is intended
        gevent.joinall(threads, timeout=20)  # Wait for announce finish

        for thread in threads:
            if thread.value is None:
                continue
            if thread.value is not False:
                if thread.value > 1.0:  # Takes more than 1 second to announce
                    slow.append("%.2fs %s" % (thread.value, thread.tracker))
                num_announced += 1
            else:
                if thread.ready():
                    errors.append(thread.tracker)
                else:  # Still running
                    slow.append("30s+ %s" % thread.tracker)

        # Save peers num
        self.site.settings["peers"] = len(self.site.peers)

        if len(errors) < len(threads):  # At least one tracker finished
            if len(trackers) == 1:
                announced_to = trackers[0]
            else:
                announced_to = "%s/%s trackers" % (num_announced, len(threads))
            if mode != "update" or config.verbose:
                self.log.debug(
                    "Announced in mode %s to %s in %.3fs, errors: %s, slow: %s" %
                    (mode, announced_to, time.time() - s, errors, slow)
                )
        else:
            if len(threads) > 1:
                self.log.error("Announce to %s trackers in %.3fs, failed" % (len(threads), time.time() - s))
            if len(threads) > 1 and mode != "start":  # Move to next tracker
                self.log.debug("Tracker failed, skipping to next one...")
                self.site.greenlet_manager.spawnLater(5.0, self.announce, force=force, mode=mode, pex=False)

        self.updateWebsocket(trackers="announced")

    @util.Noparallel(blocking=False)
    def announcePex(self, query_num=2, need_num=10, establish_connections=True):
        """Exchange peers with up to *query_num* connected peers (PEX).

        Waits up to ~10s for peers to connect; when too few are connected and
        *establish_connections* is set, falls back to recent peers.
        """
        peers = []
        try:
            peer_count = 20 + query_num * 2

            # Wait for some peers to connect
            for _ in range(5):
                if not self.site.isServing():
                    return
                peers = self.site.getConnectedPeers(only_fully_connected=True)
                if len(peers) > 0:
                    break
                time.sleep(2)

            if len(peers) < peer_count and establish_connections:
                # Small number of connected peers for this site, connect to any
                peers = list(self.site.getRecentPeers(peer_count))

            if len(peers) > 0:
                self.updateWebsocket(pex="announcing")

            random.shuffle(peers)
            done = 0
            total_added = 0
            for peer in peers:
                if not establish_connections and not peer.isConnected():
                    continue
                num_added = peer.pex(need_num=need_num)
                if num_added is not False:  # False means the pex query failed
                    done += 1
                    total_added += num_added
                    if num_added:
                        self.site.worker_manager.onPeers()
                        self.site.updateWebsocket(peers_added=num_added)
                if done == query_num:
                    break
                time.sleep(0.1)
            self.log.debug("Pex result: from %s peers got %s new peers." % (done, total_added))
        finally:
            if len(peers) > 0:
                self.updateWebsocket(pex="announced")

    def updateWebsocket(self, **kwargs):
        """Push an "announcerChanged" event (first kwarg as the event) to
        every websocket attached to the site."""
        if kwargs:
            param = {"event": list(kwargs.items())[0]}
        else:
            param = None

        for ws in self.site.websockets:
            ws.event("announcerChanged", self.site, param)
|
|
@ -0,0 +1,256 @@
|
||||||
|
import time
|
||||||
|
import weakref
|
||||||
|
import gevent
|
||||||
|
|
||||||
|
class ConnectRequirement(object):
    """A request to the PeerConnector: "give me at least N known peers and
    M connected peers". Resolved asynchronously; optionally expires."""

    next_id = 1  # class-wide id counter, incremented per instance

    def __init__(self, need_nr_peers, need_nr_connected_peers, expiration_interval=None):
        self.need_nr_peers = need_nr_peers  # how many total peers we need
        self.need_nr_connected_peers = need_nr_connected_peers  # how many connected peers we need
        self.result = gevent.event.AsyncResult()  # resolves on need_nr_peers condition
        self.result_connected = gevent.event.AsyncResult()  # resolves on need_nr_connected_peers condition

        self.expiration_interval = expiration_interval
        self.expired = False
        # Absolute deadline, or None when the requirement never expires.
        self.expire_at = (time.time() + expiration_interval) if expiration_interval else None

        self.nr_peers = -1  # updated by PeerConnector()
        self.nr_connected_peers = -1  # updated by PeerConnector()

        self.heartbeat = gevent.event.AsyncResult()

        self.id = type(self).next_id
        type(self).next_id += 1

    def fulfilled(self):
        # Both peer-count conditions have been met.
        return self.result.ready() and self.result_connected.ready()

    def ready(self):
        # Nothing left to wait for: either fulfilled or timed out.
        return self.expired or self.fulfilled()

    # Heartbeat sent when any of the following happens:
    # * self.result is set
    # * self.result_connected is set
    # * self.nr_peers changed
    # * self.nr_peers_connected changed
    # * self.expired is set
    def waitHeartbeat(self, timeout=None):
        # Re-arm the event when it already fired so the caller blocks on the
        # next heartbeat rather than returning immediately.
        if self.heartbeat.ready():
            self.heartbeat = gevent.event.AsyncResult()
        return self.heartbeat.wait(timeout=timeout)

    def sendHeartbeat(self):
        # NOTE(review): set_result() is the concurrent.futures-style spelling;
        # confirm the installed gevent's AsyncResult provides it (gevent's
        # native setter is set()).
        self.heartbeat.set_result()
        if self.heartbeat.ready():
            self.heartbeat = gevent.event.AsyncResult()
|
||||||
|
|
||||||
|
class PeerConnector(object):
    """Maintains the desired number of known and connected peers for a site.

    Consumers register ConnectRequirement objects via newReq(); background
    greenlets (controller, per-peer workers, announcer) are spawned on demand
    to satisfy the largest outstanding requirement.

    Fixes vs. original: deregisterPeer() catches only ValueError instead of a
    bare except; stray trailing semicolons removed.
    """

    def __init__(self, site):
        self.site = site

        # Outstanding ConnectRequirement objects keyed by req.id.
        # Weak values: a requirement vanishes once its creator drops it.
        # Separate entry for each requirement.
        self.peer_reqs = weakref.WeakValueDictionary()

        self.peer_connector_controller = None  # Thread doing the orchestration in background.
        self.peer_connector_workers = dict()  # Threads trying to connect to individual peers.
        self.peer_connector_worker_limit = 5  # Max nr of workers.
        self.peer_connector_announcer = None  # Thread doing announces in background.

        # Max effective values. Set by processReqs().
        self.need_nr_peers = 0
        self.need_nr_connected_peers = 0
        self.nr_peers = 0  # set by processReqs()
        self.nr_connected_peers = 0  # set by processReqs2()

        # Connector Controller state: cached list of candidate peers to try.
        self.peers = list()

    def addReq(self, req):
        # Register a requirement and immediately re-evaluate all of them.
        self.peer_reqs[req.id] = req
        self.processReqs()

    def newReq(self, need_nr_peers, need_nr_connected_peers, expiration_interval=None):
        """Create, register and return a new ConnectRequirement."""
        req = ConnectRequirement(need_nr_peers, need_nr_connected_peers, expiration_interval=expiration_interval)
        self.addReq(req)
        return req

    def processReqs(self, nr_connected_peers=None):
        """Re-evaluate every requirement: expire or resolve them and update the
        aggregated need_nr_peers / need_nr_connected_peers maximums.

        nr_connected_peers is None when the caller only knows the total peer
        count; connected-peer conditions are then left untouched. Spawns the
        announcer/controller greenlets when the needs remain unmet.
        """
        nr_peers = len(self.site.peers)
        self.nr_peers = nr_peers

        need_nr_peers = 0
        need_nr_connected_peers = 0

        items = list(self.peer_reqs.items())
        for key, req in items:
            send_heartbeat = False

            if req.expire_at and req.expire_at < time.time():
                req.expired = True
                self.peer_reqs.pop(key, None)
                send_heartbeat = True
            elif req.result.ready() and req.result_connected.ready():
                pass  # Already fulfilled, nothing to update.
            else:
                if nr_connected_peers is not None:
                    if req.need_nr_peers <= nr_peers and req.need_nr_connected_peers <= nr_connected_peers:
                        # NOTE(review): set_result() is the Future-style
                        # spelling; confirm the installed gevent provides it.
                        req.result.set_result(nr_peers)
                        req.result_connected.set_result(nr_connected_peers)
                        send_heartbeat = True
                    if req.nr_peers != nr_peers or req.nr_connected_peers != nr_connected_peers:
                        req.nr_peers = nr_peers
                        req.nr_connected_peers = nr_connected_peers
                        send_heartbeat = True

            if not (req.result.ready() and req.result_connected.ready()):
                need_nr_peers = max(need_nr_peers, req.need_nr_peers)
                need_nr_connected_peers = max(need_nr_connected_peers, req.need_nr_connected_peers)

            if send_heartbeat:
                req.sendHeartbeat()

        self.need_nr_peers = need_nr_peers
        self.need_nr_connected_peers = need_nr_connected_peers

        if nr_connected_peers is None:
            nr_connected_peers = 0
        if need_nr_peers > nr_peers:
            self.spawnPeerConnectorAnnouncer()
        if need_nr_connected_peers > nr_connected_peers:
            self.spawnPeerConnectorController()

    def processReqs2(self):
        # Like processReqs(), but also recomputes the connected peer count first.
        self.nr_connected_peers = len(self.site.getConnectedPeers(only_fully_connected=True))
        self.processReqs(nr_connected_peers=self.nr_connected_peers)

    # For adding new peers when ConnectorController is working.
    # While it is iterating over a cached list of peers, there can be a significant lag
    # for a newly discovered peer to get in sight of the controller.
    # Suppose most previously known peers are dead and we've just get a few
    # new peers from a tracker.
    # So we mix the new peer to the cached list.
    # When ConnectorController is stopped (self.peers is empty), we just do nothing here.
    def addPeer(self, peer):
        if not self.peers:
            return
        if peer not in self.peers:
            self.peers.append(peer)

    def deregisterPeer(self, peer):
        # Fixed: only ValueError (peer not in the list) is expected here;
        # the original bare except could hide unrelated bugs.
        try:
            self.peers.remove(peer)
        except ValueError:
            pass

    def sleep(self, t):
        # Sleep through the connection server so shutdown can interrupt it.
        self.site.connection_server.sleep(t)

    def keepGoing(self):
        # Background loops run only while the site is served and new
        # connections are permitted.
        return self.site.isServing() and self.site.connection_server.allowsCreatingConnections()

    def peerConnectorWorker(self, peer):
        # Try to bring a single peer to the fully connected state.
        if not peer.isConnected():
            peer.connect()
        if peer.isConnected():
            peer.ping()
            self.processReqs2()

    def peerConnectorController(self):
        """Background loop that keeps spawning workers until enough peers are
        connected or the site stops serving."""
        self.peers = list()
        addendum = 20  # extra peers fetched on each refill; grows while we keep failing
        while self.keepGoing():

            no_peers_loop = 0
            while len(self.site.peers) < 1:
                # No peers at all.
                # Waiting for the announcer to discover some peers.
                self.sleep(10 + no_peers_loop)
                no_peers_loop += 1
                if not self.keepGoing() or no_peers_loop > 60:
                    break

            self.processReqs2()

            if self.need_nr_connected_peers <= self.nr_connected_peers:
                # Ok, nobody waits for connected peers.
                # Done.
                break

            if len(self.site.peers) < 1:
                break

            if len(self.peers) < 1:
                # refill the peer list
                self.peers = self.site.getRecentPeers(self.need_nr_connected_peers * 2 + self.nr_connected_peers + addendum)
                addendum = min(addendum * 2 + 50, 10000)
                if len(self.peers) <= self.nr_connected_peers:
                    # Looks like all known peers are connected.
                    # Waiting for the announcer to discover some peers.
                    self.site.announcer.announcePex(establish_connections=False)
                    self.sleep(10)
                    continue

            added = 0

            # try connecting to peers
            while self.keepGoing() and len(self.peer_connector_workers) < self.peer_connector_worker_limit:
                if len(self.peers) < 1:
                    break

                peer = self.peers.pop(0)

                if peer.isConnected():
                    continue

                thread = self.peer_connector_workers.get(peer, None)
                if thread:
                    continue  # a worker is already handling this peer

                thread = self.site.spawn(self.peerConnectorWorker, peer)
                self.peer_connector_workers[peer] = thread
                # peer=peer binds the current peer (avoids the late-binding closure pitfall)
                thread.link(lambda thread, peer=peer: self.peer_connector_workers.pop(peer, None))
                added += 1

            if not self.keepGoing():
                break

            if not added:
                # Looks like all known peers are either connected or being connected,
                # so we weren't able to start connecting any peer in this iteration.
                # Waiting for the announcer to discover some peers.
                self.sleep(20)

            # wait for more room in self.peer_connector_workers
            while self.keepGoing() and len(self.peer_connector_workers) >= self.peer_connector_worker_limit:
                self.sleep(2)

            if not self.site.connection_server.isInternetOnline():
                self.sleep(30)

        self.peers = list()
        self.peer_connector_controller = None

    def peerConnectorAnnouncer(self):
        """Background loop that keeps announcing until enough peers are known."""
        while self.keepGoing():
            if self.need_nr_peers <= self.nr_peers:
                break
            self.site.announce(mode="more")
            self.processReqs2()
            if self.need_nr_peers <= self.nr_peers:
                break
            self.sleep(10)
            if not self.site.connection_server.isInternetOnline():
                self.sleep(20)
        self.peer_connector_announcer = None

    def spawnPeerConnectorController(self):
        # Start the controller unless one is already running.
        if self.peer_connector_controller is None or self.peer_connector_controller.ready():
            self.peer_connector_controller = self.site.spawn(self.peerConnectorController)

    def spawnPeerConnectorAnnouncer(self):
        # Start the announcer unless one is already running.
        if self.peer_connector_announcer is None or self.peer_connector_announcer.ready():
            self.peer_connector_announcer = self.site.spawn(self.peerConnectorAnnouncer)
|
|
@ -0,0 +1,264 @@
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import re
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
import atexit
|
||||||
|
import collections
|
||||||
|
|
||||||
|
import gevent
|
||||||
|
|
||||||
|
import util
|
||||||
|
from Plugin import PluginManager
|
||||||
|
from Content import ContentDb
|
||||||
|
from Config import config
|
||||||
|
from util import helper
|
||||||
|
from util import RateLimit
|
||||||
|
from util import Cached
|
||||||
|
|
||||||
|
|
||||||
|
@PluginManager.acceptPlugins
class SiteManager(object):
    """Registry of all local sites; persists their settings to data/sites.json.

    Fixes vs. original: save() now reports the true overall duration (the
    original re-used and clobbered the same timer variable, so the total only
    covered the write phase); get() log message had a stray ")"; load() builds
    json_path before the try so the error message can always reference it.
    """

    def __init__(self):
        self.log = logging.getLogger("SiteManager")
        self.log.debug("SiteManager created.")
        self.sites = {}  # address -> Site object
        self.sites_changed = int(time.time())  # timestamp of the last add/delete
        self.loaded = False  # True after sites.json has been processed once
        gevent.spawn(self.saveTimer)
        atexit.register(lambda: self.save(recalculate_size=True))

        # ZeroNet has a bug of desyncing between:
        # * time sent in a response of listModified
        # and
        # * time checked on receiving a file.
        # This leads to the following scenario:
        # * Request listModified.
        # * Detect that the remote peer missing an update
        # * Send a newer version of the file back to the peer.
        # * The peer responses "ok: File not changed"
        # .....
        # * Request listModified the next time and do all the same again.
        # So we keep the list of sent back entries to prevent sending multiple useless updates:
        # "{site.address} - {peer.key} - {inner_path}" -> mtime
        self.send_back_lru = collections.OrderedDict()

    # Load all sites from data/sites.json
    @util.Noparallel()
    def load(self, cleanup=True, startup=False):
        """Populate self.sites from sites.json; optionally drop sites that
        disappeared from it (cleanup) and queue downloads for sites that have
        no local data yet (startup)."""
        from Debug import Debug
        self.log.info("Loading sites... (cleanup: %s, startup: %s)" % (cleanup, startup))
        self.loaded = False
        from .Site import Site
        address_found = []
        added = 0
        load_s = time.time()
        # Load new adresses
        json_path = "%s/sites.json" % config.data_dir  # built outside try so the error message below always has it
        try:
            data = json.load(open(json_path))
        except Exception as err:
            raise Exception("Unable to load %s: %s" % (json_path, err))

        sites_need = []

        for address, settings in data.items():
            if address not in self.sites:
                if os.path.isfile("%s/%s/content.json" % (config.data_dir, address)):
                    # Root content.json exists, try load site
                    s = time.time()
                    try:
                        site = Site(address, settings=settings)
                        site.content_manager.contents.get("content.json")
                    except Exception as err:
                        self.log.debug("Error loading site %s: %s" % (address, err))
                        continue
                    self.sites[address] = site
                    self.log.debug("Loaded site %s in %.3fs" % (address, time.time() - s))
                    added += 1
                elif startup:
                    # No site directory, start download
                    self.log.debug("Found new site in sites.json: %s" % address)
                    sites_need.append([address, settings])
                    added += 1

            address_found.append(address)

        # Remove deleted adresses
        if cleanup:
            for address in list(self.sites.keys()):
                if address not in address_found:
                    del(self.sites[address])
                    self.log.debug("Removed site: %s" % address)

            # Remove orpan sites from contentdb
            content_db = ContentDb.getContentDb()
            for row in content_db.execute("SELECT * FROM site").fetchall():
                address = row["address"]
                if address not in self.sites and address not in address_found:
                    self.log.info("Deleting orphan site from content.db: %s" % address)

                    try:
                        # "WHERE ?" + dict is the project Db class's named-condition shorthand
                        content_db.execute("DELETE FROM site WHERE ?", {"address": address})
                    except Exception as err:
                        self.log.error("Can't delete site %s from content_db: %s" % (address, err))

                    if address in content_db.site_ids:
                        del content_db.site_ids[address]
                    if address in content_db.sites:
                        del content_db.sites[address]

        self.loaded = True
        for address, settings in sites_need:
            gevent.spawn(self.need, address, settings=settings)
        if added:
            self.log.info("Added %s sites in %.3fs" % (added, time.time() - load_s))

    def saveDelayed(self):
        # Coalesce frequent save requests: at most one write per 5 seconds.
        RateLimit.callAsync("Save sites.json", allowed_again=5, func=self.save)

    def save(self, recalculate_size=False):
        """Write every site's settings to sites.json.

        recalculate_size: refresh each site's size fields first (slower).
        """
        if not self.sites:
            self.log.debug("Save skipped: No sites found")
            return
        if not self.loaded:
            self.log.debug("Save skipped: Not loaded")
            return
        s_total = time.time()  # fixed: dedicated overall timer, no longer clobbered below
        data = {}
        # Generate data file
        s = time.time()
        for address, site in list(self.list().items()):
            if recalculate_size:
                site.settings["size"], site.settings["size_optional"] = site.content_manager.getTotalSize()  # Update site size
            data[address] = site.settings
            data[address]["cache"] = site.getSettingsCache()
        time_generate = time.time() - s

        s = time.time()
        if data:
            helper.atomicWrite("%s/sites.json" % config.data_dir, helper.jsonDumps(data).encode("utf8"))
        else:
            self.log.debug("Save error: No data")
        time_write = time.time() - s

        # Remove cache from site settings
        for address, site in self.list().items():
            site.settings["cache"] = {}

        self.log.debug("Saved sites in %.2fs (generate: %.2fs, write: %.2fs)" % (time.time() - s_total, time_generate, time_write))

    def saveTimer(self):
        # Periodic background save (every 10 minutes), spawned from __init__.
        while 1:
            time.sleep(60 * 10)
            self.save(recalculate_size=True)

    # Checks if its a valid address
    def isAddress(self, address):
        return re.match("^[A-Za-z0-9]{26,35}$", address)

    def isDomain(self, address):
        # Overridden by dns-resolver plugins; base implementation knows no domains.
        return False

    @Cached(timeout=10)
    def isDomainCached(self, address):
        return self.isDomain(address)

    def resolveDomain(self, domain):
        # Overridden by dns-resolver plugins; base implementation resolves nothing.
        return False

    @Cached(timeout=10)
    def resolveDomainCached(self, domain):
        return self.resolveDomain(domain)

    # Checks if the address is blocked. To be implemented in content filter plugins.
    # Since 0.8.0
    def isAddressBlocked(self, address):
        return False

    # Return: Site object or None if not found
    def get(self, address):
        if self.isDomainCached(address):
            address_resolved = self.resolveDomainCached(address)
            if address_resolved:
                address = address_resolved

        if not self.loaded:  # Not loaded yet
            self.log.debug("Loading site: %s..." % address)  # fixed: removed stray ")" from the message
            self.load()
        site = self.sites.get(address)

        return site

    def add(self, address, all_file=True, settings=None, **kwargs):
        """Create and register a new Site; returns the Site, or False when
        the address is invalid. Reuses an existing site on case mismatch."""
        from .Site import Site
        self.sites_changed = int(time.time())
        # Try to find site with differect case
        for recover_address, recover_site in list(self.sites.items()):
            if recover_address.lower() == address.lower():
                return recover_site

        if not self.isAddress(address):
            return False  # Not address: %s % address
        self.log.debug("Added new site: %s" % address)
        config.loadTrackersFile()
        site = Site(address, settings=settings)
        self.sites[address] = site
        if not site.settings["serving"]:  # Maybe it was deleted before
            site.settings["serving"] = True
            site.saveSettings()
        if all_file:  # Also download user files on first sync
            site.download(check_size=True, blind_includes=True)
        return site

    # Return or create site and start download site files
    def need(self, address, *args, **kwargs):
        if self.isDomainCached(address):
            address_resolved = self.resolveDomainCached(address)
            if address_resolved:
                address = address_resolved

        site = self.get(address)
        if not site:  # Site not exist yet
            site = self.add(address, *args, **kwargs)
        return site

    def delete(self, address):
        """Unregister a site and persist the change."""
        self.sites_changed = int(time.time())
        self.log.debug("Deleted site: %s" % address)
        del(self.sites[address])
        # Delete from sites.json
        self.save()

    # Lazy load sites
    def list(self):
        if not self.loaded:  # Not loaded yet
            self.log.debug("Sites not loaded yet...")
            self.load(startup=True)
        return self.sites

    # Return False if we never sent <inner_path> to <peer>
    # or if the file that was sent was older than <remote_modified>
    # so that send back logic is suppressed for <inner_path>.
    # True if <inner_path> can be sent back to <peer>.
    def checkSendBackLRU(self, site, peer, inner_path, remote_modified):
        key = site.address + ' - ' + peer.key + ' - ' + inner_path
        sent_modified = self.send_back_lru.get(key, 0)
        return remote_modified < sent_modified

    def addToSendBackLRU(self, site, peer, inner_path, modified):
        """Record that <inner_path> (mtime <modified>) was sent to <peer>,
        evicting the oldest entries beyond config.send_back_lru_size."""
        key = site.address + ' - ' + peer.key + ' - ' + inner_path
        if self.send_back_lru.get(key, None) is None:
            self.send_back_lru[key] = modified
            while len(self.send_back_lru) > config.send_back_lru_size:
                self.send_back_lru.popitem(last=False)
        else:
            self.send_back_lru.move_to_end(key, last=True)
|
||||||
|
|
||||||
|
site_manager = SiteManager()  # Singleton shared by the whole process

if config.action == "main":  # Don't connect / add myself to peerlist
    peer_blacklist = [("127.0.0.1", config.fileserver_port), ("::1", config.fileserver_port)]
else:
    peer_blacklist = []
|
||||||
|
|
|
@ -0,0 +1,698 @@
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import shutil
|
||||||
|
import json
|
||||||
|
import time
|
||||||
|
import errno
|
||||||
|
from collections import defaultdict
|
||||||
|
|
||||||
|
import sqlite3
|
||||||
|
import gevent.event
|
||||||
|
|
||||||
|
import util
|
||||||
|
from util import SafeRe
|
||||||
|
from Db.Db import Db
|
||||||
|
from Debug import Debug
|
||||||
|
from Config import config
|
||||||
|
from util import helper
|
||||||
|
from util import ThreadPool
|
||||||
|
from Plugin import PluginManager
|
||||||
|
from Translate import translate as _
|
||||||
|
|
||||||
|
|
||||||
|
# Shared filesystem thread pools: read and write concurrency is bounded by
# config values; batch operations (e.g. db rebuild) are serialized through a
# single-worker pool so only one can run at a time.
thread_pool_fs_read = ThreadPool.ThreadPool(config.threads_fs_read, name="FS read")
thread_pool_fs_write = ThreadPool.ThreadPool(config.threads_fs_write, name="FS write")
thread_pool_fs_batch = ThreadPool.ThreadPool(1, name="FS batch")
|
||||||
|
|
||||||
|
class VerifyFiles_Notificator(object):
    """Throttled progress reporter for file verification: pushes the number
    of scanned files to the site's websockets at most every 0.25 seconds."""

    def __init__(self, site, quick_check):
        self.site = site
        self.quick_check = quick_check  # True: quick (existence/size) check, False: full verify
        self.scanned_files = 0
        self.websocket_update_interval = 0.25  # minimum seconds between websocket pushes
        self.websocket_update_time = time.time()

    def inc(self):
        # Count one scanned file; push an update once the throttle window elapsed.
        self.scanned_files += 1
        if time.time() - self.websocket_update_time > self.websocket_update_interval:
            self.send()

    def send(self):
        # Push the current counter, keyed by the check mode.
        self.websocket_update_time = time.time()
        key = "checking" if self.quick_check else "verifying"
        self.site.updateWebsocket(**{key: self.scanned_files})
|
||||||
|
|
||||||
|
@PluginManager.acceptPlugins
|
||||||
|
class SiteStorage(object):
|
||||||
|
def __init__(self, site, allow_create=True):
|
||||||
|
self.site = site
|
||||||
|
self.directory = "%s/%s" % (config.data_dir, self.site.address) # Site data diretory
|
||||||
|
self.allowed_dir = os.path.abspath(self.directory) # Only serve file within this dir
|
||||||
|
self.log = site.log
|
||||||
|
self.db = None # Db class
|
||||||
|
self.db_checked = False # Checked db tables since startup
|
||||||
|
self.event_db_busy = None # Gevent AsyncResult if db is working on rebuild
|
||||||
|
self.has_db = self.isFile("dbschema.json") # The site has schema
|
||||||
|
|
||||||
|
if not os.path.isdir(self.directory):
|
||||||
|
if allow_create:
|
||||||
|
os.mkdir(self.directory) # Create directory if not found
|
||||||
|
else:
|
||||||
|
raise Exception("Directory not exists: %s" % self.directory)
|
||||||
|
|
||||||
|
def getDbFile(self):
|
||||||
|
if self.db:
|
||||||
|
return self.db.schema["db_file"]
|
||||||
|
else:
|
||||||
|
if self.isFile("dbschema.json"):
|
||||||
|
schema = self.loadJson("dbschema.json")
|
||||||
|
return schema["db_file"]
|
||||||
|
else:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Create new databaseobject with the site's schema
|
||||||
|
def openDb(self, close_idle=False):
|
||||||
|
schema = self.getDbSchema()
|
||||||
|
db_path = self.getPath(schema["db_file"])
|
||||||
|
return Db(schema, db_path, close_idle=close_idle)
|
||||||
|
|
||||||
|
def closeDb(self, reason="Unknown (SiteStorage)"):
|
||||||
|
if self.db:
|
||||||
|
self.db.close(reason)
|
||||||
|
self.event_db_busy = None
|
||||||
|
self.db = None
|
||||||
|
|
||||||
|
def getDbSchema(self):
|
||||||
|
try:
|
||||||
|
self.site.needFile("dbschema.json")
|
||||||
|
schema = self.loadJson("dbschema.json")
|
||||||
|
except Exception as err:
|
||||||
|
raise Exception("dbschema.json is not a valid JSON: %s" % err)
|
||||||
|
return schema
|
||||||
|
|
||||||
|
    def loadDb(self):
        """Ensure dbschema.json is present, then open the site database,
        rebuilding it when the db file is missing/empty or tables changed."""
        self.log.debug("No database, waiting for dbschema.json...")
        self.site.needFile("dbschema.json", priority=3)
        self.log.debug("Got dbschema.json")
        self.has_db = self.isFile("dbschema.json")  # Recheck if dbschema exist
        if self.has_db:
            schema = self.getDbSchema()
            db_path = self.getPath(schema["db_file"])
            if not os.path.isfile(db_path) or os.path.getsize(db_path) == 0:
                try:
                    self.rebuildDb(reason="Missing database")
                except Exception as err:
                    # Rebuild failure is logged but not fatal: openDb() below may still work.
                    self.log.error(err)
                    pass

            if self.db:
                self.db.close("Gettig new db for SiteStorage")
            self.db = self.openDb(close_idle=True)
            try:
                changed_tables = self.db.checkTables()
                if changed_tables:
                    self.rebuildDb(delete_db=False, reason="Changed tables")  # TODO: only update the changed table datas
            except sqlite3.OperationalError:
                # Table check can fail on a concurrently-busy db; leave the db as-is.
                pass
|
||||||
|
|
||||||
|
# Return db class
|
||||||
|
    # Return db class
    @util.Noparallel()
    def getDb(self):
        """Return the Db instance, opening it lazily and waiting out any
        rebuild that is currently in progress."""
        if self.event_db_busy:  # Db not ready for queries
            self.log.debug("Wating for db...")
            self.event_db_busy.get()  # Wait for event
        if not self.db:
            self.loadDb()
        return self.db
|
||||||
|
|
||||||
|
def updateDbFile(self, inner_path, file=None, cur=None):
|
||||||
|
path = self.getPath(inner_path)
|
||||||
|
if cur:
|
||||||
|
db = cur.db
|
||||||
|
else:
|
||||||
|
db = self.getDb()
|
||||||
|
return db.updateJson(path, file, cur)
|
||||||
|
|
||||||
|
# Return possible db files for the site
|
||||||
|
@thread_pool_fs_read.wrap
|
||||||
|
def getDbFiles(self):
|
||||||
|
found = 0
|
||||||
|
for content_inner_path, content in self.site.content_manager.contents.items():
|
||||||
|
# content.json file itself
|
||||||
|
if self.isFile(content_inner_path):
|
||||||
|
yield content_inner_path, self.getPath(content_inner_path)
|
||||||
|
else:
|
||||||
|
self.log.debug("[MISSING] %s" % content_inner_path)
|
||||||
|
# Data files in content.json
|
||||||
|
content_inner_path_dir = helper.getDirname(content_inner_path) # Content.json dir relative to site
|
||||||
|
for file_relative_path in list(content.get("files", {}).keys()) + list(content.get("files_optional", {}).keys()):
|
||||||
|
if not file_relative_path.endswith(".json") and not file_relative_path.endswith("json.gz"):
|
||||||
|
continue # We only interesed in json files
|
||||||
|
file_inner_path = content_inner_path_dir + file_relative_path # File Relative to site dir
|
||||||
|
file_inner_path = file_inner_path.strip("/") # Strip leading /
|
||||||
|
if self.isFile(file_inner_path):
|
||||||
|
yield file_inner_path, self.getPath(file_inner_path)
|
||||||
|
else:
|
||||||
|
self.log.debug("[MISSING] %s" % file_inner_path)
|
||||||
|
found += 1
|
||||||
|
if found % 100 == 0:
|
||||||
|
time.sleep(0.001) # Context switch to avoid UI block
|
||||||
|
|
||||||
|
# Rebuild sql cache
|
||||||
|
    # Rebuild sql cache
    @util.Noparallel()
    @thread_pool_fs_batch.wrap
    def rebuildDb(self, delete_db=True, reason="Unknown"):
        """Recreate the site database from the json files listed in content.json.

        delete_db: remove the existing db file first; when False only the
        tables are recreated in place.
        Returns False when the site has no dbschema.json, True on completion.
        While running, event_db_busy blocks concurrent getDb() callers.
        """
        self.log.info("Rebuilding db (reason: %s)..." % reason)
        self.has_db = self.isFile("dbschema.json")
        if not self.has_db:
            return False

        schema = self.loadJson("dbschema.json")
        db_path = self.getPath(schema["db_file"])
        if os.path.isfile(db_path) and delete_db:
            if self.db:
                self.closeDb("rebuilding")  # Close db if open
                time.sleep(0.5)  # give pending operations a moment to release the file
            self.log.info("Deleting %s" % db_path)
            try:
                os.unlink(db_path)
            except Exception as err:
                self.log.error("Delete error: %s" % err)

        if not self.db:
            self.db = self.openDb()
        self.event_db_busy = gevent.event.AsyncResult()  # park getDb() callers until the rebuild finishes

        self.log.info("Rebuild: Creating tables...")

        # raise DbTableError if not valid
        self.db.checkTables()

        cur = self.db.getCursor()
        cur.logging = False  # avoid per-statement log spam during bulk import
        s = time.time()
        self.log.info("Rebuild: Getting db files...")
        db_files = list(self.getDbFiles())
        num_imported = 0
        num_total = len(db_files)
        num_error = 0

        self.log.info("Rebuild: Importing data...")
        try:
            if num_total > 100:
                self.site.messageWebsocket(
                    _["Database rebuilding...<br>Imported {0} of {1} files (error: {2})..."].format(
                        "0000", num_total, num_error
                    ), "rebuild", 0
                )
            for file_inner_path, file_path in db_files:
                try:
                    if self.updateDbFile(file_inner_path, file=open(file_path, "rb"), cur=cur):
                        num_imported += 1
                except Exception as err:
                    self.log.error("Error importing %s: %s" % (file_inner_path, Debug.formatException(err)))
                    num_error += 1

                if num_imported and num_imported % 100 == 0:
                    # Periodic progress push to the UI
                    self.site.messageWebsocket(
                        _["Database rebuilding...<br>Imported {0} of {1} files (error: {2})..."].format(
                            num_imported, num_total, num_error
                        ),
                        "rebuild", int(float(num_imported) / num_total * 100)
                    )
                    time.sleep(0.001)  # Context switch to avoid UI block

        finally:
            # Runs even on import errors: release waiters and commit what we have.
            cur.close()
            if num_total > 100:
                self.site.messageWebsocket(
                    _["Database rebuilding...<br>Imported {0} of {1} files (error: {2})..."].format(
                        num_imported, num_total, num_error
                    ), "rebuild", 100
                )
            self.log.info("Rebuild: Imported %s data file in %.3fs" % (num_imported, time.time() - s))
            self.event_db_busy.set(True)  # Event done, notify waiters
            self.event_db_busy = None  # Clear event
            self.db.commit("Rebuilt")

        return True
|
||||||
|
|
||||||
|
# Execute sql query or rebuild on dberror
|
||||||
|
def query(self, query, params=None):
|
||||||
|
if not query.strip().upper().startswith("SELECT"):
|
||||||
|
raise Exception("Only SELECT query supported")
|
||||||
|
|
||||||
|
try:
|
||||||
|
res = self.getDb().execute(query, params)
|
||||||
|
except sqlite3.DatabaseError as err:
|
||||||
|
if err.__class__.__name__ == "DatabaseError":
|
||||||
|
self.log.error("Database error: %s, query: %s, try to rebuilding it..." % (err, query))
|
||||||
|
try:
|
||||||
|
self.rebuildDb(reason="Query error")
|
||||||
|
except sqlite3.OperationalError:
|
||||||
|
pass
|
||||||
|
res = self.db.cur.execute(query, params)
|
||||||
|
else:
|
||||||
|
raise err
|
||||||
|
return res
|
||||||
|
|
||||||
|
def ensureDir(self, inner_path):
    """Create the directory for inner_path (with parents) if it is missing.

    Returns True when the directory was created, False when it already
    existed; any other OS error is propagated.
    """
    target = self.getPath(inner_path)
    try:
        os.makedirs(target)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise err
        return False
    return True
|
|
||||||
|
# Open file object
def open(self, inner_path, mode="rb", create_dirs=False, **kwargs):
    """Open a site file and return the file object.

    mode and extra keyword arguments are forwarded to the builtin open().
    With create_dirs=True the parent directory is created first.
    """
    path = self.getPath(inner_path)
    if create_dirs:
        # Make sure the parent directory exists before opening for write
        self.ensureDir(os.path.dirname(inner_path))
    return open(path, mode, **kwargs)
|
|
||||||
|
# Read a file's whole content
@thread_pool_fs_read.wrap
def read(self, inner_path, mode="rb"):
    """Return the entire content of a site file.

    Runs on the filesystem read thread pool. Fix over the original: the file
    object is now closed deterministically (the original `open(...).read()`
    leaked the handle until garbage collection).
    """
    with open(self.getPath(inner_path), mode) as file:
        return file.read()
|
|
||||||
|
@thread_pool_fs_write.wrap
def writeThread(self, inner_path, content):
    """Write content to a site file on the filesystem write thread pool.

    Args:
        inner_path: site-relative destination path.
        content: bytes, or a file-like object (anything exposing .read).
    """
    file_path = self.getPath(inner_path)
    # Create dir if not exist
    self.ensureDir(os.path.dirname(inner_path))
    # Write file
    if hasattr(content, 'read'):  # File-like object

        with open(file_path, "wb") as file:
            shutil.copyfileobj(content, file)  # Write buff to disk
    else:  # Simple string
        # content.json is replaced atomically so a crash mid-write cannot
        # leave the site's root manifest corrupted
        if inner_path == "content.json" and os.path.isfile(file_path):
            helper.atomicWrite(file_path, content)
        else:
            with open(file_path, "wb") as file:
                file.write(content)
|
|
||||||
|
# Write content to file
def write(self, inner_path, content):
    """Write content to a site file, then fire the onUpdated hook
    (which keeps the SQL cache in sync for json files)."""
    self.writeThread(inner_path, content)
    self.onUpdated(inner_path)
|
|
||||||
|
# Remove file from filesystem
def delete(self, inner_path):
    """Delete a site file from disk and notify listeners via onUpdated."""
    path = self.getPath(inner_path)
    os.unlink(path)
    self.onUpdated(inner_path, file=False)
|
|
||||||
|
def deleteDir(self, inner_path):
    """Remove an (empty) directory belonging to the site.

    Raises OSError if the directory is not empty, mirroring os.rmdir.
    """
    os.rmdir(self.getPath(inner_path))
|
|
||||||
|
def rename(self, inner_path_before, inner_path_after):
    """Rename a site file, retrying up to 3 times.

    Works around the Windows "The process cannot access the file because it
    is being used by another process." error by waiting a little longer
    before each retry. The last error is re-raised if all attempts fail.
    """
    last_err = None
    for attempt in range(3):
        last_err = None
        try:
            os.rename(self.getPath(inner_path_before), self.getPath(inner_path_after))
            break
        except Exception as err:
            last_err = err
            self.log.error("%s rename error: %s (retry #%s)" % (inner_path_before, err, attempt))
            time.sleep(0.1 + attempt)
    if last_err:
        raise last_err
|
|
||||||
|
# List files from a directory
@thread_pool_fs_read.wrap
def walk(self, dir_inner_path, ignore=None):
    """Recursively yield site-relative file paths under dir_inner_path.

    Args:
        dir_inner_path: site-relative directory to walk.
        ignore: optional regex (matched via SafeRe) of relative paths to
                skip; directories whose full subtree is covered by the
                pattern are pruned from the walk entirely.
    """
    directory = self.getPath(dir_inner_path)
    for root, dirs, files in os.walk(directory):
        root = root.replace("\\", "/")
        # Path of the current directory relative to the walk root ("" at root)
        root_relative_path = re.sub("^%s" % re.escape(directory), "", root).lstrip("/")
        for file_name in files:
            if root_relative_path:  # Not root dir
                file_relative_path = root_relative_path + "/" + file_name
            else:
                file_relative_path = file_name

            if ignore and SafeRe.match(ignore, file_relative_path):
                continue

            yield file_relative_path

        # Don't scan directory that is in the ignore pattern
        if ignore:
            dirs_filtered = []
            for dir_name in dirs:
                if root_relative_path:
                    dir_relative_path = root_relative_path + "/" + dir_name
                else:
                    dir_relative_path = dir_name

                # Prune only when the ignore pattern contains this directory's
                # whole subtree ("dir/.*") as one of its alternatives, or
                # ignores everything (".*")
                if ignore == ".*" or re.match(".*([|(]|^)%s([|)]|$)" % re.escape(dir_relative_path + "/.*"), ignore):
                    continue

                dirs_filtered.append(dir_name)
            # Mutate dirs in place so os.walk skips the pruned subtrees
            dirs[:] = dirs_filtered
|
|
||||||
|
# List the entries of a site directory
@thread_pool_fs_read.wrap
def list(self, dir_inner_path):
    """Return the names of entries (files and subdirectories) in a site
    directory, as os.listdir does."""
    return os.listdir(self.getPath(dir_inner_path))
|
|
||||||
|
# Site content updated
def onUpdated(self, inner_path, file=None):
    """React to a changed site file by keeping the SQL cache in sync.

    Args:
        inner_path: site-relative path of the changed file.
        file: optional file-like object with the new content, forwarded to
              updateDbFile; None means the file is read from disk.
    """
    # Update Sql cache
    should_load_to_db = inner_path.endswith(".json") or inner_path.endswith(".json.gz")
    if inner_path == "dbschema.json":
        self.has_db = self.isFile("dbschema.json")
        # Reopen DB to check changes
        if self.has_db:
            self.closeDb("New dbschema")
            # Reopen asynchronously so this callback does not block
            self.site.spawn(self.getDb)
    elif not config.disable_db and should_load_to_db and self.has_db:  # Load json file to db
        if config.verbose:
            self.log.debug("Loading json file to db: %s (file: %s)" % (inner_path, file))
        try:
            self.updateDbFile(inner_path, file)
        except Exception as err:
            self.log.error("Json %s load error: %s" % (inner_path, Debug.formatException(err)))
            # A failed load likely means a broken db; close it so the next
            # access reopens (and possibly rebuilds) it
            self.closeDb("Json load error")
|
|
||||||
|
# Load and parse json file
@thread_pool_fs_read.wrap
def loadJson(self, inner_path):
    """Parse a site file as UTF-8 JSON and return the resulting object."""
    with self.open(inner_path, "r", encoding="utf8") as file:
        return json.load(file)
|
|
||||||
|
# Write formatted json file
def writeJson(self, inner_path, data):
    """Serialize data using the project's canonical JSON formatting
    (helper.jsonDumps) and write it to the site file."""
    # Write to disk
    self.write(inner_path, helper.jsonDumps(data).encode("utf8"))
|
|
||||||
|
# Get file size
def getSize(self, inner_path):
    """Return the size of a site file in bytes, or 0 if it cannot be
    stat'd (missing file, bad path, ...)."""
    try:
        return os.path.getsize(self.getPath(inner_path))
    except Exception:
        # Best-effort: size queries must never raise
        return 0
|
|
||||||
|
# File exist
def isFile(self, inner_path):
    """Return True when inner_path points at a regular file on disk."""
    path = self.getPath(inner_path)
    return os.path.isfile(path)
|
|
||||||
|
# File or directory exist
def isExists(self, inner_path):
    """Return True when anything (file or directory) exists at inner_path."""
    path = self.getPath(inner_path)
    return os.path.exists(path)
|
|
||||||
|
# Dir exist
def isDir(self, inner_path):
    """Return True when inner_path points at a directory on disk."""
    path = self.getPath(inner_path)
    return os.path.isdir(path)
|
|
||||||
|
# Security check and return path of site's file
def getPath(self, inner_path):
    """Map a site-relative path to an absolute filesystem path.

    Args:
        inner_path: path relative to the site directory; "" returns the
                    site directory itself.

    Raises:
        Exception: for any path containing a parent-directory reference.

    Fix over the original: the check was only `"../" in inner_path`, which
    let a bare or trailing ".." segment ("..", "dir/..") escape the site
    sandbox.
    """
    inner_path = inner_path.replace("\\", "/")  # Windows separator fix
    if not inner_path:
        return self.directory

    # Reject every form of parent-directory reference
    if "../" in inner_path or inner_path == ".." or inner_path.endswith("/.."):
        raise Exception("File not allowed: %s" % inner_path)

    return "%s/%s" % (self.directory, inner_path)
|
|
||||||
|
# Get site dir relative path
def getInnerPath(self, path):
    """Convert an absolute path inside the site directory back to a
    site-relative path ("" for the site directory itself).

    Raises:
        Exception: if path is outside the site directory.

    Fix over the original: the bare `path.startswith(self.directory)` check
    accepted sibling directories sharing a name prefix (e.g. "/data/siteX"
    was treated as inside "/data/site"); the match now requires a real
    directory boundary.
    """
    if path == self.directory:
        inner_path = ""
    else:
        if path.startswith(self.directory + "/"):
            inner_path = path[len(self.directory) + 1:]
        else:
            raise Exception("File not allowed: %s" % path)
    return inner_path
|
|
||||||
|
# Verify all files sha512sum using content.json
# The result may not be accurate if self.site.isStopping().
# verifyFiles() return immediately in that case.
def verifyFiles(self, quick_check=False, add_optional=False, add_changed=True):
    """Check every file listed in the site's content.json manifests.

    Args:
        quick_check: compare file sizes only, instead of hashing content.
        add_optional: also queue missing-but-downloadable optional files.
        add_changed: add invalid non-user files to the bad list (disabled
                     for owned sites so local edits are not overwritten).

    Returns:
        defaultdict(int) of counters (num_file, num_file_invalid, ...) plus
        "bad_files": the list of inner paths that need re-download.
    """
    bad_files = []
    back = defaultdict(int)
    back["bad_files"] = bad_files
    i = 0
    self.log.debug("Verifing files...")

    if not self.site.content_manager.contents.get("content.json"):  # No content.json, download it first
        self.log.debug("VerifyFile content.json not exists")
        self.site.needFile("content.json", update=True)  # Force update to fix corrupt file
        self.site.content_manager.loadContent()  # Reload content.json

    # Trying to read self.site.content_manager.contents without being stuck
    # on reading the long file list and also without getting
    # "RuntimeError: dictionary changed size during iteration"
    # We can't use just list(iteritems()) since it loads all the contents files
    # at once and gets unresponsive.
    # NOTE(review): .iteritems() presumably comes from the project's dict-like
    # contents container, not a builtin dict — confirm against ContentDbDict.
    contents = {}
    notificator = None
    tries = 0
    max_tries = 40
    stop = False
    while not stop:
        try:
            contents = {}
            notificator = VerifyFiles_Notificator(self.site, quick_check)
            for content_inner_path, content in self.site.content_manager.contents.iteritems():
                notificator.inc()
                contents[content_inner_path] = content
                if self.site.isStopping():
                    stop = True
                    break
            stop = True
        except RuntimeError as err:
            if "changed size during iteration" in str(err):
                tries += 1
                if tries >= max_tries:
                    self.log.info("contents.json file list changed during iteration. %s tries done. Giving up.", tries)
                    stop = True
                # NOTE(review): this "Trying again" log and sleep also run on
                # the give-up pass above — looks intentional-but-sloppy
                self.log.info("contents.json file list changed during iteration. Trying again... (%s)", tries)
                time.sleep(2 * tries)
            else:
                stop = True

    for content_inner_path, content in contents.items():
        back["num_content"] += 1
        i += 1
        if i % 50 == 0:
            time.sleep(0.001)  # Context switch to avoid gevent hangs

        if self.site.isStopping():
            break

        if not os.path.isfile(self.getPath(content_inner_path)):  # Missing content.json file
            back["num_content_missing"] += 1
            self.log.debug("[MISSING] %s" % content_inner_path)
            bad_files.append(content_inner_path)

        # Normal (required) files of this manifest
        for file_relative_path in list(content.get("files", {}).keys()):
            notificator.inc()
            back["num_file"] += 1
            file_inner_path = helper.getDirname(content_inner_path) + file_relative_path  # Relative to site dir
            file_inner_path = file_inner_path.strip("/")  # Strip leading /
            file_path = self.getPath(file_inner_path)
            if not os.path.isfile(file_path):
                back["num_file_missing"] += 1
                self.log.debug("[MISSING] %s" % file_inner_path)
                bad_files.append(file_inner_path)
                continue

            err = None

            if quick_check:
                file_size = os.path.getsize(file_path)
                expected_size = content["files"][file_relative_path]["size"]
                ok = file_size == expected_size
                if not ok:
                    err = "Invalid size: %s - actual, %s - expected" % (file_size, expected_size)
            else:
                try:
                    ok = self.site.content_manager.verifyFile(file_inner_path, open(file_path, "rb"))
                except Exception as err2:
                    err = err2
                    ok = False

            if not ok:
                back["num_file_invalid"] += 1
                self.log.debug("[INVALID] %s: %s" % (file_inner_path, err))
                if add_changed or content.get("cert_user_id"):  # If updating own site only add changed user files
                    bad_files.append(file_inner_path)

        # Optional files
        optional_added = 0
        optional_removed = 0
        for file_relative_path in list(content.get("files_optional", {}).keys()):
            notificator.inc()
            back["num_optional"] += 1
            file_node = content["files_optional"][file_relative_path]
            file_inner_path = helper.getDirname(content_inner_path) + file_relative_path  # Relative to site dir
            file_inner_path = file_inner_path.strip("/")  # Strip leading /
            file_path = self.getPath(file_inner_path)
            hash_id = self.site.content_manager.hashfield.getHashId(file_node["sha512"])
            if not os.path.isfile(file_path):
                # File gone from disk: drop it from the downloaded set, and
                # optionally queue it for re-download
                if self.site.content_manager.isDownloaded(file_inner_path, hash_id):
                    back["num_optional_removed"] += 1
                    self.log.debug("[OPTIONAL MISSING] %s" % file_inner_path)
                    self.site.content_manager.optionalRemoved(file_inner_path, hash_id, file_node["size"])
                if add_optional and self.site.isDownloadable(file_inner_path):
                    self.log.debug("[OPTIONAL ADDING] %s" % file_inner_path)
                    bad_files.append(file_inner_path)
                continue

            if quick_check:
                ok = os.path.getsize(file_path) == content["files_optional"][file_relative_path]["size"]
            else:
                try:
                    ok = self.site.content_manager.verifyFile(file_inner_path, open(file_path, "rb"))
                except Exception as err:
                    ok = False

            if ok:
                if not self.site.content_manager.isDownloaded(file_inner_path, hash_id):
                    back["num_optional_added"] += 1
                    self.site.content_manager.optionalDownloaded(file_inner_path, hash_id, file_node["size"])
                    optional_added += 1
                    self.log.debug("[OPTIONAL FOUND] %s" % file_inner_path)
            else:
                if self.site.content_manager.isDownloaded(file_inner_path, hash_id):
                    back["num_optional_removed"] += 1
                    self.site.content_manager.optionalRemoved(file_inner_path, hash_id, file_node["size"])
                    optional_removed += 1
                bad_files.append(file_inner_path)
                self.log.debug("[OPTIONAL CHANGED] %s" % file_inner_path)

        if config.verbose:
            self.log.debug(
                "%s verified: %s, quick: %s, optionals: +%s -%s" %
                (content_inner_path, len(content["files"]), quick_check, optional_added, optional_removed)
            )

    notificator.send()

    self.site.content_manager.contents.db.processDelayed()
    time.sleep(0.001)  # Context switch to avoid gevent hangs
    return back
|
||||||
|
|
||||||
|
# Check and try to fix site files integrity
def updateBadFiles(self, quick_check=True):
    """Verify the site's files and register the failing ones in
    site.bad_files so the downloader will re-fetch them."""
    started = time.time()
    result = self.verifyFiles(
        quick_check,
        add_optional=True,
        add_changed=not self.site.settings.get("own")  # Don't overwrite changed files if site owned
    )
    self.site.bad_files = {}
    for bad_file in result["bad_files"]:
        # Value 1 = first retry attempt
        self.site.bad_files[bad_file] = 1
    self.log.debug("Checked files in %.2fs... Found bad files: %s, Quick:%s" % (
        time.time() - started, len(result["bad_files"]), quick_check
    ))
|
|
||||||
|
# Delete site's all file
@thread_pool_fs_batch.wrap
def deleteFiles(self):
    """Delete every file the site's content.json manifests know about,
    then the database file and any empty directories.

    Progress is streamed to the UI via messageWebsocket.

    Returns:
        True when the site directory is completely gone, False when unknown
        files remained and the directory was left in place.
    """
    site_title = self.site.content_manager.contents.get("content.json", {}).get("title", self.site.address)
    message_id = "delete-%s" % self.site.address
    self.log.debug("Deleting files from content.json (title: %s)..." % site_title)

    files = []  # Get filenames
    content_inner_paths = list(self.site.content_manager.contents.keys())
    for i, content_inner_path in enumerate(content_inner_paths):
        content = self.site.content_manager.contents.get(content_inner_path, {})
        files.append(content_inner_path)
        # Add normal files
        for file_relative_path in list(content.get("files", {}).keys()):
            file_inner_path = helper.getDirname(content_inner_path) + file_relative_path  # Relative to site dir
            files.append(file_inner_path)
        # Add optional files
        for file_relative_path in list(content.get("files_optional", {}).keys()):
            file_inner_path = helper.getDirname(content_inner_path) + file_relative_path  # Relative to site dir
            files.append(file_inner_path)

        if i % 100 == 0:
            num_files = len(files)
            # Collection phase accounts for the first 25% of the progress bar
            self.site.messageWebsocket(
                _("Deleting site <b>{site_title}</b>...<br>Collected {num_files} files"),
                message_id, (i / len(content_inner_paths)) * 25
            )

    if self.isFile("dbschema.json"):
        self.log.debug("Deleting db file...")
        self.closeDb("Deleting site")
        self.has_db = False
        try:
            schema = self.loadJson("dbschema.json")
            db_path = self.getPath(schema["db_file"])
            if os.path.isfile(db_path):
                os.unlink(db_path)
        except Exception as err:
            # Best-effort: a stuck db file should not abort the site delete
            self.log.error("Db file delete error: %s" % err)

    num_files = len(files)
    for i, inner_path in enumerate(files):
        path = self.getPath(inner_path)
        if os.path.isfile(path):
            # Retry unlink to survive transient locks (e.g. Windows AV scans)
            for retry in range(5):
                try:
                    os.unlink(path)
                    break
                except Exception as err:
                    self.log.error("Error removing %s: %s, try #%s" % (inner_path, err, retry))
                    time.sleep(float(retry) / 10)
        if i % 100 == 0:
            # Deletion phase spans 25%..75% of the progress bar
            self.site.messageWebsocket(
                _("Deleting site <b>{site_title}</b>...<br>Deleting file {i}/{num_files}"),
                message_id, 25 + (i / num_files) * 50
            )
        self.onUpdated(inner_path, False)

    self.log.debug("Deleting empty dirs...")
    i = 0
    # Bottom-up walk so children are removed before their parents
    for root, dirs, files in os.walk(self.directory, topdown=False):
        for dir in dirs:
            path = os.path.join(root, dir)
            if os.path.isdir(path):
                try:
                    i += 1
                    if i % 100 == 0:
                        self.site.messageWebsocket(
                            _("Deleting site <b>{site_title}</b>...<br>Deleting empty directories {i}"),
                            message_id, 85
                        )
                    os.rmdir(path)
                except OSError:  # Not empty
                    pass

    if os.path.isdir(self.directory) and os.listdir(self.directory) == []:
        os.rmdir(self.directory)  # Remove sites directory if empty

    if os.path.isdir(self.directory):
        self.log.debug("Some unknown file remained in site data dir: %s..." % self.directory)
        self.site.messageWebsocket(
            _("Deleting site <b>{site_title}</b>...<br>Site deleted, but some unknown files left in the directory"),
            message_id, 100
        )
        return False  # Some files not deleted
    else:
        self.log.debug("Site %s data directory deleted: %s..." % (site_title, self.directory))

        self.site.messageWebsocket(
            _("Deleting site <b>{site_title}</b>...<br>All files deleted successfully"),
            message_id, 100
        )

        return True  # All clean
|
|
@ -0,0 +1,21 @@
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2019 Ivan Machugovskiy
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
|
@ -0,0 +1,160 @@
|
||||||
|
def loads(data):
    """Decode a bencoded byte string into Python objects.

    Mapping: integers -> int, byte strings -> bytes, lists -> list,
    dicts -> dict with bytes keys.

    Args:
        data: the complete bencoded document as bytes.

    Raises:
        TypeError: if data is not bytes.
        ValueError: on any malformed input, duplicate dict key, non-bytes
                    dict key, or trailing bytes after the top-level value.
    """
    if not isinstance(data, bytes):
        raise TypeError("Expected 'bytes' object, got {}".format(type(data)))

    # Cursor into data, shared by all nested parsers via nonlocal
    offset = 0


    def parseInteger():
        # Parse "i<digits>e" (optionally negative); cursor sits on the 'i'
        nonlocal offset

        offset += 1
        had_digit = False
        abs_value = 0

        sign = 1
        if data[offset] == ord("-"):
            sign = -1
            offset += 1
        while offset < len(data):
            if data[offset] == ord("e"):
                # End of string
                offset += 1
                if not had_digit:
                    raise ValueError("Integer without value")
                break
            if ord("0") <= data[offset] <= ord("9"):
                abs_value = abs_value * 10 + int(chr(data[offset]))
                had_digit = True
                offset += 1
            else:
                raise ValueError("Invalid integer")
        else:
            raise ValueError("Unexpected EOF, expected integer")

        # NOTE(review): unreachable — the loop can only exit via the break
        # above (had_digit True) or by raising; kept for byte-identical code
        if not had_digit:
            raise ValueError("Empty integer")

        return sign * abs_value


    def parseString():
        # Parse "<length>:<bytes>"; cursor sits on the first length digit
        nonlocal offset

        length = int(chr(data[offset]))
        offset += 1

        while offset < len(data):
            if data[offset] == ord(":"):
                offset += 1
                break
            if ord("0") <= data[offset] <= ord("9"):
                length = length * 10 + int(chr(data[offset]))
                offset += 1
            else:
                raise ValueError("Invalid string length")
        else:
            raise ValueError("Unexpected EOF, expected string contents")

        if offset + length > len(data):
            raise ValueError("Unexpected EOF, expected string contents")
        offset += length

        return data[offset - length:offset]


    def parseList():
        # Parse "l<values>e"; cursor sits on the 'l'
        nonlocal offset

        offset += 1
        values = []

        while offset < len(data):
            if data[offset] == ord("e"):
                # End of list
                offset += 1
                return values
            else:
                values.append(parse())

        raise ValueError("Unexpected EOF, expected list contents")


    def parseDict():
        # Parse "d<key><value>...e"; cursor sits on the 'd'
        nonlocal offset

        offset += 1
        items = {}

        while offset < len(data):
            if data[offset] == ord("e"):
                # End of list
                offset += 1
                return items
            else:
                key, value = parse(), parse()
                if not isinstance(key, bytes):
                    raise ValueError("A dict key must be a byte string")
                if key in items:
                    raise ValueError("Duplicate dict key: {}".format(key))
                items[key] = value

        raise ValueError("Unexpected EOF, expected dict contents")


    def parse():
        # Dispatch on the type marker under the cursor
        nonlocal offset

        if data[offset] == ord("i"):
            return parseInteger()
        elif data[offset] == ord("l"):
            return parseList()
        elif data[offset] == ord("d"):
            return parseDict()
        elif ord("0") <= data[offset] <= ord("9"):
            return parseString()

        raise ValueError("Unknown type specifier: '{}'".format(chr(data[offset])))

    result = parse()

    if offset != len(data):
        raise ValueError("Expected EOF, got {} bytes left".format(len(data) - offset))

    return result
||||||
|
|
||||||
|
|
||||||
|
def dumps(data):
    """Encode a Python object to bencode bytes.

    Supported inputs: bytes, int, list, and dict with bytes keys (keys are
    emitted in sorted order). str is rejected explicitly — encode first.
    """
    out = bytearray()


    def emit(value):
        nonlocal out

        if isinstance(value, str):
            raise ValueError("bencode only supports bytes, not str. Use encode")

        if isinstance(value, bytes):
            out += str(len(value)).encode() + b":" + value
        elif isinstance(value, int):
            out += b"i" + str(value).encode() + b"e"
        elif isinstance(value, list):
            out += b"l"
            for item in value:
                emit(item)
            out += b"e"
        elif isinstance(value, dict):
            out += b"d"
            # Canonical bencode requires keys in sorted order
            for key in sorted(value.keys()):
                if not isinstance(key, bytes):
                    raise ValueError("Dict key can only be bytes, not {}".format(type(key)))
                emit(key)
                emit(value[key])
            out += b"e"
        else:
            raise ValueError("bencode only supports bytes, int, list and dict")


    emit(data)

    return bytes(out)
|
|
@ -0,0 +1,39 @@
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
def prefix(content):
    """Add vendor prefixes (-webkit-, -moz-, -o-, -ms-) to CSS content.

    Args:
        content: stylesheet as bytes.

    Returns:
        The rewritten stylesheet as bytes: @keyframes blocks are duplicated
        for -webkit-/-moz-, prefixable properties get all vendor variants,
        and linear-gradient() values are expanded per vendor.

    Fix over the original: the regex patterns used "\\s", "\\(", "\\*" inside
    plain bytes literals — invalid escape sequences that raise
    SyntaxWarning/DeprecationWarning on modern Python. The patterns are now
    raw byte strings; the compiled regexes (and output) are identical.
    """
    # Duplicate every @keyframes block for -webkit- and -moz-
    content = re.sub(
        rb"@keyframes (.*? {.*?}\s*})", b"@keyframes \\1\n@-webkit-keyframes \\1\n@-moz-keyframes \\1\n",
        content, flags=re.DOTALL
    )
    # Expand prefixable properties into all vendor variants
    content = re.sub(
        rb'([^-\*])(border-radius|box-shadow|appearance|transition|animation|box-sizing|' +
        rb'backface-visibility|transform|filter|perspective|animation-[a-z-]+): (.*?)([;}])',
        b'\\1-webkit-\\2: \\3; -moz-\\2: \\3; -o-\\2: \\3; -ms-\\2: \\3; \\2: \\3 \\4', content
    )
    # Expand linear-gradient() values into all vendor variants
    content = re.sub(
        rb'(?<=[^a-zA-Z0-9-])([a-zA-Z0-9-]+): {0,1}(linear-gradient)\((.*?)(\)[;\n])',
        b'\\1: -webkit-\\2(\\3);' +
        b'\\1: -moz-\\2(\\3);' +
        b'\\1: -o-\\2(\\3);' +
        b'\\1: -ms-\\2(\\3);' +
        b'\\1: \\2(\\3);', content
    )
    return content
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Demo: run the prefixer on a small stylesheet and print the result.
    # NOTE(review): the literal's internal indentation was lost in transit;
    # the whitespace below is reconstructed and only affects this demo output.
    print(prefix(b"""
.test {
border-radius: 5px;
background: linear-gradient(red, blue);
}


@keyframes flip {
0% { transform: perspective(120px) rotateX(0deg) rotateY(0deg); }
50% { transform: perspective(120px) rotateX(-180.1deg) rotateY(0deg) }
100% { transform: perspective(120px) rotateX(-180deg) rotateY(-179.9deg); }
}


""").decode("utf8"))
|
|
@ -0,0 +1,279 @@
|
||||||
|
from gevent.pywsgi import WSGIHandler, _InvalidClientInput
|
||||||
|
from gevent.queue import Queue
|
||||||
|
import gevent
|
||||||
|
import hashlib
|
||||||
|
import base64
|
||||||
|
import struct
|
||||||
|
import socket
|
||||||
|
import time
|
||||||
|
import sys
|
||||||
|
|
||||||
|
|
||||||
|
# Outgoing messages are fragmented into frames of at most this many bytes
SEND_PACKET_SIZE = 1300
# WebSocket frame opcodes (RFC 6455, section 5.2)
OPCODE_TEXT = 1
OPCODE_BINARY = 2
OPCODE_CLOSE = 8
OPCODE_PING = 9
OPCODE_PONG = 10
# WebSocket close status codes (RFC 6455, section 7.4.1)
STATUS_OK = 1000
STATUS_PROTOCOL_ERROR = 1002
STATUS_DATA_ERROR = 1007
STATUS_POLICY_VIOLATION = 1008
STATUS_TOO_LONG = 1009
||||||
|
|
||||||
|
|
||||||
|
class WebSocket:
|
||||||
|
def __init__(self, socket):
|
||||||
|
self.socket = socket
|
||||||
|
self.closed = False
|
||||||
|
self.status = None
|
||||||
|
self._receive_error = None
|
||||||
|
self._queue = Queue()
|
||||||
|
self.max_length = 10 * 1024 * 1024
|
||||||
|
gevent.spawn(self._listen)
|
||||||
|
|
||||||
|
|
||||||
|
def set_max_message_length(self, length):
|
||||||
|
self.max_length = length
|
||||||
|
|
||||||
|
|
||||||
|
def _listen(self):
|
||||||
|
try:
|
||||||
|
while True:
|
||||||
|
fin = False
|
||||||
|
message = bytearray()
|
||||||
|
is_first_message = True
|
||||||
|
start_opcode = None
|
||||||
|
while not fin:
|
||||||
|
payload, opcode, fin = self._get_frame(max_length=self.max_length - len(message))
|
||||||
|
# Make sure continuation frames have correct information
|
||||||
|
if not is_first_message and opcode != 0:
|
||||||
|
self._error(STATUS_PROTOCOL_ERROR)
|
||||||
|
if is_first_message:
|
||||||
|
if opcode not in (OPCODE_TEXT, OPCODE_BINARY):
|
||||||
|
self._error(STATUS_PROTOCOL_ERROR)
|
||||||
|
# Save opcode
|
||||||
|
start_opcode = opcode
|
||||||
|
message += payload
|
||||||
|
is_first_message = False
|
||||||
|
message = bytes(message)
|
||||||
|
if start_opcode == OPCODE_TEXT: # UTF-8 text
|
||||||
|
try:
|
||||||
|
message = message.decode()
|
||||||
|
except UnicodeDecodeError:
|
||||||
|
self._error(STATUS_DATA_ERROR)
|
||||||
|
self._queue.put(message)
|
||||||
|
except Exception as e:
|
||||||
|
self.closed = True
|
||||||
|
self._receive_error = e
|
||||||
|
self._queue.put(None) # To make sure the error is read
|
||||||
|
|
||||||
|
|
||||||
|
def receive(self):
|
||||||
|
if not self._queue.empty():
|
||||||
|
return self.receive_nowait()
|
||||||
|
if isinstance(self._receive_error, EOFError):
|
||||||
|
return None
|
||||||
|
if self._receive_error:
|
||||||
|
raise self._receive_error
|
||||||
|
self._queue.peek()
|
||||||
|
return self.receive_nowait()
|
||||||
|
|
||||||
|
|
||||||
|
def receive_nowait(self):
|
||||||
|
ret = self._queue.get_nowait()
|
||||||
|
if self._receive_error and not isinstance(self._receive_error, EOFError):
|
||||||
|
raise self._receive_error
|
||||||
|
return ret
|
||||||
|
|
||||||
|
|
||||||
|
def send(self, data):
|
||||||
|
if self.closed:
|
||||||
|
raise EOFError()
|
||||||
|
if isinstance(data, str):
|
||||||
|
self._send_frame(OPCODE_TEXT, data.encode())
|
||||||
|
elif isinstance(data, bytes):
|
||||||
|
self._send_frame(OPCODE_BINARY, data)
|
||||||
|
else:
|
||||||
|
raise TypeError("Expected str or bytes, got " + repr(type(data)))
|
||||||
|
|
||||||
|
|
||||||
|
# Reads a frame from the socket. Pings, pongs and close packets are handled
|
||||||
|
# automatically
|
||||||
|
def _get_frame(self, max_length):
|
||||||
|
while True:
|
||||||
|
payload, opcode, fin = self._read_frame(max_length=max_length)
|
||||||
|
if opcode == OPCODE_PING:
|
||||||
|
self._send_frame(OPCODE_PONG, payload)
|
||||||
|
elif opcode == OPCODE_PONG:
|
||||||
|
pass
|
||||||
|
elif opcode == OPCODE_CLOSE:
|
||||||
|
if len(payload) >= 2:
|
||||||
|
self.status = struct.unpack("!H", payload[:2])[0]
|
||||||
|
was_closed = self.closed
|
||||||
|
self.closed = True
|
||||||
|
if not was_closed:
|
||||||
|
# Send a close frame in response
|
||||||
|
self.close(STATUS_OK)
|
||||||
|
raise EOFError()
|
||||||
|
else:
|
||||||
|
return payload, opcode, fin
|
||||||
|
|
||||||
|
|
||||||
|
# Low-level function, use _get_frame instead
|
||||||
|
def _read_frame(self, max_length):
|
||||||
|
header = self._recv_exactly(2)
|
||||||
|
|
||||||
|
if not (header[1] & 0x80):
|
||||||
|
self._error(STATUS_POLICY_VIOLATION)
|
||||||
|
|
||||||
|
opcode = header[0] & 0xf
|
||||||
|
fin = bool(header[0] & 0x80)
|
||||||
|
|
||||||
|
payload_length = header[1] & 0x7f
|
||||||
|
if payload_length == 126:
|
||||||
|
payload_length = struct.unpack("!H", self._recv_exactly(2))[0]
|
||||||
|
elif payload_length == 127:
|
||||||
|
payload_length = struct.unpack("!Q", self._recv_exactly(8))[0]
|
||||||
|
|
||||||
|
# Control frames are handled in a special way
|
||||||
|
if opcode in (OPCODE_PING, OPCODE_PONG):
|
||||||
|
max_length = 125
|
||||||
|
|
||||||
|
if payload_length > max_length:
|
||||||
|
self._error(STATUS_TOO_LONG)
|
||||||
|
|
||||||
|
mask = self._recv_exactly(4)
|
||||||
|
payload = self._recv_exactly(payload_length)
|
||||||
|
payload = self._unmask(payload, mask)
|
||||||
|
|
||||||
|
return payload, opcode, fin
|
||||||
|
|
||||||
|
|
||||||
|
def _recv_exactly(self, length):
|
||||||
|
buf = bytearray()
|
||||||
|
while len(buf) < length:
|
||||||
|
block = self.socket.recv(min(4096, length - len(buf)))
|
||||||
|
if block == b"":
|
||||||
|
raise EOFError()
|
||||||
|
buf += block
|
||||||
|
return bytes(buf)
|
||||||
|
|
||||||
|
|
||||||
|
def _unmask(self, payload, mask):
|
||||||
|
def gen(c):
|
||||||
|
return bytes([x ^ c for x in range(256)])
|
||||||
|
|
||||||
|
|
||||||
|
payload = bytearray(payload)
|
||||||
|
payload[0::4] = payload[0::4].translate(gen(mask[0]))
|
||||||
|
payload[1::4] = payload[1::4].translate(gen(mask[1]))
|
||||||
|
payload[2::4] = payload[2::4].translate(gen(mask[2]))
|
||||||
|
payload[3::4] = payload[3::4].translate(gen(mask[3]))
|
||||||
|
return bytes(payload)
|
||||||
|
|
||||||
|
|
||||||
|
def _send_frame(self, opcode, data):
    """Send *data* as one or more frames carrying *opcode*.

    Payloads larger than SEND_PACKET_SIZE are fragmented: only the
    first fragment carries the opcode (continuations use 0) and only
    the last has the FIN bit set. Fragments of up to 125 bytes use
    the short length encoding, larger ones the 16-bit extended length
    (fragments are assumed to stay below 64 KiB).
    """
    # Bug fix: an empty payload must still produce one (empty, final)
    # frame — e.g. a pong answering an empty ping, or an empty message.
    # The original loop over range(0, 0, SEND_PACKET_SIZE) sent nothing.
    offsets = range(0, len(data), SEND_PACKET_SIZE) or [0]
    for i in offsets:
        part = data[i:i + SEND_PACKET_SIZE]
        is_last = i + SEND_PACKET_SIZE >= len(data)
        first_byte = (opcode if i == 0 else 0) | (int(is_last) << 7)
        if len(part) < 126:
            header = bytes([first_byte, len(part)])
        else:
            header = bytes([first_byte, 126]) + struct.pack("!H", len(part))
        self.socket.sendall(header + part)
|
||||||
|
|
||||||
|
|
||||||
|
def _error(self, status):
    # Abort the connection: send a close frame with the given status
    # code, then unwind the caller by raising EOFError.
    self.close(status)
    raise EOFError()
|
||||||
|
|
||||||
|
|
||||||
|
def close(self, status=STATUS_OK):
    """Mark the websocket closed, send a close frame and close the socket.

    Failures while sending the close frame to an already-gone peer are
    ignored; the underlying socket is closed in any case.
    """
    self.closed = True
    try:
        self._send_frame(OPCODE_CLOSE, struct.pack("!H", status))
    except (BrokenPipeError, ConnectionResetError):
        # Peer already disconnected; nothing left to notify.
        pass
    self.socket.close()
|
||||||
|
|
||||||
|
|
||||||
|
class WebSocketHandler(WSGIHandler):
    """gevent WSGI handler that upgrades websocket requests.

    Non-websocket requests are delegated to the parent WSGIHandler;
    upgrade requests get a ``wsgi.websocket`` entry in the environ
    holding a WebSocket wrapping the raw socket.
    """

    def handle_one_response(self):
        # Per-request bookkeeping normally done by the parent class.
        self.time_start = time.time()
        self.status = None
        self.headers_sent = False

        self.result = None
        self.response_use_chunked = False
        self.response_length = 0

        # Only intercept genuine websocket upgrades (Connection: upgrade
        # may be a comma-separated list, compared case-insensitively).
        http_connection = [s.strip().lower() for s in self.environ.get("HTTP_CONNECTION", "").split(",")]
        if "upgrade" not in http_connection or self.environ.get("HTTP_UPGRADE", "").lower() != "websocket":
            # Not my problem
            return super(WebSocketHandler, self).handle_one_response()

        if "HTTP_SEC_WEBSOCKET_KEY" not in self.environ:
            self.start_response("400 Bad Request", [])
            return

        # Generate Sec-Websocket-Accept header:
        # base64(sha1(client key + fixed GUID)) per RFC 6455.
        accept = self.environ["HTTP_SEC_WEBSOCKET_KEY"].encode()
        accept += b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
        accept = base64.b64encode(hashlib.sha1(accept).digest()).decode()

        # Accept the upgrade (the trailing call flushes the headers).
        self.start_response("101 Switching Protocols", [
            ("Upgrade", "websocket"),
            ("Connection", "Upgrade"),
            ("Sec-Websocket-Accept", accept)
        ])(b"")

        self.environ["wsgi.websocket"] = WebSocket(self.socket)

        # Can't call super because it sets invalid flags like "status";
        # this mirrors the parent's error handling instead.
        try:
            try:
                self.run_application()
            finally:
                try:
                    self.wsgi_input._discard()
                except (socket.error, IOError):
                    pass
        except _InvalidClientInput:
            self._send_error_response_if_possible(400)
        except socket.error as ex:
            if ex.args[0] in self.ignored_socket_errors:
                # Benign disconnect-style errors: just drop the connection.
                self.close_connection = True
            else:
                self.handle_error(*sys.exc_info())
        except:  # pylint:disable=bare-except
            self.handle_error(*sys.exc_info())
        finally:
            self.time_finish = time.time()
            self.log_request()
            self.close_connection = True

    def process_result(self):
        # For websocket requests the WSGI result has no body to send,
        # but it must still be iterated.
        if "wsgi.websocket" in self.environ:
            if self.result is None:
                return
            # Flushing result is required for werkzeug compatibility
            for elem in self.result:
                pass
        else:
            super(WebSocketHandler, self).process_result()

    @property
    def version(self):
        # Websocket protocol version announced by the client, or None
        # before/without an environ.
        if not self.environ:
            return None

        return self.environ.get('HTTP_SEC_WEBSOCKET_VERSION')
|
|
@ -0,0 +1 @@
|
||||||
|
from .libsecp256k1message import *
|
|
@ -0,0 +1,162 @@
|
||||||
|
import hashlib
|
||||||
|
import base64
|
||||||
|
from coincurve import PrivateKey, PublicKey
|
||||||
|
from base58 import b58encode_check, b58decode_check
|
||||||
|
from hmac import compare_digest
|
||||||
|
from util.Electrum import format as zero_format
|
||||||
|
|
||||||
|
# Valid coincurve recovery ids (trailing byte of the r||s||recid layout).
RECID_MIN = 0
RECID_MAX = 3
# Electrum-style signatures store recovery_id + 27 as the leading byte.
RECID_UNCOMPR = 27
# Length in bytes of a serialized compact signature (both layouts).
LEN_COMPACT_SIG = 65
|
||||||
|
|
||||||
|
class SignatureError(ValueError):
    """Raised when a signature is forged or otherwise fails verification."""
    pass
|
||||||
|
|
||||||
|
def bitcoin_address():
    """Generate a fresh (public address, secret address) pair."""
    publickey, secretkey = key_pair()
    return (compute_public_address(publickey), compute_secret_address(secretkey))
|
||||||
|
|
||||||
|
def key_pair():
    """Generate a (public key, secret key) pair."""
    secret = PrivateKey()
    public = PublicKey.from_secret(secret.secret)
    return (public, secret)
|
||||||
|
|
||||||
|
def compute_public_address(publickey, compressed=False):
    """Convert a public key to a base58check public Bitcoin address."""
    # Version byte 0x00 marks a mainnet P2PKH address.
    digest = public_digest(publickey, compressed=compressed)
    return b58encode_check(b'\x00' + digest)
|
||||||
|
|
||||||
|
def compute_secret_address(secretkey):
    """Convert a secret key to a base58check secret Bitcoin address (WIF)."""
    # Version byte 0x80 marks a mainnet private key.
    return b58encode_check(b'\x80' + secretkey.secret)
|
||||||
|
|
||||||
|
def public_digest(publickey, compressed=False):
    """Convert a public key to a ripemd160(sha256()) digest (hash160).

    NOTE(review): relies on 'ripemd160' being available via hashlib/OpenSSL
    on this platform — confirm for the deployment environment.
    """
    publickey_hex = publickey.format(compressed=compressed)
    return hashlib.new('ripemd160', hashlib.sha256(publickey_hex).digest()).digest()
|
||||||
|
|
||||||
|
def address_public_digest(address):
    """Convert a public Bitcoin address to its ripemd160(sha256()) digest."""
    decoded = b58decode_check(address)
    # Expect the 0x00 version byte followed by the 20-byte hash.
    if len(decoded) != 21 or not decoded.startswith(b'\x00'):
        raise ValueError('Invalid public key digest')
    return decoded[1:]
|
||||||
|
|
||||||
|
def _decode_bitcoin_secret(address):
    """Decode an uncompressed WIF secret address to the raw 32-byte secret."""
    decoded = b58decode_check(address)
    # Expect the 0x80 version byte followed by the 32-byte secret.
    if len(decoded) != 33 or not decoded.startswith(b'\x80'):
        raise ValueError('Invalid secret key. Uncompressed keys only.')
    return decoded[1:]
|
||||||
|
|
||||||
|
def recover_public_key(signature, message):
    """Recover the public key from a coincurve-layout signature and message.

    The recovered public key guarantees a correct signature."""
    return PublicKey.from_signature_and_message(signature, message)
|
||||||
|
|
||||||
|
def decode_secret_key(address):
    """Convert a secret Bitcoin address (WIF) to a coincurve PrivateKey."""
    return PrivateKey(_decode_bitcoin_secret(address))
|
||||||
|
|
||||||
|
|
||||||
|
def coincurve_sig(electrum_signature):
    """Reserialize an Electrum compact signature into coincurve layout.

    coincurve := r + s + recovery_id, where 0 <= recovery_id <= 3.
    https://github.com/bitcoin-core/secp256k1/blob/0b7024185045a49a1a6a4c5615bf31c94f63d9c4/src/modules/recovery/main_impl.h#L35
    """
    if len(electrum_signature) != LEN_COMPACT_SIG:
        raise ValueError('Not a 65-byte compact signature.')
    # Electrum's header byte is recovery_id + 27 (plus a compression flag
    # in bit 2, masked away here).
    recid = (electrum_signature[0] - 27) & 3
    if not (RECID_MIN <= recid <= RECID_MAX):
        raise ValueError('Recovery ID %d is not supported.' % recid)
    return electrum_signature[1:] + recid.to_bytes(1, 'big')
|
||||||
|
|
||||||
|
|
||||||
|
def electrum_sig(coincurve_signature):
    """Reserialize a coincurve compact signature into Electrum layout.

    electrum := recovery_id + r + s, where 27 <= recovery_id <= 30.
    https://github.com/scintill/bitcoin-signature-tools/blob/ed3f5be5045af74a54c92d3648de98c329d9b4f7/key.cpp#L285
    """
    if len(coincurve_signature) != LEN_COMPACT_SIG:
        raise ValueError('Not a 65-byte compact signature.')
    # The trailing coincurve recovery id becomes the Electrum header byte.
    recid = coincurve_signature[-1] + RECID_UNCOMPR
    if not (RECID_UNCOMPR + RECID_MIN <= recid <= RECID_UNCOMPR + RECID_MAX):
        raise ValueError('Recovery ID %d is not supported.' % recid)
    return recid.to_bytes(1, 'big') + coincurve_signature[:-1]
|
||||||
|
|
||||||
|
def sign_data(secretkey, byte_string):
    """Sign *byte_string* with *secretkey*.

    Returns a serialized signature compatible with Electrum (ZeroNet).
    """
    # Encode, sign recoverably, then reserialize to the Electrum layout.
    encoded = zero_format(byte_string)
    return electrum_sig(secretkey.sign_recoverable(encoded))
|
||||||
|
|
||||||
|
def verify_data(key_digest, electrum_signature, byte_string):
    """Verify if [electrum_signature] of [byte_string] is correctly signed and
    is signed with the secret counterpart of [key_digest].
    Raise SignatureError if the signature is forged or otherwise problematic.
    Returns None on success.
    """
    # reserialize signature
    signature = coincurve_sig(electrum_signature)
    # encode the message
    encoded = zero_format(byte_string)
    # recover full public key from signature
    # "which guarantees a correct signature"
    publickey = recover_public_key(signature, encoded)

    # verify that the message is correctly signed by the public key
    # (skipped: key recovery above already implies a valid signature)
    # correct_sig = verify_sig(publickey, signature, encoded)

    # verify that the public key is what we expect
    correct_key = verify_key(publickey, key_digest)

    if not correct_key:
        raise SignatureError('Signature is forged!')
|
||||||
|
|
||||||
|
def verify_sig(publickey, signature, byte_string):
    # True if *signature* over *byte_string* validates against *publickey*.
    return publickey.verify(signature, byte_string)
|
||||||
|
|
||||||
|
def verify_key(publickey, key_digest):
    # Timing-safe comparison of the expected digest against the digest
    # of the recovered public key.
    return compare_digest(key_digest, public_digest(publickey))
|
||||||
|
|
||||||
|
def recover_address(data, sign):
    """Recover the signer's public Bitcoin address from *data* and a
    base64-encoded Electrum signature."""
    signature_bytes = base64.b64decode(sign)
    # Bit 2 of the Electrum header byte flags a compressed public key.
    compressed = bool((signature_bytes[0] - 27) & 4)
    publickey = recover_public_key(coincurve_sig(signature_bytes), zero_format(data))
    return compute_public_address(publickey, compressed=compressed)
|
||||||
|
|
||||||
|
# Explicit public API. Note: bitcoin_address, coincurve_sig, electrum_sig,
# verify_sig and verify_key are not exported here.
__all__ = [
    'SignatureError',
    'key_pair', 'compute_public_address', 'compute_secret_address',
    'public_digest', 'address_public_digest', 'recover_public_key', 'decode_secret_key',
    'sign_data', 'verify_data', "recover_address"
]
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Quick throughput benchmark of verify and sign.
    # (base64 is already imported at module level; the unused
    # multiprocessing import and the unused `threads` list were removed.)
    import time

    s = time.time()
    privatekey = decode_secret_key(b"5JsunC55XGVqFQj5kPGK4MWgTL26jKbnPhjnmchSNPo75XXCwtk")
    for i in range(1000):
        data = bytes("hello", "utf8")
        address = recover_address(data, "HGbib2kv9gm9IJjDt1FXbXFczZi35u0rZR3iPUIt5GglDDCeIQ7v8eYXVNIaLoJRI4URGZrhwmsYQ9aVtRTnTfQ=")
    # Message fixed: the loop runs 1000 iterations, not 10000.
    print("- Verify x1000: %.3fs %s" % (time.time() - s, address))

    s = time.time()
    for i in range(1000):
        privatekey = decode_secret_key(b"5JsunC55XGVqFQj5kPGK4MWgTL26jKbnPhjnmchSNPo75XXCwtk")
        sign = sign_data(privatekey, b"hello")
        sign_b64 = base64.b64encode(sign)

    print("- Sign x1000: %.3fs" % (time.time() - s))
|
|
@ -0,0 +1,58 @@
|
||||||
|
[ req ]
|
||||||
|
default_bits = 2048
|
||||||
|
default_keyfile = server-key.pem
|
||||||
|
distinguished_name = subject
|
||||||
|
req_extensions = req_ext
|
||||||
|
x509_extensions = x509_ext
|
||||||
|
string_mask = utf8only
|
||||||
|
|
||||||
|
# The Subject DN can be formed using X501 or RFC 4514 (see RFC 4519 for a description).
|
||||||
|
# It's sort of a mashup. For example, RFC 4514 does not provide emailAddress.
|
||||||
|
[ subject ]
|
||||||
|
countryName = US
|
||||||
|
stateOrProvinceName = NY
|
||||||
|
localityName = New York
|
||||||
|
organizationName = Example, LLC
|
||||||
|
|
||||||
|
# Use a friendly name here because it's presented to the user. The server's DNS
|
||||||
|
# names are placed in Subject Alternative Names. Plus, DNS names here are deprecated
|
||||||
|
# by both IETF and CA/Browser Forums. If you place a DNS name here, then you
|
||||||
|
# must include the DNS name in the SAN too (otherwise, Chrome and others that
|
||||||
|
# strictly follow the CA/Browser Baseline Requirements will fail).
|
||||||
|
commonName = Example Company
|
||||||
|
|
||||||
|
emailAddress = test@example.com
|
||||||
|
|
||||||
|
# Section x509_ext is used when generating a self-signed certificate. I.e., openssl req -x509 ...
|
||||||
|
[ x509_ext ]
|
||||||
|
|
||||||
|
subjectKeyIdentifier = hash
|
||||||
|
authorityKeyIdentifier = keyid,issuer
|
||||||
|
|
||||||
|
basicConstraints = CA:FALSE
|
||||||
|
keyUsage = digitalSignature, keyEncipherment
|
||||||
|
extendedKeyUsage = clientAuth, serverAuth
|
||||||
|
subjectAltName = @alternate_names
|
||||||
|
|
||||||
|
# RFC 5280, Section 4.2.1.12 makes EKU optional
|
||||||
|
# CA/Browser Baseline Requirements, Appendix (B)(3)(G) makes me confused
|
||||||
|
# extendedKeyUsage = serverAuth, clientAuth
|
||||||
|
|
||||||
|
# Section req_ext is used when generating a certificate signing request. I.e., openssl req ...
|
||||||
|
[ req_ext ]
|
||||||
|
|
||||||
|
subjectKeyIdentifier = hash
|
||||||
|
|
||||||
|
basicConstraints = CA:FALSE
|
||||||
|
keyUsage = digitalSignature, keyEncipherment
|
||||||
|
extendedKeyUsage = clientAuth, serverAuth
|
||||||
|
subjectAltName = @alternate_names
|
||||||
|
|
||||||
|
# RFC 5280, Section 4.2.1.12 makes EKU optional
|
||||||
|
# CA/Browser Baseline Requirements, Appendix (B)(3)(G) makes me confused
|
||||||
|
# extendedKeyUsage = serverAuth, clientAuth
|
||||||
|
|
||||||
|
[ alternate_names ]
|
||||||
|
|
||||||
|
DNS.1 = $ENV::CN
|
||||||
|
DNS.2 = www.$ENV::CN
|
|
@ -0,0 +1,22 @@
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2014 Richard Moore
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
||||||
|
|
|
@ -0,0 +1,363 @@
|
||||||
|
pyaes
|
||||||
|
=====
|
||||||
|
|
||||||
|
A pure-Python implementation of the AES block cipher algorithm and the common modes of operation (CBC, CFB, CTR, ECB and OFB).
|
||||||
|
|
||||||
|
|
||||||
|
Features
|
||||||
|
--------
|
||||||
|
|
||||||
|
* Supports all AES key sizes
|
||||||
|
* Supports all AES common modes
|
||||||
|
* Pure-Python (no external dependencies)
|
||||||
|
* BlockFeeder API allows streams to easily be encrypted and decrypted
|
||||||
|
* Python 2.x and 3.x support (make sure you pass in bytes(), not strings for Python 3)
|
||||||
|
|
||||||
|
|
||||||
|
API
|
||||||
|
---
|
||||||
|
|
||||||
|
All keys may be 128 bits (16 bytes), 192 bits (24 bytes) or 256 bits (32 bytes) long.
|
||||||
|
|
||||||
|
To generate a random key use:
|
||||||
|
```python
|
||||||
|
import os
|
||||||
|
|
||||||
|
# 128 bit, 192 bit and 256 bit keys
|
||||||
|
key_128 = os.urandom(16)
|
||||||
|
key_192 = os.urandom(24)
|
||||||
|
key_256 = os.urandom(32)
|
||||||
|
```
|
||||||
|
|
||||||
|
To generate keys from simple-to-remember passwords, consider using a _password-based key-derivation function_ such as [scrypt](https://github.com/ricmoo/pyscrypt).
|
||||||
|
|
||||||
|
|
||||||
|
### Common Modes of Operation
|
||||||
|
|
||||||
|
There are many modes of operations, each with various pros and cons. In general though, the **CBC** and **CTR** modes are recommended. The **ECB is NOT recommended.**, and is included primarily for completeness.
|
||||||
|
|
||||||
|
Each of the following examples assumes the following key:
|
||||||
|
```python
|
||||||
|
import pyaes
|
||||||
|
|
||||||
|
# A 256 bit (32 byte) key
|
||||||
|
key = "This_key_for_demo_purposes_only!"
|
||||||
|
|
||||||
|
# For some modes of operation we need a random initialization vector
|
||||||
|
# of 16 bytes
|
||||||
|
iv = "InitializationVe"
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
#### Counter Mode of Operation (recommended)
|
||||||
|
|
||||||
|
```python
|
||||||
|
aes = pyaes.AESModeOfOperationCTR(key)
|
||||||
|
plaintext = "Text may be any length you wish, no padding is required"
|
||||||
|
ciphertext = aes.encrypt(plaintext)
|
||||||
|
|
||||||
|
# '''\xb6\x99\x10=\xa4\x96\x88\xd1\x89\x1co\xe6\x1d\xef;\x11\x03\xe3\xee
|
||||||
|
# \xa9V?wY\xbfe\xcdO\xe3\xdf\x9dV\x19\xe5\x8dk\x9fh\xb87>\xdb\xa3\xd6
|
||||||
|
# \x86\xf4\xbd\xb0\x97\xf1\t\x02\xe9 \xed'''
|
||||||
|
print repr(ciphertext)
|
||||||
|
|
||||||
|
# The counter mode of operation maintains state, so decryption requires
|
||||||
|
# a new instance be created
|
||||||
|
aes = pyaes.AESModeOfOperationCTR(key)
|
||||||
|
decrypted = aes.decrypt(ciphertext)
|
||||||
|
|
||||||
|
# True
|
||||||
|
print decrypted == plaintext
|
||||||
|
|
||||||
|
# To use a custom initial value
|
||||||
|
counter = pyaes.Counter(initial_value = 100)
|
||||||
|
aes = pyaes.AESModeOfOperationCTR(key, counter = counter)
|
||||||
|
ciphertext = aes.encrypt(plaintext)
|
||||||
|
|
||||||
|
# '''WZ\x844\x02\xbfoY\x1f\x12\xa6\xce\x03\x82Ei)\xf6\x97mX\x86\xe3\x9d
|
||||||
|
# _1\xdd\xbd\x87\xb5\xccEM_4\x01$\xa6\x81\x0b\xd5\x04\xd7Al\x07\xe5
|
||||||
|
# \xb2\x0e\\\x0f\x00\x13,\x07'''
|
||||||
|
print repr(ciphertext)
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
#### Cipher-Block Chaining (recommended)
|
||||||
|
|
||||||
|
```python
|
||||||
|
aes = pyaes.AESModeOfOperationCBC(key, iv = iv)
|
||||||
|
plaintext = "TextMustBe16Byte"
|
||||||
|
ciphertext = aes.encrypt(plaintext)
|
||||||
|
|
||||||
|
# '\xd6:\x18\xe6\xb1\xb3\xc3\xdc\x87\xdf\xa7|\x08{k\xb6'
|
||||||
|
print repr(ciphertext)
|
||||||
|
|
||||||
|
|
||||||
|
# The cipher-block chaining mode of operation maintains state, so
|
||||||
|
# decryption requires a new instance be created
|
||||||
|
aes = pyaes.AESModeOfOperationCBC(key, iv = iv)
|
||||||
|
decrypted = aes.decrypt(ciphertext)
|
||||||
|
|
||||||
|
# True
|
||||||
|
print decrypted == plaintext
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
#### Cipher Feedback
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Each block into the mode of operation must be a multiple of the segment
|
||||||
|
# size. For this example we choose 8 bytes.
|
||||||
|
aes = pyaes.AESModeOfOperationCFB(key, iv = iv, segment_size = 8)
|
||||||
|
plaintext = "TextMustBeAMultipleOfSegmentSize"
|
||||||
|
ciphertext = aes.encrypt(plaintext)
|
||||||
|
|
||||||
|
# '''v\xa9\xc1w"\x8aL\x93\xcb\xdf\xa0/\xf8Y\x0b\x8d\x88i\xcb\x85rmp
|
||||||
|
# \x85\xfe\xafM\x0c)\xd5\xeb\xaf'''
|
||||||
|
print repr(ciphertext)
|
||||||
|
|
||||||
|
|
||||||
|
# The cipher-block chaining mode of operation maintains state, so
|
||||||
|
# decryption requires a new instance be created
|
||||||
|
aes = pyaes.AESModeOfOperationCFB(key, iv = iv, segment_size = 8)
|
||||||
|
decrypted = aes.decrypt(ciphertext)
|
||||||
|
|
||||||
|
# True
|
||||||
|
print decrypted == plaintext
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
#### Output Feedback Mode of Operation
|
||||||
|
|
||||||
|
```python
|
||||||
|
aes = pyaes.AESModeOfOperationOFB(key, iv = iv)
|
||||||
|
plaintext = "Text may be any length you wish, no padding is required"
|
||||||
|
ciphertext = aes.encrypt(plaintext)
|
||||||
|
|
||||||
|
# '''v\xa9\xc1wO\x92^\x9e\rR\x1e\xf7\xb1\xa2\x9d"l1\xc7\xe7\x9d\x87(\xc26s
|
||||||
|
# \xdd8\xc8@\xb6\xd9!\xf5\x0cM\xaa\x9b\xc4\xedLD\xe4\xb9\xd8\xdf\x9e\xac
|
||||||
|
# \xa1\xb8\xea\x0f\x8ev\xb5'''
|
||||||
|
print repr(ciphertext)
|
||||||
|
|
||||||
|
# The counter mode of operation maintains state, so decryption requires
|
||||||
|
# a new instance be created
|
||||||
|
aes = pyaes.AESModeOfOperationOFB(key, iv = iv)
|
||||||
|
decrypted = aes.decrypt(ciphertext)
|
||||||
|
|
||||||
|
# True
|
||||||
|
print decrypted == plaintext
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
#### Electronic Codebook (NOT recommended)
|
||||||
|
|
||||||
|
```python
|
||||||
|
aes = pyaes.AESModeOfOperationECB(key)
|
||||||
|
plaintext = "TextMustBe16Byte"
|
||||||
|
ciphertext = aes.encrypt(plaintext)
|
||||||
|
|
||||||
|
# 'L6\x95\x85\xe4\xd9\xf1\x8a\xfb\xe5\x94X\x80|\x19\xc3'
|
||||||
|
print repr(ciphertext)
|
||||||
|
|
||||||
|
# Since there is no state stored in this mode of operation, it
|
||||||
|
# is not necessary to create a new aes object for decryption.
|
||||||
|
#aes = pyaes.AESModeOfOperationECB(key)
|
||||||
|
decrypted = aes.decrypt(ciphertext)
|
||||||
|
|
||||||
|
# True
|
||||||
|
print decrypted == plaintext
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### BlockFeeder
|
||||||
|
|
||||||
|
Since most of the modes of operations require data in specific block-sized or segment-sized blocks, it can be difficult when working with large arbitrary streams or strings of data.
|
||||||
|
|
||||||
|
The BlockFeeder class is meant to make life easier for you, by buffering bytes across multiple calls and returning bytes as they are available, as well as padding or stripping the output when finished, if necessary.
|
||||||
|
|
||||||
|
```python
|
||||||
|
import pyaes
|
||||||
|
|
||||||
|
# Any mode of operation can be used; for this example CBC
|
||||||
|
key = "This_key_for_demo_purposes_only!"
|
||||||
|
iv = "InitializationVe"
|
||||||
|
|
||||||
|
ciphertext = ''
|
||||||
|
|
||||||
|
# We can encrypt one line at a time, regardless of length
|
||||||
|
encrypter = pyaes.Encrypter(pyaes.AESModeOfOperationCBC(key, iv))
|
||||||
|
for line in file('/etc/passwd'):
|
||||||
|
ciphertext += encrypter.feed(line)
|
||||||
|
|
||||||
|
# Make a final call to flush any remaining bytes and add padding
|
||||||
|
ciphertext += encrypter.feed()
|
||||||
|
|
||||||
|
# We can decrypt the cipher text in chunks (here we split it in half)
|
||||||
|
decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(key, iv))
|
||||||
|
decrypted = decrypter.feed(ciphertext[:len(ciphertext) / 2])
|
||||||
|
decrypted += decrypter.feed(ciphertext[len(ciphertext) / 2:])
|
||||||
|
|
||||||
|
# Again, make a final call to flush any remaining bytes and strip padding
|
||||||
|
decrypted += decrypter.feed()
|
||||||
|
|
||||||
|
print file('/etc/passwd').read() == decrypted
|
||||||
|
```
|
||||||
|
|
||||||
|
### Stream Feeder
|
||||||
|
|
||||||
|
This is meant to make it even easier to encrypt and decrypt streams and large files.
|
||||||
|
|
||||||
|
```python
|
||||||
|
import pyaes
|
||||||
|
|
||||||
|
# Any mode of operation can be used; for this example CTR
|
||||||
|
key = "This_key_for_demo_purposes_only!"
|
||||||
|
|
||||||
|
# Create the mode of operation to encrypt with
|
||||||
|
mode = pyaes.AESModeOfOperationCTR(key)
|
||||||
|
|
||||||
|
# The input and output files
|
||||||
|
file_in = file('/etc/passwd')
|
||||||
|
file_out = file('/tmp/encrypted.bin', 'wb')
|
||||||
|
|
||||||
|
# Encrypt the data as a stream; the file is read in 8kb chunks by default
|
||||||
|
pyaes.encrypt_stream(mode, file_in, file_out)
|
||||||
|
|
||||||
|
# Close the files
|
||||||
|
file_in.close()
|
||||||
|
file_out.close()
|
||||||
|
```
|
||||||
|
|
||||||
|
Decrypting is identical, except you would use `pyaes.decrypt_stream`, and the encrypted file would be the `file_in` and target for decryption the `file_out`.
|
||||||
|
|
||||||
|
### AES block cipher
|
||||||
|
|
||||||
|
Generally you should use one of the modes of operation above. This may however be useful for experimenting with a custom mode of operation or dealing with encrypted blocks.
|
||||||
|
|
||||||
|
The block cipher requires exactly one block of data to encrypt or decrypt, and each block should be an array with each element an integer representation of a byte.
|
||||||
|
|
||||||
|
```python
|
||||||
|
import pyaes
|
||||||
|
|
||||||
|
# 16 byte block of plain text
|
||||||
|
plaintext = "Hello World!!!!!"
|
||||||
|
plaintext_bytes = [ ord(c) for c in plaintext ]
|
||||||
|
|
||||||
|
# 32 byte key (256 bit)
|
||||||
|
key = "This_key_for_demo_purposes_only!"
|
||||||
|
|
||||||
|
# Our AES instance
|
||||||
|
aes = pyaes.AES(key)
|
||||||
|
|
||||||
|
# Encrypt!
|
||||||
|
ciphertext = aes.encrypt(plaintext_bytes)
|
||||||
|
|
||||||
|
# [55, 250, 182, 25, 185, 208, 186, 95, 206, 115, 50, 115, 108, 58, 174, 115]
|
||||||
|
print repr(ciphertext)
|
||||||
|
|
||||||
|
# Decrypt!
|
||||||
|
decrypted = aes.decrypt(ciphertext)
|
||||||
|
|
||||||
|
# True
|
||||||
|
print decrypted == plaintext_bytes
|
||||||
|
```
|
||||||
|
|
||||||
|
What is a key?
|
||||||
|
--------------
|
||||||
|
|
||||||
|
This seems to be a point of confusion for many people new to using encryption. You can think of the key as the *"password"*. However, these algorithms require the *"password"* to be a specific length.
|
||||||
|
|
||||||
|
With AES, there are three possible key lengths, 16-bytes, 24-bytes or 32-bytes. When you create an AES object, the key size is automatically detected, so it is important to pass in a key of the correct length.
|
||||||
|
|
||||||
|
Often, you wish to provide a password of arbitrary length, for example, something easy to remember or write down. In these cases, you must come up with a way to transform the password into a key, of a specific length. A **Password-Based Key Derivation Function** (PBKDF) is an algorithm designed for this exact purpose.
|
||||||
|
|
||||||
|
Here is an example, using the popular (possibly obsolete?) *crypt* PBKDF:
|
||||||
|
|
||||||
|
```
|
||||||
|
# See: https://www.dlitz.net/software/python-pbkdf2/
|
||||||
|
import pbkdf2
|
||||||
|
|
||||||
|
password = "HelloWorld"
|
||||||
|
|
||||||
|
# The crypt PBKDF returns a 48-byte string
|
||||||
|
key = pbkdf2.crypt(password)
|
||||||
|
|
||||||
|
# A 16-byte, 24-byte and 32-byte key, respectively
|
||||||
|
key_16 = key[:16]
|
||||||
|
key_24 = key[:24]
|
||||||
|
key_32 = key[:32]
|
||||||
|
```
|
||||||
|
|
||||||
|
The [scrypt](https://github.com/ricmoo/pyscrypt) PBKDF is intentionally slow, to make it more difficult to brute-force guess a password:
|
||||||
|
|
||||||
|
```
|
||||||
|
# See: https://github.com/ricmoo/pyscrypt
|
||||||
|
import pyscrypt
|
||||||
|
|
||||||
|
password = "HelloWorld"
|
||||||
|
|
||||||
|
# Salt is required, and prevents Rainbow Table attacks
|
||||||
|
salt = "SeaSalt"
|
||||||
|
|
||||||
|
# N, r, and p are parameters to specify how difficult it should be to
|
||||||
|
# generate a key; bigger numbers take longer and more memory
|
||||||
|
N = 1024
|
||||||
|
r = 1
|
||||||
|
p = 1
|
||||||
|
|
||||||
|
# A 16-byte, 24-byte and 32-byte key, respectively; the scrypt algorithm takes
|
||||||
|
# a 6-th parameter, indicating key length
|
||||||
|
key_16 = pyscrypt.hash(password, salt, N, r, p, 16)
|
||||||
|
key_24 = pyscrypt.hash(password, salt, N, r, p, 24)
|
||||||
|
key_32 = pyscrypt.hash(password, salt, N, r, p, 32)
|
||||||
|
```
|
||||||
|
|
||||||
|
Another possibility, is to use a hashing function, such as SHA256 to hash the password, but this method may be vulnerable to [Rainbow Attacks](http://en.wikipedia.org/wiki/Rainbow_table), unless you use a [salt](http://en.wikipedia.org/wiki/Salt_(cryptography)).
|
||||||
|
|
||||||
|
```python
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
password = "HelloWorld"
|
||||||
|
|
||||||
|
# The SHA256 hash algorithm returns a 32-byte string
|
||||||
|
hashed = hashlib.sha256(password).digest()
|
||||||
|
|
||||||
|
# A 16-byte, 24-byte and 32-byte key, respectively
|
||||||
|
key_16 = hashed[:16]
|
||||||
|
key_24 = hashed[:24]
|
||||||
|
key_32 = hashed
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
Performance
|
||||||
|
-----------
|
||||||
|
|
||||||
|
There is a test case provided in _/tests/test-aes.py_ which does some basic performance testing (its primary purpose is more as a regression test).
|
||||||
|
|
||||||
|
Based on that test, in **CPython**, this library is about 30x slower than [PyCrypto](https://www.dlitz.net/software/pycrypto/) for CBC, ECB and OFB; about 80x slower for CFB; and 300x slower for CTR.
|
||||||
|
|
||||||
|
Based on that same test, in **Pypy**, this library is about 4x slower than [PyCrypto](https://www.dlitz.net/software/pycrypto/) for CBC, ECB and OFB; about 12x slower for CFB; and 19x slower for CTR.
|
||||||
|
|
||||||
|
The PyCrypto documentation makes reference to the counter call being responsible for the speed problems of the counter (CTR) mode of operation, which is why they use a specially optimized counter. I will investigate this problem further in the future.
|
||||||
|
|
||||||
|
|
||||||
|
FAQ
|
||||||
|
---
|
||||||
|
|
||||||
|
#### Why do this?
|
||||||
|
|
||||||
|
The short answer, *why not?*
|
||||||
|
|
||||||
|
The longer answer, is for my [pyscrypt](https://github.com/ricmoo/pyscrypt) library. I required a pure-Python AES implementation that supported 256-bit keys with the counter (CTR) mode of operation. After searching, I found several implementations, but all were missing CTR or only supported 128 bit keys. After all the work of learning AES inside and out to implement the library, it was only a marginal amount of extra work to library-ify a more general solution. So, *why not?*
|
||||||
|
|
||||||
|
#### How do I get a question answered?
|
||||||
|
|
||||||
|
E-mail me at pyaes@ricmoo.com with any questions, suggestions, comments, et cetera.
|
||||||
|
|
||||||
|
|
||||||
|
#### Can I give you my money?
|
||||||
|
|
||||||
|
Umm... Ok? :-)
|
||||||
|
|
||||||
|
_Bitcoin_ - `18UDs4qV1shu2CgTS2tKojhCtM69kpnWg9`
|
|
@ -0,0 +1,53 @@
|
||||||
|
# The MIT License (MIT)
|
||||||
|
#
|
||||||
|
# Copyright (c) 2014 Richard Moore
|
||||||
|
#
|
||||||
|
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
# of this software and associated documentation files (the "Software"), to deal
|
||||||
|
# in the Software without restriction, including without limitation the rights
|
||||||
|
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
# copies of the Software, and to permit persons to whom the Software is
|
||||||
|
# furnished to do so, subject to the following conditions:
|
||||||
|
#
|
||||||
|
# The above copyright notice and this permission notice shall be included in
|
||||||
|
# all copies or substantial portions of the Software.
|
||||||
|
#
|
||||||
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
# THE SOFTWARE.
|
||||||
|
|
||||||
|
# This is a pure-Python implementation of the AES algorithm and AES common
|
||||||
|
# modes of operation.
|
||||||
|
|
||||||
|
# See: https://en.wikipedia.org/wiki/Advanced_Encryption_Standard
|
||||||
|
# See: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation
|
||||||
|
|
||||||
|
|
||||||
|
# Supported key sizes:
|
||||||
|
# 128-bit
|
||||||
|
# 192-bit
|
||||||
|
# 256-bit
|
||||||
|
|
||||||
|
|
||||||
|
# Supported modes of operation:
|
||||||
|
# ECB - Electronic Codebook
|
||||||
|
# CBC - Cipher-Block Chaining
|
||||||
|
# CFB - Cipher Feedback
|
||||||
|
# OFB - Output Feedback
|
||||||
|
# CTR - Counter
|
||||||
|
|
||||||
|
# See the README.md for API details and general information.
|
||||||
|
|
||||||
|
# Also useful, PyCrypto, a crypto library implemented in C with Python bindings:
|
||||||
|
# https://www.dlitz.net/software/pycrypto/
|
||||||
|
|
||||||
|
|
||||||
|
# Library version as [major, minor, patch].
VERSION = [1, 3, 0]
|
||||||
|
|
||||||
|
from .aes import AES, AESModeOfOperationCTR, AESModeOfOperationCBC, AESModeOfOperationCFB, AESModeOfOperationECB, AESModeOfOperationOFB, AESModesOfOperation, Counter
|
||||||
|
from .blockfeeder import decrypt_stream, Decrypter, encrypt_stream, Encrypter
|
||||||
|
from .blockfeeder import PADDING_NONE, PADDING_DEFAULT
|
|
@ -0,0 +1,589 @@
|
||||||
|
# The MIT License (MIT)
|
||||||
|
#
|
||||||
|
# Copyright (c) 2014 Richard Moore
|
||||||
|
#
|
||||||
|
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
# of this software and associated documentation files (the "Software"), to deal
|
||||||
|
# in the Software without restriction, including without limitation the rights
|
||||||
|
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
# copies of the Software, and to permit persons to whom the Software is
|
||||||
|
# furnished to do so, subject to the following conditions:
|
||||||
|
#
|
||||||
|
# The above copyright notice and this permission notice shall be included in
|
||||||
|
# all copies or substantial portions of the Software.
|
||||||
|
#
|
||||||
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
# THE SOFTWARE.
|
||||||
|
|
||||||
|
# This is a pure-Python implementation of the AES algorithm and AES common
|
||||||
|
# modes of operation.
|
||||||
|
|
||||||
|
# See: https://en.wikipedia.org/wiki/Advanced_Encryption_Standard
|
||||||
|
|
||||||
|
# Honestly, the best description of the modes of operations are the wonderful
|
||||||
|
# diagrams on Wikipedia. They explain in moments what my words could never
|
||||||
|
# achieve. Hence the inline documentation here is sparer than I'd prefer.
|
||||||
|
# See: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation
|
||||||
|
|
||||||
|
# Also useful, PyCrypto, a crypto library implemented in C with Python bindings:
|
||||||
|
# https://www.dlitz.net/software/pycrypto/
|
||||||
|
|
||||||
|
|
||||||
|
# Supported key sizes:
|
||||||
|
# 128-bit
|
||||||
|
# 192-bit
|
||||||
|
# 256-bit
|
||||||
|
|
||||||
|
|
||||||
|
# Supported modes of operation:
|
||||||
|
# ECB - Electronic Codebook
|
||||||
|
# CBC - Cipher-Block Chaining
|
||||||
|
# CFB - Cipher Feedback
|
||||||
|
# OFB - Output Feedback
|
||||||
|
# CTR - Counter
|
||||||
|
|
||||||
|
|
||||||
|
# See the README.md for API details and general information.
|
||||||
|
|
||||||
|
|
||||||
|
import copy
|
||||||
|
import struct
|
||||||
|
|
||||||
|
# Names exported by ``from aes import *``: the raw block cipher plus the
# five mode-of-operation wrappers, the mode registry, and the CTR counter.
__all__ = ["AES", "AESModeOfOperationCTR", "AESModeOfOperationCBC", "AESModeOfOperationCFB",
           "AESModeOfOperationECB", "AESModeOfOperationOFB", "AESModesOfOperation", "Counter"]
|
||||||
|
|
||||||
|
|
||||||
|
def _compact_word(word):
|
||||||
|
return (word[0] << 24) | (word[1] << 16) | (word[2] << 8) | word[3]
|
||||||
|
|
||||||
|
def _string_to_bytes(text):
|
||||||
|
return list(ord(c) for c in text)
|
||||||
|
|
||||||
|
def _bytes_to_string(binary):
|
||||||
|
return "".join(chr(b) for b in binary)
|
||||||
|
|
||||||
|
def _concat_list(a, b):
|
||||||
|
return a + b
|
||||||
|
|
||||||
|
|
||||||
|
# Python 3 compatibility
#
# Under Python 2 the name ``xrange`` exists, the lookup below succeeds, and
# the Python-2 helper definitions above remain in effect.  Under Python 3 the
# lookup raises NameError, so the except-branch runs: it aliases ``xrange``
# to ``range`` and redefines the three byte/string helpers with bytes-aware
# versions that shadow the Python-2 ones.
# NOTE(review): the shim definitions are nested inside the except-branch so
# they only take effect on Python 3; the extraction this chunk came from
# destroyed indentation, so the nesting is reconstructed — confirm against
# the upstream file.
try:
    xrange
except Exception:
    xrange = range

    # Python 3 supports bytes, which is already an array of integers
    def _string_to_bytes(text):
        if isinstance(text, bytes):
            return text
        return [ord(c) for c in text]

    # In Python 3, we return bytes
    def _bytes_to_string(binary):
        return bytes(binary)

    # Python 3 cannot concatenate a list onto a bytes, so we bytes-ify it first
    def _concat_list(a, b):
        return a + bytes(b)
|
||||||
|
|
||||||
|
|
||||||
|
# Based *largely* on the Rijndael implementation
|
||||||
|
# See: http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf
|
||||||
|
class AES(object):
|
||||||
|
'''Encapsulates the AES block cipher.
|
||||||
|
|
||||||
|
You generally should not need this. Use the AESModeOfOperation classes
|
||||||
|
below instead.'''
|
||||||
|
|
||||||
|
# Number of rounds by keysize
|
||||||
|
number_of_rounds = {16: 10, 24: 12, 32: 14}
|
||||||
|
|
||||||
|
# Round constant words
|
||||||
|
rcon = [ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91 ]
|
||||||
|
|
||||||
|
# S-box and Inverse S-box (S is for Substitution)
|
||||||
|
S = [ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 ]
|
||||||
|
Si =[ 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d ]
|
||||||
|
|
||||||
|
# Transformations for encryption
|
||||||
|
T1 = [ 0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d, 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554, 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d, 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a, 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87, 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b, 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea, 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b, 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a, 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f, 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108, 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f, 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e, 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5, 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d, 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f, 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e, 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb, 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce, 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497, 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c, 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed, 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b, 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a, 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16, 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594, 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81, 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3, 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a, 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504, 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163, 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d, 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f, 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739, 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47, 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395, 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f, 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883, 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c, 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76, 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e, 0x924949db, 0x0c06060a, 
0x4824246c, 0xb85c5ce4, 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6, 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b, 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7, 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0, 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25, 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818, 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72, 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651, 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21, 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85, 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa, 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12, 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0, 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9, 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133, 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7, 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920, 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a, 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17, 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8, 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11, 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a ]
|
||||||
|
T2 = [ 0xa5c66363, 0x84f87c7c, 0x99ee7777, 0x8df67b7b, 0x0dfff2f2, 0xbdd66b6b, 0xb1de6f6f, 0x5491c5c5, 0x50603030, 0x03020101, 0xa9ce6767, 0x7d562b2b, 0x19e7fefe, 0x62b5d7d7, 0xe64dabab, 0x9aec7676, 0x458fcaca, 0x9d1f8282, 0x4089c9c9, 0x87fa7d7d, 0x15effafa, 0xebb25959, 0xc98e4747, 0x0bfbf0f0, 0xec41adad, 0x67b3d4d4, 0xfd5fa2a2, 0xea45afaf, 0xbf239c9c, 0xf753a4a4, 0x96e47272, 0x5b9bc0c0, 0xc275b7b7, 0x1ce1fdfd, 0xae3d9393, 0x6a4c2626, 0x5a6c3636, 0x417e3f3f, 0x02f5f7f7, 0x4f83cccc, 0x5c683434, 0xf451a5a5, 0x34d1e5e5, 0x08f9f1f1, 0x93e27171, 0x73abd8d8, 0x53623131, 0x3f2a1515, 0x0c080404, 0x5295c7c7, 0x65462323, 0x5e9dc3c3, 0x28301818, 0xa1379696, 0x0f0a0505, 0xb52f9a9a, 0x090e0707, 0x36241212, 0x9b1b8080, 0x3ddfe2e2, 0x26cdebeb, 0x694e2727, 0xcd7fb2b2, 0x9fea7575, 0x1b120909, 0x9e1d8383, 0x74582c2c, 0x2e341a1a, 0x2d361b1b, 0xb2dc6e6e, 0xeeb45a5a, 0xfb5ba0a0, 0xf6a45252, 0x4d763b3b, 0x61b7d6d6, 0xce7db3b3, 0x7b522929, 0x3edde3e3, 0x715e2f2f, 0x97138484, 0xf5a65353, 0x68b9d1d1, 0x00000000, 0x2cc1eded, 0x60402020, 0x1fe3fcfc, 0xc879b1b1, 0xedb65b5b, 0xbed46a6a, 0x468dcbcb, 0xd967bebe, 0x4b723939, 0xde944a4a, 0xd4984c4c, 0xe8b05858, 0x4a85cfcf, 0x6bbbd0d0, 0x2ac5efef, 0xe54faaaa, 0x16edfbfb, 0xc5864343, 0xd79a4d4d, 0x55663333, 0x94118585, 0xcf8a4545, 0x10e9f9f9, 0x06040202, 0x81fe7f7f, 0xf0a05050, 0x44783c3c, 0xba259f9f, 0xe34ba8a8, 0xf3a25151, 0xfe5da3a3, 0xc0804040, 0x8a058f8f, 0xad3f9292, 0xbc219d9d, 0x48703838, 0x04f1f5f5, 0xdf63bcbc, 0xc177b6b6, 0x75afdada, 0x63422121, 0x30201010, 0x1ae5ffff, 0x0efdf3f3, 0x6dbfd2d2, 0x4c81cdcd, 0x14180c0c, 0x35261313, 0x2fc3ecec, 0xe1be5f5f, 0xa2359797, 0xcc884444, 0x392e1717, 0x5793c4c4, 0xf255a7a7, 0x82fc7e7e, 0x477a3d3d, 0xacc86464, 0xe7ba5d5d, 0x2b321919, 0x95e67373, 0xa0c06060, 0x98198181, 0xd19e4f4f, 0x7fa3dcdc, 0x66442222, 0x7e542a2a, 0xab3b9090, 0x830b8888, 0xca8c4646, 0x29c7eeee, 0xd36bb8b8, 0x3c281414, 0x79a7dede, 0xe2bc5e5e, 0x1d160b0b, 0x76addbdb, 0x3bdbe0e0, 0x56643232, 0x4e743a3a, 0x1e140a0a, 0xdb924949, 0x0a0c0606, 
0x6c482424, 0xe4b85c5c, 0x5d9fc2c2, 0x6ebdd3d3, 0xef43acac, 0xa6c46262, 0xa8399191, 0xa4319595, 0x37d3e4e4, 0x8bf27979, 0x32d5e7e7, 0x438bc8c8, 0x596e3737, 0xb7da6d6d, 0x8c018d8d, 0x64b1d5d5, 0xd29c4e4e, 0xe049a9a9, 0xb4d86c6c, 0xfaac5656, 0x07f3f4f4, 0x25cfeaea, 0xafca6565, 0x8ef47a7a, 0xe947aeae, 0x18100808, 0xd56fbaba, 0x88f07878, 0x6f4a2525, 0x725c2e2e, 0x24381c1c, 0xf157a6a6, 0xc773b4b4, 0x5197c6c6, 0x23cbe8e8, 0x7ca1dddd, 0x9ce87474, 0x213e1f1f, 0xdd964b4b, 0xdc61bdbd, 0x860d8b8b, 0x850f8a8a, 0x90e07070, 0x427c3e3e, 0xc471b5b5, 0xaacc6666, 0xd8904848, 0x05060303, 0x01f7f6f6, 0x121c0e0e, 0xa3c26161, 0x5f6a3535, 0xf9ae5757, 0xd069b9b9, 0x91178686, 0x5899c1c1, 0x273a1d1d, 0xb9279e9e, 0x38d9e1e1, 0x13ebf8f8, 0xb32b9898, 0x33221111, 0xbbd26969, 0x70a9d9d9, 0x89078e8e, 0xa7339494, 0xb62d9b9b, 0x223c1e1e, 0x92158787, 0x20c9e9e9, 0x4987cece, 0xffaa5555, 0x78502828, 0x7aa5dfdf, 0x8f038c8c, 0xf859a1a1, 0x80098989, 0x171a0d0d, 0xda65bfbf, 0x31d7e6e6, 0xc6844242, 0xb8d06868, 0xc3824141, 0xb0299999, 0x775a2d2d, 0x111e0f0f, 0xcb7bb0b0, 0xfca85454, 0xd66dbbbb, 0x3a2c1616 ]
|
||||||
|
T3 = [ 0x63a5c663, 0x7c84f87c, 0x7799ee77, 0x7b8df67b, 0xf20dfff2, 0x6bbdd66b, 0x6fb1de6f, 0xc55491c5, 0x30506030, 0x01030201, 0x67a9ce67, 0x2b7d562b, 0xfe19e7fe, 0xd762b5d7, 0xabe64dab, 0x769aec76, 0xca458fca, 0x829d1f82, 0xc94089c9, 0x7d87fa7d, 0xfa15effa, 0x59ebb259, 0x47c98e47, 0xf00bfbf0, 0xadec41ad, 0xd467b3d4, 0xa2fd5fa2, 0xafea45af, 0x9cbf239c, 0xa4f753a4, 0x7296e472, 0xc05b9bc0, 0xb7c275b7, 0xfd1ce1fd, 0x93ae3d93, 0x266a4c26, 0x365a6c36, 0x3f417e3f, 0xf702f5f7, 0xcc4f83cc, 0x345c6834, 0xa5f451a5, 0xe534d1e5, 0xf108f9f1, 0x7193e271, 0xd873abd8, 0x31536231, 0x153f2a15, 0x040c0804, 0xc75295c7, 0x23654623, 0xc35e9dc3, 0x18283018, 0x96a13796, 0x050f0a05, 0x9ab52f9a, 0x07090e07, 0x12362412, 0x809b1b80, 0xe23ddfe2, 0xeb26cdeb, 0x27694e27, 0xb2cd7fb2, 0x759fea75, 0x091b1209, 0x839e1d83, 0x2c74582c, 0x1a2e341a, 0x1b2d361b, 0x6eb2dc6e, 0x5aeeb45a, 0xa0fb5ba0, 0x52f6a452, 0x3b4d763b, 0xd661b7d6, 0xb3ce7db3, 0x297b5229, 0xe33edde3, 0x2f715e2f, 0x84971384, 0x53f5a653, 0xd168b9d1, 0x00000000, 0xed2cc1ed, 0x20604020, 0xfc1fe3fc, 0xb1c879b1, 0x5bedb65b, 0x6abed46a, 0xcb468dcb, 0xbed967be, 0x394b7239, 0x4ade944a, 0x4cd4984c, 0x58e8b058, 0xcf4a85cf, 0xd06bbbd0, 0xef2ac5ef, 0xaae54faa, 0xfb16edfb, 0x43c58643, 0x4dd79a4d, 0x33556633, 0x85941185, 0x45cf8a45, 0xf910e9f9, 0x02060402, 0x7f81fe7f, 0x50f0a050, 0x3c44783c, 0x9fba259f, 0xa8e34ba8, 0x51f3a251, 0xa3fe5da3, 0x40c08040, 0x8f8a058f, 0x92ad3f92, 0x9dbc219d, 0x38487038, 0xf504f1f5, 0xbcdf63bc, 0xb6c177b6, 0xda75afda, 0x21634221, 0x10302010, 0xff1ae5ff, 0xf30efdf3, 0xd26dbfd2, 0xcd4c81cd, 0x0c14180c, 0x13352613, 0xec2fc3ec, 0x5fe1be5f, 0x97a23597, 0x44cc8844, 0x17392e17, 0xc45793c4, 0xa7f255a7, 0x7e82fc7e, 0x3d477a3d, 0x64acc864, 0x5de7ba5d, 0x192b3219, 0x7395e673, 0x60a0c060, 0x81981981, 0x4fd19e4f, 0xdc7fa3dc, 0x22664422, 0x2a7e542a, 0x90ab3b90, 0x88830b88, 0x46ca8c46, 0xee29c7ee, 0xb8d36bb8, 0x143c2814, 0xde79a7de, 0x5ee2bc5e, 0x0b1d160b, 0xdb76addb, 0xe03bdbe0, 0x32566432, 0x3a4e743a, 0x0a1e140a, 0x49db9249, 0x060a0c06, 
0x246c4824, 0x5ce4b85c, 0xc25d9fc2, 0xd36ebdd3, 0xacef43ac, 0x62a6c462, 0x91a83991, 0x95a43195, 0xe437d3e4, 0x798bf279, 0xe732d5e7, 0xc8438bc8, 0x37596e37, 0x6db7da6d, 0x8d8c018d, 0xd564b1d5, 0x4ed29c4e, 0xa9e049a9, 0x6cb4d86c, 0x56faac56, 0xf407f3f4, 0xea25cfea, 0x65afca65, 0x7a8ef47a, 0xaee947ae, 0x08181008, 0xbad56fba, 0x7888f078, 0x256f4a25, 0x2e725c2e, 0x1c24381c, 0xa6f157a6, 0xb4c773b4, 0xc65197c6, 0xe823cbe8, 0xdd7ca1dd, 0x749ce874, 0x1f213e1f, 0x4bdd964b, 0xbddc61bd, 0x8b860d8b, 0x8a850f8a, 0x7090e070, 0x3e427c3e, 0xb5c471b5, 0x66aacc66, 0x48d89048, 0x03050603, 0xf601f7f6, 0x0e121c0e, 0x61a3c261, 0x355f6a35, 0x57f9ae57, 0xb9d069b9, 0x86911786, 0xc15899c1, 0x1d273a1d, 0x9eb9279e, 0xe138d9e1, 0xf813ebf8, 0x98b32b98, 0x11332211, 0x69bbd269, 0xd970a9d9, 0x8e89078e, 0x94a73394, 0x9bb62d9b, 0x1e223c1e, 0x87921587, 0xe920c9e9, 0xce4987ce, 0x55ffaa55, 0x28785028, 0xdf7aa5df, 0x8c8f038c, 0xa1f859a1, 0x89800989, 0x0d171a0d, 0xbfda65bf, 0xe631d7e6, 0x42c68442, 0x68b8d068, 0x41c38241, 0x99b02999, 0x2d775a2d, 0x0f111e0f, 0xb0cb7bb0, 0x54fca854, 0xbbd66dbb, 0x163a2c16 ]
|
||||||
|
T4 = [ 0x6363a5c6, 0x7c7c84f8, 0x777799ee, 0x7b7b8df6, 0xf2f20dff, 0x6b6bbdd6, 0x6f6fb1de, 0xc5c55491, 0x30305060, 0x01010302, 0x6767a9ce, 0x2b2b7d56, 0xfefe19e7, 0xd7d762b5, 0xababe64d, 0x76769aec, 0xcaca458f, 0x82829d1f, 0xc9c94089, 0x7d7d87fa, 0xfafa15ef, 0x5959ebb2, 0x4747c98e, 0xf0f00bfb, 0xadadec41, 0xd4d467b3, 0xa2a2fd5f, 0xafafea45, 0x9c9cbf23, 0xa4a4f753, 0x727296e4, 0xc0c05b9b, 0xb7b7c275, 0xfdfd1ce1, 0x9393ae3d, 0x26266a4c, 0x36365a6c, 0x3f3f417e, 0xf7f702f5, 0xcccc4f83, 0x34345c68, 0xa5a5f451, 0xe5e534d1, 0xf1f108f9, 0x717193e2, 0xd8d873ab, 0x31315362, 0x15153f2a, 0x04040c08, 0xc7c75295, 0x23236546, 0xc3c35e9d, 0x18182830, 0x9696a137, 0x05050f0a, 0x9a9ab52f, 0x0707090e, 0x12123624, 0x80809b1b, 0xe2e23ddf, 0xebeb26cd, 0x2727694e, 0xb2b2cd7f, 0x75759fea, 0x09091b12, 0x83839e1d, 0x2c2c7458, 0x1a1a2e34, 0x1b1b2d36, 0x6e6eb2dc, 0x5a5aeeb4, 0xa0a0fb5b, 0x5252f6a4, 0x3b3b4d76, 0xd6d661b7, 0xb3b3ce7d, 0x29297b52, 0xe3e33edd, 0x2f2f715e, 0x84849713, 0x5353f5a6, 0xd1d168b9, 0x00000000, 0xeded2cc1, 0x20206040, 0xfcfc1fe3, 0xb1b1c879, 0x5b5bedb6, 0x6a6abed4, 0xcbcb468d, 0xbebed967, 0x39394b72, 0x4a4ade94, 0x4c4cd498, 0x5858e8b0, 0xcfcf4a85, 0xd0d06bbb, 0xefef2ac5, 0xaaaae54f, 0xfbfb16ed, 0x4343c586, 0x4d4dd79a, 0x33335566, 0x85859411, 0x4545cf8a, 0xf9f910e9, 0x02020604, 0x7f7f81fe, 0x5050f0a0, 0x3c3c4478, 0x9f9fba25, 0xa8a8e34b, 0x5151f3a2, 0xa3a3fe5d, 0x4040c080, 0x8f8f8a05, 0x9292ad3f, 0x9d9dbc21, 0x38384870, 0xf5f504f1, 0xbcbcdf63, 0xb6b6c177, 0xdada75af, 0x21216342, 0x10103020, 0xffff1ae5, 0xf3f30efd, 0xd2d26dbf, 0xcdcd4c81, 0x0c0c1418, 0x13133526, 0xecec2fc3, 0x5f5fe1be, 0x9797a235, 0x4444cc88, 0x1717392e, 0xc4c45793, 0xa7a7f255, 0x7e7e82fc, 0x3d3d477a, 0x6464acc8, 0x5d5de7ba, 0x19192b32, 0x737395e6, 0x6060a0c0, 0x81819819, 0x4f4fd19e, 0xdcdc7fa3, 0x22226644, 0x2a2a7e54, 0x9090ab3b, 0x8888830b, 0x4646ca8c, 0xeeee29c7, 0xb8b8d36b, 0x14143c28, 0xdede79a7, 0x5e5ee2bc, 0x0b0b1d16, 0xdbdb76ad, 0xe0e03bdb, 0x32325664, 0x3a3a4e74, 0x0a0a1e14, 0x4949db92, 0x06060a0c, 
0x24246c48, 0x5c5ce4b8, 0xc2c25d9f, 0xd3d36ebd, 0xacacef43, 0x6262a6c4, 0x9191a839, 0x9595a431, 0xe4e437d3, 0x79798bf2, 0xe7e732d5, 0xc8c8438b, 0x3737596e, 0x6d6db7da, 0x8d8d8c01, 0xd5d564b1, 0x4e4ed29c, 0xa9a9e049, 0x6c6cb4d8, 0x5656faac, 0xf4f407f3, 0xeaea25cf, 0x6565afca, 0x7a7a8ef4, 0xaeaee947, 0x08081810, 0xbabad56f, 0x787888f0, 0x25256f4a, 0x2e2e725c, 0x1c1c2438, 0xa6a6f157, 0xb4b4c773, 0xc6c65197, 0xe8e823cb, 0xdddd7ca1, 0x74749ce8, 0x1f1f213e, 0x4b4bdd96, 0xbdbddc61, 0x8b8b860d, 0x8a8a850f, 0x707090e0, 0x3e3e427c, 0xb5b5c471, 0x6666aacc, 0x4848d890, 0x03030506, 0xf6f601f7, 0x0e0e121c, 0x6161a3c2, 0x35355f6a, 0x5757f9ae, 0xb9b9d069, 0x86869117, 0xc1c15899, 0x1d1d273a, 0x9e9eb927, 0xe1e138d9, 0xf8f813eb, 0x9898b32b, 0x11113322, 0x6969bbd2, 0xd9d970a9, 0x8e8e8907, 0x9494a733, 0x9b9bb62d, 0x1e1e223c, 0x87879215, 0xe9e920c9, 0xcece4987, 0x5555ffaa, 0x28287850, 0xdfdf7aa5, 0x8c8c8f03, 0xa1a1f859, 0x89898009, 0x0d0d171a, 0xbfbfda65, 0xe6e631d7, 0x4242c684, 0x6868b8d0, 0x4141c382, 0x9999b029, 0x2d2d775a, 0x0f0f111e, 0xb0b0cb7b, 0x5454fca8, 0xbbbbd66d, 0x16163a2c ]
|
||||||
|
|
||||||
|
# Transformations for decryption
|
||||||
|
T5 = [ 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96, 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393, 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25, 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f, 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1, 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6, 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da, 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844, 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd, 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4, 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45, 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94, 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7, 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a, 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5, 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c, 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1, 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a, 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75, 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051, 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46, 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff, 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77, 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb, 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000, 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e, 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927, 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a, 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e, 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16, 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d, 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8, 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd, 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34, 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163, 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120, 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d, 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0, 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422, 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef, 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36, 0xa6f581cf, 0xa57ade28, 
0xdab78e26, 0x3fadbfa4, 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662, 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5, 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3, 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b, 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8, 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6, 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6, 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0, 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815, 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f, 0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df, 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f, 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e, 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713, 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89, 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c, 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf, 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86, 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f, 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541, 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190, 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742 ]
|
||||||
|
T6 = [ 0x5051f4a7, 0x537e4165, 0xc31a17a4, 0x963a275e, 0xcb3bab6b, 0xf11f9d45, 0xabacfa58, 0x934be303, 0x552030fa, 0xf6ad766d, 0x9188cc76, 0x25f5024c, 0xfc4fe5d7, 0xd7c52acb, 0x80263544, 0x8fb562a3, 0x49deb15a, 0x6725ba1b, 0x9845ea0e, 0xe15dfec0, 0x02c32f75, 0x12814cf0, 0xa38d4697, 0xc66bd3f9, 0xe7038f5f, 0x9515929c, 0xebbf6d7a, 0xda955259, 0x2dd4be83, 0xd3587421, 0x2949e069, 0x448ec9c8, 0x6a75c289, 0x78f48e79, 0x6b99583e, 0xdd27b971, 0xb6bee14f, 0x17f088ad, 0x66c920ac, 0xb47dce3a, 0x1863df4a, 0x82e51a31, 0x60975133, 0x4562537f, 0xe0b16477, 0x84bb6bae, 0x1cfe81a0, 0x94f9082b, 0x58704868, 0x198f45fd, 0x8794de6c, 0xb7527bf8, 0x23ab73d3, 0xe2724b02, 0x57e31f8f, 0x2a6655ab, 0x07b2eb28, 0x032fb5c2, 0x9a86c57b, 0xa5d33708, 0xf2302887, 0xb223bfa5, 0xba02036a, 0x5ced1682, 0x2b8acf1c, 0x92a779b4, 0xf0f307f2, 0xa14e69e2, 0xcd65daf4, 0xd50605be, 0x1fd13462, 0x8ac4a6fe, 0x9d342e53, 0xa0a2f355, 0x32058ae1, 0x75a4f6eb, 0x390b83ec, 0xaa4060ef, 0x065e719f, 0x51bd6e10, 0xf93e218a, 0x3d96dd06, 0xaedd3e05, 0x464de6bd, 0xb591548d, 0x0571c45d, 0x6f0406d4, 0xff605015, 0x241998fb, 0x97d6bde9, 0xcc894043, 0x7767d99e, 0xbdb0e842, 0x8807898b, 0x38e7195b, 0xdb79c8ee, 0x47a17c0a, 0xe97c420f, 0xc9f8841e, 0x00000000, 0x83098086, 0x48322bed, 0xac1e1170, 0x4e6c5a72, 0xfbfd0eff, 0x560f8538, 0x1e3daed5, 0x27362d39, 0x640a0fd9, 0x21685ca6, 0xd19b5b54, 0x3a24362e, 0xb10c0a67, 0x0f9357e7, 0xd2b4ee96, 0x9e1b9b91, 0x4f80c0c5, 0xa261dc20, 0x695a774b, 0x161c121a, 0x0ae293ba, 0xe5c0a02a, 0x433c22e0, 0x1d121b17, 0x0b0e090d, 0xadf28bc7, 0xb92db6a8, 0xc8141ea9, 0x8557f119, 0x4caf7507, 0xbbee99dd, 0xfda37f60, 0x9ff70126, 0xbc5c72f5, 0xc544663b, 0x345bfb7e, 0x768b4329, 0xdccb23c6, 0x68b6edfc, 0x63b8e4f1, 0xcad731dc, 0x10426385, 0x40139722, 0x2084c611, 0x7d854a24, 0xf8d2bb3d, 0x11aef932, 0x6dc729a1, 0x4b1d9e2f, 0xf3dcb230, 0xec0d8652, 0xd077c1e3, 0x6c2bb316, 0x99a970b9, 0xfa119448, 0x2247e964, 0xc4a8fc8c, 0x1aa0f03f, 0xd8567d2c, 0xef223390, 0xc787494e, 0xc1d938d1, 0xfe8ccaa2, 0x3698d40b, 0xcfa6f581, 0x28a57ade, 
0x26dab78e, 0xa43fadbf, 0xe42c3a9d, 0x0d507892, 0x9b6a5fcc, 0x62547e46, 0xc2f68d13, 0xe890d8b8, 0x5e2e39f7, 0xf582c3af, 0xbe9f5d80, 0x7c69d093, 0xa96fd52d, 0xb3cf2512, 0x3bc8ac99, 0xa710187d, 0x6ee89c63, 0x7bdb3bbb, 0x09cd2678, 0xf46e5918, 0x01ec9ab7, 0xa8834f9a, 0x65e6956e, 0x7eaaffe6, 0x0821bccf, 0xe6ef15e8, 0xd9bae79b, 0xce4a6f36, 0xd4ea9f09, 0xd629b07c, 0xaf31a4b2, 0x312a3f23, 0x30c6a594, 0xc035a266, 0x37744ebc, 0xa6fc82ca, 0xb0e090d0, 0x1533a7d8, 0x4af10498, 0xf741ecda, 0x0e7fcd50, 0x2f1791f6, 0x8d764dd6, 0x4d43efb0, 0x54ccaa4d, 0xdfe49604, 0xe39ed1b5, 0x1b4c6a88, 0xb8c12c1f, 0x7f466551, 0x049d5eea, 0x5d018c35, 0x73fa8774, 0x2efb0b41, 0x5ab3671d, 0x5292dbd2, 0x33e91056, 0x136dd647, 0x8c9ad761, 0x7a37a10c, 0x8e59f814, 0x89eb133c, 0xeecea927, 0x35b761c9, 0xede11ce5, 0x3c7a47b1, 0x599cd2df, 0x3f55f273, 0x791814ce, 0xbf73c737, 0xea53f7cd, 0x5b5ffdaa, 0x14df3d6f, 0x867844db, 0x81caaff3, 0x3eb968c4, 0x2c382434, 0x5fc2a340, 0x72161dc3, 0x0cbce225, 0x8b283c49, 0x41ff0d95, 0x7139a801, 0xde080cb3, 0x9cd8b4e4, 0x906456c1, 0x617bcb84, 0x70d532b6, 0x74486c5c, 0x42d0b857 ]
|
||||||
|
T7 = [ 0xa75051f4, 0x65537e41, 0xa4c31a17, 0x5e963a27, 0x6bcb3bab, 0x45f11f9d, 0x58abacfa, 0x03934be3, 0xfa552030, 0x6df6ad76, 0x769188cc, 0x4c25f502, 0xd7fc4fe5, 0xcbd7c52a, 0x44802635, 0xa38fb562, 0x5a49deb1, 0x1b6725ba, 0x0e9845ea, 0xc0e15dfe, 0x7502c32f, 0xf012814c, 0x97a38d46, 0xf9c66bd3, 0x5fe7038f, 0x9c951592, 0x7aebbf6d, 0x59da9552, 0x832dd4be, 0x21d35874, 0x692949e0, 0xc8448ec9, 0x896a75c2, 0x7978f48e, 0x3e6b9958, 0x71dd27b9, 0x4fb6bee1, 0xad17f088, 0xac66c920, 0x3ab47dce, 0x4a1863df, 0x3182e51a, 0x33609751, 0x7f456253, 0x77e0b164, 0xae84bb6b, 0xa01cfe81, 0x2b94f908, 0x68587048, 0xfd198f45, 0x6c8794de, 0xf8b7527b, 0xd323ab73, 0x02e2724b, 0x8f57e31f, 0xab2a6655, 0x2807b2eb, 0xc2032fb5, 0x7b9a86c5, 0x08a5d337, 0x87f23028, 0xa5b223bf, 0x6aba0203, 0x825ced16, 0x1c2b8acf, 0xb492a779, 0xf2f0f307, 0xe2a14e69, 0xf4cd65da, 0xbed50605, 0x621fd134, 0xfe8ac4a6, 0x539d342e, 0x55a0a2f3, 0xe132058a, 0xeb75a4f6, 0xec390b83, 0xefaa4060, 0x9f065e71, 0x1051bd6e, 0x8af93e21, 0x063d96dd, 0x05aedd3e, 0xbd464de6, 0x8db59154, 0x5d0571c4, 0xd46f0406, 0x15ff6050, 0xfb241998, 0xe997d6bd, 0x43cc8940, 0x9e7767d9, 0x42bdb0e8, 0x8b880789, 0x5b38e719, 0xeedb79c8, 0x0a47a17c, 0x0fe97c42, 0x1ec9f884, 0x00000000, 0x86830980, 0xed48322b, 0x70ac1e11, 0x724e6c5a, 0xfffbfd0e, 0x38560f85, 0xd51e3dae, 0x3927362d, 0xd9640a0f, 0xa621685c, 0x54d19b5b, 0x2e3a2436, 0x67b10c0a, 0xe70f9357, 0x96d2b4ee, 0x919e1b9b, 0xc54f80c0, 0x20a261dc, 0x4b695a77, 0x1a161c12, 0xba0ae293, 0x2ae5c0a0, 0xe0433c22, 0x171d121b, 0x0d0b0e09, 0xc7adf28b, 0xa8b92db6, 0xa9c8141e, 0x198557f1, 0x074caf75, 0xddbbee99, 0x60fda37f, 0x269ff701, 0xf5bc5c72, 0x3bc54466, 0x7e345bfb, 0x29768b43, 0xc6dccb23, 0xfc68b6ed, 0xf163b8e4, 0xdccad731, 0x85104263, 0x22401397, 0x112084c6, 0x247d854a, 0x3df8d2bb, 0x3211aef9, 0xa16dc729, 0x2f4b1d9e, 0x30f3dcb2, 0x52ec0d86, 0xe3d077c1, 0x166c2bb3, 0xb999a970, 0x48fa1194, 0x642247e9, 0x8cc4a8fc, 0x3f1aa0f0, 0x2cd8567d, 0x90ef2233, 0x4ec78749, 0xd1c1d938, 0xa2fe8cca, 0x0b3698d4, 0x81cfa6f5, 0xde28a57a, 
0x8e26dab7, 0xbfa43fad, 0x9de42c3a, 0x920d5078, 0xcc9b6a5f, 0x4662547e, 0x13c2f68d, 0xb8e890d8, 0xf75e2e39, 0xaff582c3, 0x80be9f5d, 0x937c69d0, 0x2da96fd5, 0x12b3cf25, 0x993bc8ac, 0x7da71018, 0x636ee89c, 0xbb7bdb3b, 0x7809cd26, 0x18f46e59, 0xb701ec9a, 0x9aa8834f, 0x6e65e695, 0xe67eaaff, 0xcf0821bc, 0xe8e6ef15, 0x9bd9bae7, 0x36ce4a6f, 0x09d4ea9f, 0x7cd629b0, 0xb2af31a4, 0x23312a3f, 0x9430c6a5, 0x66c035a2, 0xbc37744e, 0xcaa6fc82, 0xd0b0e090, 0xd81533a7, 0x984af104, 0xdaf741ec, 0x500e7fcd, 0xf62f1791, 0xd68d764d, 0xb04d43ef, 0x4d54ccaa, 0x04dfe496, 0xb5e39ed1, 0x881b4c6a, 0x1fb8c12c, 0x517f4665, 0xea049d5e, 0x355d018c, 0x7473fa87, 0x412efb0b, 0x1d5ab367, 0xd25292db, 0x5633e910, 0x47136dd6, 0x618c9ad7, 0x0c7a37a1, 0x148e59f8, 0x3c89eb13, 0x27eecea9, 0xc935b761, 0xe5ede11c, 0xb13c7a47, 0xdf599cd2, 0x733f55f2, 0xce791814, 0x37bf73c7, 0xcdea53f7, 0xaa5b5ffd, 0x6f14df3d, 0xdb867844, 0xf381caaf, 0xc43eb968, 0x342c3824, 0x405fc2a3, 0xc372161d, 0x250cbce2, 0x498b283c, 0x9541ff0d, 0x017139a8, 0xb3de080c, 0xe49cd8b4, 0xc1906456, 0x84617bcb, 0xb670d532, 0x5c74486c, 0x5742d0b8 ]
|
||||||
|
# Decryption round lookup table; indexed in AES.decrypt with the low byte of each state word.
T8 = [ 0xf4a75051, 0x4165537e, 0x17a4c31a, 0x275e963a, 0xab6bcb3b, 0x9d45f11f, 0xfa58abac, 0xe303934b, 0x30fa5520, 0x766df6ad, 0xcc769188, 0x024c25f5, 0xe5d7fc4f, 0x2acbd7c5, 0x35448026, 0x62a38fb5, 0xb15a49de, 0xba1b6725, 0xea0e9845, 0xfec0e15d, 0x2f7502c3, 0x4cf01281, 0x4697a38d, 0xd3f9c66b, 0x8f5fe703, 0x929c9515, 0x6d7aebbf, 0x5259da95, 0xbe832dd4, 0x7421d358, 0xe0692949, 0xc9c8448e, 0xc2896a75, 0x8e7978f4, 0x583e6b99, 0xb971dd27, 0xe14fb6be, 0x88ad17f0, 0x20ac66c9, 0xce3ab47d, 0xdf4a1863, 0x1a3182e5, 0x51336097, 0x537f4562, 0x6477e0b1, 0x6bae84bb, 0x81a01cfe, 0x082b94f9, 0x48685870, 0x45fd198f, 0xde6c8794, 0x7bf8b752, 0x73d323ab, 0x4b02e272, 0x1f8f57e3, 0x55ab2a66, 0xeb2807b2, 0xb5c2032f, 0xc57b9a86, 0x3708a5d3, 0x2887f230, 0xbfa5b223, 0x036aba02, 0x16825ced, 0xcf1c2b8a, 0x79b492a7, 0x07f2f0f3, 0x69e2a14e, 0xdaf4cd65, 0x05bed506, 0x34621fd1, 0xa6fe8ac4, 0x2e539d34, 0xf355a0a2, 0x8ae13205, 0xf6eb75a4, 0x83ec390b, 0x60efaa40, 0x719f065e, 0x6e1051bd, 0x218af93e, 0xdd063d96, 0x3e05aedd, 0xe6bd464d, 0x548db591, 0xc45d0571, 0x06d46f04, 0x5015ff60, 0x98fb2419, 0xbde997d6, 0x4043cc89, 0xd99e7767, 0xe842bdb0, 0x898b8807, 0x195b38e7, 0xc8eedb79, 0x7c0a47a1, 0x420fe97c, 0x841ec9f8, 0x00000000, 0x80868309, 0x2bed4832, 0x1170ac1e, 0x5a724e6c, 0x0efffbfd, 0x8538560f, 0xaed51e3d, 0x2d392736, 0x0fd9640a, 0x5ca62168, 0x5b54d19b, 0x362e3a24, 0x0a67b10c, 0x57e70f93, 0xee96d2b4, 0x9b919e1b, 0xc0c54f80, 0xdc20a261, 0x774b695a, 0x121a161c, 0x93ba0ae2, 0xa02ae5c0, 0x22e0433c, 0x1b171d12, 0x090d0b0e, 0x8bc7adf2, 0xb6a8b92d, 0x1ea9c814, 0xf1198557, 0x75074caf, 0x99ddbbee, 0x7f60fda3, 0x01269ff7, 0x72f5bc5c, 0x663bc544, 0xfb7e345b, 0x4329768b, 0x23c6dccb, 0xedfc68b6, 0xe4f163b8, 0x31dccad7, 0x63851042, 0x97224013, 0xc6112084, 0x4a247d85, 0xbb3df8d2, 0xf93211ae, 0x29a16dc7, 0x9e2f4b1d, 0xb230f3dc, 0x8652ec0d, 0xc1e3d077, 0xb3166c2b, 0x70b999a9, 0x9448fa11, 0xe9642247, 0xfc8cc4a8, 0xf03f1aa0, 0x7d2cd856, 0x3390ef22, 0x494ec787, 0x38d1c1d9, 0xcaa2fe8c, 0xd40b3698, 0xf581cfa6, 0x7ade28a5,
0xb78e26da, 0xadbfa43f, 0x3a9de42c, 0x78920d50, 0x5fcc9b6a, 0x7e466254, 0x8d13c2f6, 0xd8b8e890, 0x39f75e2e, 0xc3aff582, 0x5d80be9f, 0xd0937c69, 0xd52da96f, 0x2512b3cf, 0xac993bc8, 0x187da710, 0x9c636ee8, 0x3bbb7bdb, 0x267809cd, 0x5918f46e, 0x9ab701ec, 0x4f9aa883, 0x956e65e6, 0xffe67eaa, 0xbccf0821, 0x15e8e6ef, 0xe79bd9ba, 0x6f36ce4a, 0x9f09d4ea, 0xb07cd629, 0xa4b2af31, 0x3f23312a, 0xa59430c6, 0xa266c035, 0x4ebc3774, 0x82caa6fc, 0x90d0b0e0, 0xa7d81533, 0x04984af1, 0xecdaf741, 0xcd500e7f, 0x91f62f17, 0x4dd68d76, 0xefb04d43, 0xaa4d54cc, 0x9604dfe4, 0xd1b5e39e, 0x6a881b4c, 0x2c1fb8c1, 0x65517f46, 0x5eea049d, 0x8c355d01, 0x877473fa, 0x0b412efb, 0x671d5ab3, 0xdbd25292, 0x105633e9, 0xd647136d, 0xd7618c9a, 0xa10c7a37, 0xf8148e59, 0x133c89eb, 0xa927eece, 0x61c935b7, 0x1ce5ede1, 0x47b13c7a, 0xd2df599c, 0xf2733f55, 0x14ce7918, 0xc737bf73, 0xf7cdea53, 0xfdaa5b5f, 0x3d6f14df, 0x44db8678, 0xaff381ca, 0x68c43eb9, 0x24342c38, 0xa3405fc2, 0x1dc37216, 0xe2250cbc, 0x3c498b28, 0x0d9541ff, 0xa8017139, 0x0cb3de08, 0xb4e49cd8, 0x56c19064, 0xcb84617b, 0x32b670d5, 0x6c5c7448, 0xb85742d0 ]
|
||||||
|
|
||||||
|
# Transformations for decryption key expansion
|
||||||
|
# Decryption key-schedule table; applied in __init__ to the ">> 24" byte of each _Kd word.
U1 = [ 0x00000000, 0x0e090d0b, 0x1c121a16, 0x121b171d, 0x3824342c, 0x362d3927, 0x24362e3a, 0x2a3f2331, 0x70486858, 0x7e416553, 0x6c5a724e, 0x62537f45, 0x486c5c74, 0x4665517f, 0x547e4662, 0x5a774b69, 0xe090d0b0, 0xee99ddbb, 0xfc82caa6, 0xf28bc7ad, 0xd8b4e49c, 0xd6bde997, 0xc4a6fe8a, 0xcaaff381, 0x90d8b8e8, 0x9ed1b5e3, 0x8ccaa2fe, 0x82c3aff5, 0xa8fc8cc4, 0xa6f581cf, 0xb4ee96d2, 0xbae79bd9, 0xdb3bbb7b, 0xd532b670, 0xc729a16d, 0xc920ac66, 0xe31f8f57, 0xed16825c, 0xff0d9541, 0xf104984a, 0xab73d323, 0xa57ade28, 0xb761c935, 0xb968c43e, 0x9357e70f, 0x9d5eea04, 0x8f45fd19, 0x814cf012, 0x3bab6bcb, 0x35a266c0, 0x27b971dd, 0x29b07cd6, 0x038f5fe7, 0x0d8652ec, 0x1f9d45f1, 0x119448fa, 0x4be30393, 0x45ea0e98, 0x57f11985, 0x59f8148e, 0x73c737bf, 0x7dce3ab4, 0x6fd52da9, 0x61dc20a2, 0xad766df6, 0xa37f60fd, 0xb16477e0, 0xbf6d7aeb, 0x955259da, 0x9b5b54d1, 0x894043cc, 0x87494ec7, 0xdd3e05ae, 0xd33708a5, 0xc12c1fb8, 0xcf2512b3, 0xe51a3182, 0xeb133c89, 0xf9082b94, 0xf701269f, 0x4de6bd46, 0x43efb04d, 0x51f4a750, 0x5ffdaa5b, 0x75c2896a, 0x7bcb8461, 0x69d0937c, 0x67d99e77, 0x3daed51e, 0x33a7d815, 0x21bccf08, 0x2fb5c203, 0x058ae132, 0x0b83ec39, 0x1998fb24, 0x1791f62f, 0x764dd68d, 0x7844db86, 0x6a5fcc9b, 0x6456c190, 0x4e69e2a1, 0x4060efaa, 0x527bf8b7, 0x5c72f5bc, 0x0605bed5, 0x080cb3de, 0x1a17a4c3, 0x141ea9c8, 0x3e218af9, 0x302887f2, 0x223390ef, 0x2c3a9de4, 0x96dd063d, 0x98d40b36, 0x8acf1c2b, 0x84c61120, 0xaef93211, 0xa0f03f1a, 0xb2eb2807, 0xbce2250c, 0xe6956e65, 0xe89c636e, 0xfa877473, 0xf48e7978, 0xdeb15a49, 0xd0b85742, 0xc2a3405f, 0xccaa4d54, 0x41ecdaf7, 0x4fe5d7fc, 0x5dfec0e1, 0x53f7cdea, 0x79c8eedb, 0x77c1e3d0, 0x65daf4cd, 0x6bd3f9c6, 0x31a4b2af, 0x3fadbfa4, 0x2db6a8b9, 0x23bfa5b2, 0x09808683, 0x07898b88, 0x15929c95, 0x1b9b919e, 0xa17c0a47, 0xaf75074c, 0xbd6e1051, 0xb3671d5a, 0x99583e6b, 0x97513360, 0x854a247d, 0x8b432976, 0xd134621f, 0xdf3d6f14, 0xcd267809, 0xc32f7502, 0xe9105633, 0xe7195b38, 0xf5024c25, 0xfb0b412e, 0x9ad7618c, 0x94de6c87, 0x86c57b9a, 0x88cc7691, 0xa2f355a0, 0xacfa58ab,
0xbee14fb6, 0xb0e842bd, 0xea9f09d4, 0xe49604df, 0xf68d13c2, 0xf8841ec9, 0xd2bb3df8, 0xdcb230f3, 0xcea927ee, 0xc0a02ae5, 0x7a47b13c, 0x744ebc37, 0x6655ab2a, 0x685ca621, 0x42638510, 0x4c6a881b, 0x5e719f06, 0x5078920d, 0x0a0fd964, 0x0406d46f, 0x161dc372, 0x1814ce79, 0x322bed48, 0x3c22e043, 0x2e39f75e, 0x2030fa55, 0xec9ab701, 0xe293ba0a, 0xf088ad17, 0xfe81a01c, 0xd4be832d, 0xdab78e26, 0xc8ac993b, 0xc6a59430, 0x9cd2df59, 0x92dbd252, 0x80c0c54f, 0x8ec9c844, 0xa4f6eb75, 0xaaffe67e, 0xb8e4f163, 0xb6edfc68, 0x0c0a67b1, 0x02036aba, 0x10187da7, 0x1e1170ac, 0x342e539d, 0x3a275e96, 0x283c498b, 0x26354480, 0x7c420fe9, 0x724b02e2, 0x605015ff, 0x6e5918f4, 0x44663bc5, 0x4a6f36ce, 0x587421d3, 0x567d2cd8, 0x37a10c7a, 0x39a80171, 0x2bb3166c, 0x25ba1b67, 0x0f853856, 0x018c355d, 0x13972240, 0x1d9e2f4b, 0x47e96422, 0x49e06929, 0x5bfb7e34, 0x55f2733f, 0x7fcd500e, 0x71c45d05, 0x63df4a18, 0x6dd64713, 0xd731dcca, 0xd938d1c1, 0xcb23c6dc, 0xc52acbd7, 0xef15e8e6, 0xe11ce5ed, 0xf307f2f0, 0xfd0efffb, 0xa779b492, 0xa970b999, 0xbb6bae84, 0xb562a38f, 0x9f5d80be, 0x91548db5, 0x834f9aa8, 0x8d4697a3 ]
|
||||||
|
# Decryption key-schedule table; applied in __init__ to the ">> 16" byte of each _Kd word.
U2 = [ 0x00000000, 0x0b0e090d, 0x161c121a, 0x1d121b17, 0x2c382434, 0x27362d39, 0x3a24362e, 0x312a3f23, 0x58704868, 0x537e4165, 0x4e6c5a72, 0x4562537f, 0x74486c5c, 0x7f466551, 0x62547e46, 0x695a774b, 0xb0e090d0, 0xbbee99dd, 0xa6fc82ca, 0xadf28bc7, 0x9cd8b4e4, 0x97d6bde9, 0x8ac4a6fe, 0x81caaff3, 0xe890d8b8, 0xe39ed1b5, 0xfe8ccaa2, 0xf582c3af, 0xc4a8fc8c, 0xcfa6f581, 0xd2b4ee96, 0xd9bae79b, 0x7bdb3bbb, 0x70d532b6, 0x6dc729a1, 0x66c920ac, 0x57e31f8f, 0x5ced1682, 0x41ff0d95, 0x4af10498, 0x23ab73d3, 0x28a57ade, 0x35b761c9, 0x3eb968c4, 0x0f9357e7, 0x049d5eea, 0x198f45fd, 0x12814cf0, 0xcb3bab6b, 0xc035a266, 0xdd27b971, 0xd629b07c, 0xe7038f5f, 0xec0d8652, 0xf11f9d45, 0xfa119448, 0x934be303, 0x9845ea0e, 0x8557f119, 0x8e59f814, 0xbf73c737, 0xb47dce3a, 0xa96fd52d, 0xa261dc20, 0xf6ad766d, 0xfda37f60, 0xe0b16477, 0xebbf6d7a, 0xda955259, 0xd19b5b54, 0xcc894043, 0xc787494e, 0xaedd3e05, 0xa5d33708, 0xb8c12c1f, 0xb3cf2512, 0x82e51a31, 0x89eb133c, 0x94f9082b, 0x9ff70126, 0x464de6bd, 0x4d43efb0, 0x5051f4a7, 0x5b5ffdaa, 0x6a75c289, 0x617bcb84, 0x7c69d093, 0x7767d99e, 0x1e3daed5, 0x1533a7d8, 0x0821bccf, 0x032fb5c2, 0x32058ae1, 0x390b83ec, 0x241998fb, 0x2f1791f6, 0x8d764dd6, 0x867844db, 0x9b6a5fcc, 0x906456c1, 0xa14e69e2, 0xaa4060ef, 0xb7527bf8, 0xbc5c72f5, 0xd50605be, 0xde080cb3, 0xc31a17a4, 0xc8141ea9, 0xf93e218a, 0xf2302887, 0xef223390, 0xe42c3a9d, 0x3d96dd06, 0x3698d40b, 0x2b8acf1c, 0x2084c611, 0x11aef932, 0x1aa0f03f, 0x07b2eb28, 0x0cbce225, 0x65e6956e, 0x6ee89c63, 0x73fa8774, 0x78f48e79, 0x49deb15a, 0x42d0b857, 0x5fc2a340, 0x54ccaa4d, 0xf741ecda, 0xfc4fe5d7, 0xe15dfec0, 0xea53f7cd, 0xdb79c8ee, 0xd077c1e3, 0xcd65daf4, 0xc66bd3f9, 0xaf31a4b2, 0xa43fadbf, 0xb92db6a8, 0xb223bfa5, 0x83098086, 0x8807898b, 0x9515929c, 0x9e1b9b91, 0x47a17c0a, 0x4caf7507, 0x51bd6e10, 0x5ab3671d, 0x6b99583e, 0x60975133, 0x7d854a24, 0x768b4329, 0x1fd13462, 0x14df3d6f, 0x09cd2678, 0x02c32f75, 0x33e91056, 0x38e7195b, 0x25f5024c, 0x2efb0b41, 0x8c9ad761, 0x8794de6c, 0x9a86c57b, 0x9188cc76, 0xa0a2f355, 0xabacfa58,
0xb6bee14f, 0xbdb0e842, 0xd4ea9f09, 0xdfe49604, 0xc2f68d13, 0xc9f8841e, 0xf8d2bb3d, 0xf3dcb230, 0xeecea927, 0xe5c0a02a, 0x3c7a47b1, 0x37744ebc, 0x2a6655ab, 0x21685ca6, 0x10426385, 0x1b4c6a88, 0x065e719f, 0x0d507892, 0x640a0fd9, 0x6f0406d4, 0x72161dc3, 0x791814ce, 0x48322bed, 0x433c22e0, 0x5e2e39f7, 0x552030fa, 0x01ec9ab7, 0x0ae293ba, 0x17f088ad, 0x1cfe81a0, 0x2dd4be83, 0x26dab78e, 0x3bc8ac99, 0x30c6a594, 0x599cd2df, 0x5292dbd2, 0x4f80c0c5, 0x448ec9c8, 0x75a4f6eb, 0x7eaaffe6, 0x63b8e4f1, 0x68b6edfc, 0xb10c0a67, 0xba02036a, 0xa710187d, 0xac1e1170, 0x9d342e53, 0x963a275e, 0x8b283c49, 0x80263544, 0xe97c420f, 0xe2724b02, 0xff605015, 0xf46e5918, 0xc544663b, 0xce4a6f36, 0xd3587421, 0xd8567d2c, 0x7a37a10c, 0x7139a801, 0x6c2bb316, 0x6725ba1b, 0x560f8538, 0x5d018c35, 0x40139722, 0x4b1d9e2f, 0x2247e964, 0x2949e069, 0x345bfb7e, 0x3f55f273, 0x0e7fcd50, 0x0571c45d, 0x1863df4a, 0x136dd647, 0xcad731dc, 0xc1d938d1, 0xdccb23c6, 0xd7c52acb, 0xe6ef15e8, 0xede11ce5, 0xf0f307f2, 0xfbfd0eff, 0x92a779b4, 0x99a970b9, 0x84bb6bae, 0x8fb562a3, 0xbe9f5d80, 0xb591548d, 0xa8834f9a, 0xa38d4697 ]
|
||||||
|
# Decryption key-schedule table; applied in __init__ to the ">> 8" byte of each _Kd word.
U3 = [ 0x00000000, 0x0d0b0e09, 0x1a161c12, 0x171d121b, 0x342c3824, 0x3927362d, 0x2e3a2436, 0x23312a3f, 0x68587048, 0x65537e41, 0x724e6c5a, 0x7f456253, 0x5c74486c, 0x517f4665, 0x4662547e, 0x4b695a77, 0xd0b0e090, 0xddbbee99, 0xcaa6fc82, 0xc7adf28b, 0xe49cd8b4, 0xe997d6bd, 0xfe8ac4a6, 0xf381caaf, 0xb8e890d8, 0xb5e39ed1, 0xa2fe8cca, 0xaff582c3, 0x8cc4a8fc, 0x81cfa6f5, 0x96d2b4ee, 0x9bd9bae7, 0xbb7bdb3b, 0xb670d532, 0xa16dc729, 0xac66c920, 0x8f57e31f, 0x825ced16, 0x9541ff0d, 0x984af104, 0xd323ab73, 0xde28a57a, 0xc935b761, 0xc43eb968, 0xe70f9357, 0xea049d5e, 0xfd198f45, 0xf012814c, 0x6bcb3bab, 0x66c035a2, 0x71dd27b9, 0x7cd629b0, 0x5fe7038f, 0x52ec0d86, 0x45f11f9d, 0x48fa1194, 0x03934be3, 0x0e9845ea, 0x198557f1, 0x148e59f8, 0x37bf73c7, 0x3ab47dce, 0x2da96fd5, 0x20a261dc, 0x6df6ad76, 0x60fda37f, 0x77e0b164, 0x7aebbf6d, 0x59da9552, 0x54d19b5b, 0x43cc8940, 0x4ec78749, 0x05aedd3e, 0x08a5d337, 0x1fb8c12c, 0x12b3cf25, 0x3182e51a, 0x3c89eb13, 0x2b94f908, 0x269ff701, 0xbd464de6, 0xb04d43ef, 0xa75051f4, 0xaa5b5ffd, 0x896a75c2, 0x84617bcb, 0x937c69d0, 0x9e7767d9, 0xd51e3dae, 0xd81533a7, 0xcf0821bc, 0xc2032fb5, 0xe132058a, 0xec390b83, 0xfb241998, 0xf62f1791, 0xd68d764d, 0xdb867844, 0xcc9b6a5f, 0xc1906456, 0xe2a14e69, 0xefaa4060, 0xf8b7527b, 0xf5bc5c72, 0xbed50605, 0xb3de080c, 0xa4c31a17, 0xa9c8141e, 0x8af93e21, 0x87f23028, 0x90ef2233, 0x9de42c3a, 0x063d96dd, 0x0b3698d4, 0x1c2b8acf, 0x112084c6, 0x3211aef9, 0x3f1aa0f0, 0x2807b2eb, 0x250cbce2, 0x6e65e695, 0x636ee89c, 0x7473fa87, 0x7978f48e, 0x5a49deb1, 0x5742d0b8, 0x405fc2a3, 0x4d54ccaa, 0xdaf741ec, 0xd7fc4fe5, 0xc0e15dfe, 0xcdea53f7, 0xeedb79c8, 0xe3d077c1, 0xf4cd65da, 0xf9c66bd3, 0xb2af31a4, 0xbfa43fad, 0xa8b92db6, 0xa5b223bf, 0x86830980, 0x8b880789, 0x9c951592, 0x919e1b9b, 0x0a47a17c, 0x074caf75, 0x1051bd6e, 0x1d5ab367, 0x3e6b9958, 0x33609751, 0x247d854a, 0x29768b43, 0x621fd134, 0x6f14df3d, 0x7809cd26, 0x7502c32f, 0x5633e910, 0x5b38e719, 0x4c25f502, 0x412efb0b, 0x618c9ad7, 0x6c8794de, 0x7b9a86c5, 0x769188cc, 0x55a0a2f3, 0x58abacfa,
0x4fb6bee1, 0x42bdb0e8, 0x09d4ea9f, 0x04dfe496, 0x13c2f68d, 0x1ec9f884, 0x3df8d2bb, 0x30f3dcb2, 0x27eecea9, 0x2ae5c0a0, 0xb13c7a47, 0xbc37744e, 0xab2a6655, 0xa621685c, 0x85104263, 0x881b4c6a, 0x9f065e71, 0x920d5078, 0xd9640a0f, 0xd46f0406, 0xc372161d, 0xce791814, 0xed48322b, 0xe0433c22, 0xf75e2e39, 0xfa552030, 0xb701ec9a, 0xba0ae293, 0xad17f088, 0xa01cfe81, 0x832dd4be, 0x8e26dab7, 0x993bc8ac, 0x9430c6a5, 0xdf599cd2, 0xd25292db, 0xc54f80c0, 0xc8448ec9, 0xeb75a4f6, 0xe67eaaff, 0xf163b8e4, 0xfc68b6ed, 0x67b10c0a, 0x6aba0203, 0x7da71018, 0x70ac1e11, 0x539d342e, 0x5e963a27, 0x498b283c, 0x44802635, 0x0fe97c42, 0x02e2724b, 0x15ff6050, 0x18f46e59, 0x3bc54466, 0x36ce4a6f, 0x21d35874, 0x2cd8567d, 0x0c7a37a1, 0x017139a8, 0x166c2bb3, 0x1b6725ba, 0x38560f85, 0x355d018c, 0x22401397, 0x2f4b1d9e, 0x642247e9, 0x692949e0, 0x7e345bfb, 0x733f55f2, 0x500e7fcd, 0x5d0571c4, 0x4a1863df, 0x47136dd6, 0xdccad731, 0xd1c1d938, 0xc6dccb23, 0xcbd7c52a, 0xe8e6ef15, 0xe5ede11c, 0xf2f0f307, 0xfffbfd0e, 0xb492a779, 0xb999a970, 0xae84bb6b, 0xa38fb562, 0x80be9f5d, 0x8db59154, 0x9aa8834f, 0x97a38d46 ]
|
||||||
|
# Decryption key-schedule table; applied in __init__ to the low byte of each _Kd word.
U4 = [ 0x00000000, 0x090d0b0e, 0x121a161c, 0x1b171d12, 0x24342c38, 0x2d392736, 0x362e3a24, 0x3f23312a, 0x48685870, 0x4165537e, 0x5a724e6c, 0x537f4562, 0x6c5c7448, 0x65517f46, 0x7e466254, 0x774b695a, 0x90d0b0e0, 0x99ddbbee, 0x82caa6fc, 0x8bc7adf2, 0xb4e49cd8, 0xbde997d6, 0xa6fe8ac4, 0xaff381ca, 0xd8b8e890, 0xd1b5e39e, 0xcaa2fe8c, 0xc3aff582, 0xfc8cc4a8, 0xf581cfa6, 0xee96d2b4, 0xe79bd9ba, 0x3bbb7bdb, 0x32b670d5, 0x29a16dc7, 0x20ac66c9, 0x1f8f57e3, 0x16825ced, 0x0d9541ff, 0x04984af1, 0x73d323ab, 0x7ade28a5, 0x61c935b7, 0x68c43eb9, 0x57e70f93, 0x5eea049d, 0x45fd198f, 0x4cf01281, 0xab6bcb3b, 0xa266c035, 0xb971dd27, 0xb07cd629, 0x8f5fe703, 0x8652ec0d, 0x9d45f11f, 0x9448fa11, 0xe303934b, 0xea0e9845, 0xf1198557, 0xf8148e59, 0xc737bf73, 0xce3ab47d, 0xd52da96f, 0xdc20a261, 0x766df6ad, 0x7f60fda3, 0x6477e0b1, 0x6d7aebbf, 0x5259da95, 0x5b54d19b, 0x4043cc89, 0x494ec787, 0x3e05aedd, 0x3708a5d3, 0x2c1fb8c1, 0x2512b3cf, 0x1a3182e5, 0x133c89eb, 0x082b94f9, 0x01269ff7, 0xe6bd464d, 0xefb04d43, 0xf4a75051, 0xfdaa5b5f, 0xc2896a75, 0xcb84617b, 0xd0937c69, 0xd99e7767, 0xaed51e3d, 0xa7d81533, 0xbccf0821, 0xb5c2032f, 0x8ae13205, 0x83ec390b, 0x98fb2419, 0x91f62f17, 0x4dd68d76, 0x44db8678, 0x5fcc9b6a, 0x56c19064, 0x69e2a14e, 0x60efaa40, 0x7bf8b752, 0x72f5bc5c, 0x05bed506, 0x0cb3de08, 0x17a4c31a, 0x1ea9c814, 0x218af93e, 0x2887f230, 0x3390ef22, 0x3a9de42c, 0xdd063d96, 0xd40b3698, 0xcf1c2b8a, 0xc6112084, 0xf93211ae, 0xf03f1aa0, 0xeb2807b2, 0xe2250cbc, 0x956e65e6, 0x9c636ee8, 0x877473fa, 0x8e7978f4, 0xb15a49de, 0xb85742d0, 0xa3405fc2, 0xaa4d54cc, 0xecdaf741, 0xe5d7fc4f, 0xfec0e15d, 0xf7cdea53, 0xc8eedb79, 0xc1e3d077, 0xdaf4cd65, 0xd3f9c66b, 0xa4b2af31, 0xadbfa43f, 0xb6a8b92d, 0xbfa5b223, 0x80868309, 0x898b8807, 0x929c9515, 0x9b919e1b, 0x7c0a47a1, 0x75074caf, 0x6e1051bd, 0x671d5ab3, 0x583e6b99, 0x51336097, 0x4a247d85, 0x4329768b, 0x34621fd1, 0x3d6f14df, 0x267809cd, 0x2f7502c3, 0x105633e9, 0x195b38e7, 0x024c25f5, 0x0b412efb, 0xd7618c9a, 0xde6c8794, 0xc57b9a86, 0xcc769188, 0xf355a0a2, 0xfa58abac,
0xe14fb6be, 0xe842bdb0, 0x9f09d4ea, 0x9604dfe4, 0x8d13c2f6, 0x841ec9f8, 0xbb3df8d2, 0xb230f3dc, 0xa927eece, 0xa02ae5c0, 0x47b13c7a, 0x4ebc3774, 0x55ab2a66, 0x5ca62168, 0x63851042, 0x6a881b4c, 0x719f065e, 0x78920d50, 0x0fd9640a, 0x06d46f04, 0x1dc37216, 0x14ce7918, 0x2bed4832, 0x22e0433c, 0x39f75e2e, 0x30fa5520, 0x9ab701ec, 0x93ba0ae2, 0x88ad17f0, 0x81a01cfe, 0xbe832dd4, 0xb78e26da, 0xac993bc8, 0xa59430c6, 0xd2df599c, 0xdbd25292, 0xc0c54f80, 0xc9c8448e, 0xf6eb75a4, 0xffe67eaa, 0xe4f163b8, 0xedfc68b6, 0x0a67b10c, 0x036aba02, 0x187da710, 0x1170ac1e, 0x2e539d34, 0x275e963a, 0x3c498b28, 0x35448026, 0x420fe97c, 0x4b02e272, 0x5015ff60, 0x5918f46e, 0x663bc544, 0x6f36ce4a, 0x7421d358, 0x7d2cd856, 0xa10c7a37, 0xa8017139, 0xb3166c2b, 0xba1b6725, 0x8538560f, 0x8c355d01, 0x97224013, 0x9e2f4b1d, 0xe9642247, 0xe0692949, 0xfb7e345b, 0xf2733f55, 0xcd500e7f, 0xc45d0571, 0xdf4a1863, 0xd647136d, 0x31dccad7, 0x38d1c1d9, 0x23c6dccb, 0x2acbd7c5, 0x15e8e6ef, 0x1ce5ede1, 0x07f2f0f3, 0x0efffbfd, 0x79b492a7, 0x70b999a9, 0x6bae84bb, 0x62a38fb5, 0x5d80be9f, 0x548db591, 0x4f9aa883, 0x4697a38d ]
|
||||||
|
|
||||||
|
def __init__(self, key):
    # Build the encryption (_Ke) and decryption (_Kd) round-key schedules
    # from `key` (fips-197 sections 5.2 / 5.3).
    # `key` must be a 16-, 24- or 32-byte string (AES-128/192/256);
    # anything else raises ValueError.
    if len(key) not in (16, 24, 32):
        raise ValueError('Invalid key size')

    # Round count depends on key length (lookup defined on the class).
    rounds = self.number_of_rounds[len(key)]

    # Encryption round keys
    self._Ke = [[0] * 4 for i in xrange(rounds + 1)]

    # Decryption round keys
    self._Kd = [[0] * 4 for i in xrange(rounds + 1)]

    round_key_count = (rounds + 1) * 4
    KC = len(key) // 4  # key length in 32-bit words

    # Convert the key into ints (big-endian signed 32-bit words)
    tk = [ struct.unpack('>i', key[i:i + 4])[0] for i in xrange(0, len(key), 4) ]

    # Copy values into round key arrays
    for i in xrange(0, KC):
        self._Ke[i // 4][i % 4] = tk[i]
        self._Kd[rounds - (i // 4)][i % 4] = tk[i]

    # Key expansion (fips-197 section 5.2)
    rconpointer = 0
    t = KC
    while t < round_key_count:

        # Mix the last word back into tk[0]: S-box each byte of the
        # rotated word, then fold in the round constant.
        tt = tk[KC - 1]
        tk[0] ^= ((self.S[(tt >> 16) & 0xFF] << 24) ^
                  (self.S[(tt >> 8) & 0xFF] << 16) ^
                  (self.S[ tt & 0xFF] << 8) ^
                  self.S[(tt >> 24) & 0xFF] ^
                  (self.rcon[rconpointer] << 24))
        rconpointer += 1

        if KC != 8:
            for i in xrange(1, KC):
                tk[i] ^= tk[i - 1]

        # Key expansion for 256-bit keys is "slightly different" (fips-197):
        # an extra S-box substitution is applied at the half-way word.
        else:
            for i in xrange(1, KC // 2):
                tk[i] ^= tk[i - 1]
            tt = tk[KC // 2 - 1]

            tk[KC // 2] ^= (self.S[ tt & 0xFF] ^
                           (self.S[(tt >> 8) & 0xFF] << 8) ^
                           (self.S[(tt >> 16) & 0xFF] << 16) ^
                           (self.S[(tt >> 24) & 0xFF] << 24))

            for i in xrange(KC // 2 + 1, KC):
                tk[i] ^= tk[i - 1]

        # Copy values into round key arrays
        j = 0
        while j < KC and t < round_key_count:
            self._Ke[t // 4][t % 4] = tk[j]
            self._Kd[rounds - (t // 4)][t % 4] = tk[j]
            j += 1
            t += 1

    # Inverse-Cipher-ify the decryption round key (fips-197 section 5.3):
    # every inner-round _Kd word is passed through the U1..U4 tables.
    for r in xrange(1, rounds):
        for j in xrange(0, 4):
            tt = self._Kd[r][j]
            self._Kd[r][j] = (self.U1[(tt >> 24) & 0xFF] ^
                              self.U2[(tt >> 16) & 0xFF] ^
                              self.U3[(tt >> 8) & 0xFF] ^
                              self.U4[ tt & 0xFF])
|
||||||
|
|
||||||
|
def encrypt(self, plaintext):
    'Encrypt a block of plain text using the AES block cipher.'

    # Operates on exactly one 16-byte block; returns a list of 16 ints.
    if len(plaintext) != 16:
        raise ValueError('wrong block length')

    rounds = len(self._Ke) - 1
    # Per-column byte-rotation offsets used to pick bytes from
    # neighbouring state words in each round.
    (s1, s2, s3) = [1, 2, 3]
    a = [0, 0, 0, 0]

    # Convert plaintext to (ints ^ key)
    t = [(_compact_word(plaintext[4 * i:4 * i + 4]) ^ self._Ke[0][i]) for i in xrange(0, 4)]

    # Apply round transforms through the combined T1..T4 lookup tables,
    # XORing in the round key for each inner round.
    for r in xrange(1, rounds):
        for i in xrange(0, 4):
            a[i] = (self.T1[(t[ i ] >> 24) & 0xFF] ^
                    self.T2[(t[(i + s1) % 4] >> 16) & 0xFF] ^
                    self.T3[(t[(i + s2) % 4] >> 8) & 0xFF] ^
                    self.T4[ t[(i + s3) % 4] & 0xFF] ^
                    self._Ke[r][i])
        t = copy.copy(a)

    # The last round is special: plain S-box substitution XORed with the
    # final round key, emitted one byte at a time.
    result = [ ]
    for i in xrange(0, 4):
        tt = self._Ke[rounds][i]
        result.append((self.S[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
        result.append((self.S[(t[(i + s1) % 4] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
        result.append((self.S[(t[(i + s2) % 4] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
        result.append((self.S[ t[(i + s3) % 4] & 0xFF] ^ tt ) & 0xFF)

    return result
|
||||||
|
|
||||||
|
def decrypt(self, ciphertext):
    'Decrypt a block of cipher text using the AES block cipher.'

    # Operates on exactly one 16-byte block; returns a list of 16 ints.
    if len(ciphertext) != 16:
        raise ValueError('wrong block length')

    rounds = len(self._Kd) - 1
    # Rotation offsets are the reverse of encrypt's (3, 2, 1).
    (s1, s2, s3) = [3, 2, 1]
    a = [0, 0, 0, 0]

    # Convert ciphertext to (ints ^ key)
    t = [(_compact_word(ciphertext[4 * i:4 * i + 4]) ^ self._Kd[0][i]) for i in xrange(0, 4)]

    # Apply round transforms through the combined T5..T8 lookup tables,
    # XORing in the decryption round key for each inner round.
    for r in xrange(1, rounds):
        for i in xrange(0, 4):
            a[i] = (self.T5[(t[ i ] >> 24) & 0xFF] ^
                    self.T6[(t[(i + s1) % 4] >> 16) & 0xFF] ^
                    self.T7[(t[(i + s2) % 4] >> 8) & 0xFF] ^
                    self.T8[ t[(i + s3) % 4] & 0xFF] ^
                    self._Kd[r][i])
        t = copy.copy(a)

    # The last round is special: inverse S-box (Si) substitution XORed
    # with the final round key, emitted one byte at a time.
    result = [ ]
    for i in xrange(0, 4):
        tt = self._Kd[rounds][i]
        result.append((self.Si[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
        result.append((self.Si[(t[(i + s1) % 4] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
        result.append((self.Si[(t[(i + s2) % 4] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
        result.append((self.Si[ t[(i + s3) % 4] & 0xFF] ^ tt ) & 0xFF)

    return result
|
||||||
|
|
||||||
|
|
||||||
|
class Counter(object):
    '''Big-endian 128-bit byte counter for the Counter (CTR) mode of operation.

    To create a custom counter, you can usually just override the
    increment method.'''

    def __init__(self, initial_value = 1):
        # Decompose the initial value into 16 big-endian bytes.
        self._counter = [ (initial_value >> shift) % 256 for shift in range(120, -1, -8) ]

    value = property(lambda s: s._counter)

    def increment(self):
        '''Increment the counter (overflow rolls back to 0).'''
        for pos in range(len(self._counter) - 1, -1, -1):
            bumped = self._counter[pos] + 1
            if bumped < 256:
                self._counter[pos] = bumped
                return
            # Carry into the next more-significant byte.
            self._counter[pos] = 0
        # Every byte carried out: wrap the whole counter to zero.
        self._counter = [ 0 ] * len(self._counter)
|
||||||
|
|
||||||
|
|
||||||
|
class AESBlockModeOfOperation(object):
    '''Super-class for AES modes of operation that require blocks.

    Concrete modes must override encrypt() and decrypt().'''

    def __init__(self, key):
        # Every mode wraps one raw AES block cipher built from `key`.
        self._aes = AES(key)

    def encrypt(self, plaintext):
        '''Must be overridden by the concrete mode of operation.'''
        raise Exception('not implemented')

    def decrypt(self, ciphertext):
        '''Must be overridden by the concrete mode of operation.'''
        raise Exception('not implemented')
|
||||||
|
|
||||||
|
|
||||||
|
class AESStreamModeOfOperation(AESBlockModeOfOperation):
    '''Super-class for AES modes of operation that are stream-ciphers.

    Adds no behaviour of its own; it exists to classify modes (OFB, CTR)
    that accept input of arbitrary length.'''
|
||||||
|
|
||||||
|
class AESSegmentModeOfOperation(AESStreamModeOfOperation):
    '''Super-class for AES modes of operation that segment data.'''

    # Default segment width in bytes; AESModeOfOperationCFB shadows this
    # with a per-instance `segment_bytes` property.
    segment_bytes = 16
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class AESModeOfOperationECB(AESBlockModeOfOperation):
    '''AES Electronic Codebook (ECB) Mode of Operation.

    o Block-cipher: input must be padded to 16-byte boundaries.

    Security Notes:
      o Not recommended: identical plaintext blocks always produce
        identical ciphertext blocks, exposing data patterns.

    Also see:
      o https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Electronic_codebook_.28ECB.29
      o NIST SP800-38A (http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf); section 6.1'''

    name = "Electronic Codebook (ECB)"

    def encrypt(self, plaintext):
        '''Encrypt one 16-byte block.'''
        if len(plaintext) != 16:
            raise ValueError('plaintext block must be 16 bytes')

        block = _string_to_bytes(plaintext)
        return _bytes_to_string(self._aes.encrypt(block))

    def decrypt(self, ciphertext):
        '''Decrypt one 16-byte block.'''
        if len(ciphertext) != 16:
            raise ValueError('ciphertext block must be 16 bytes')

        block = _string_to_bytes(ciphertext)
        return _bytes_to_string(self._aes.decrypt(block))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class AESModeOfOperationCBC(AESBlockModeOfOperation):
    '''AES Cipher-Block Chaining (CBC) Mode of Operation.

    o Uses an Initialization Vector (IV).
    o Block-cipher: input must be padded to 16-byte boundaries.
    o A wrong IV corrupts only the first decrypted block; a corrupt
      ciphertext bit corrupts one block and inverts a bit in the next.

    Security Notes:
      o This method (and CTR) ARE recommended.

    Also see:
      o https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher-block_chaining_.28CBC.29
      o NIST SP800-38A (http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf); section 6.2'''

    name = "Cipher-Block Chaining (CBC)"

    def __init__(self, key, iv = None):
        # The chaining state starts from the IV (or an all-zero block).
        if iv is None:
            self._last_cipherblock = [ 0 ] * 16
        elif len(iv) != 16:
            raise ValueError('initialization vector must be 16 bytes')
        else:
            self._last_cipherblock = _string_to_bytes(iv)

        AESBlockModeOfOperation.__init__(self, key)

    def encrypt(self, plaintext):
        '''Encrypt one 16-byte block, chaining it to the previous block.'''
        if len(plaintext) != 16:
            raise ValueError('plaintext block must be 16 bytes')

        block = _string_to_bytes(plaintext)
        # XOR with the previous ciphertext block before the cipher.
        mixed = [ (b ^ prev) for (b, prev) in zip(block, self._last_cipherblock) ]
        self._last_cipherblock = self._aes.encrypt(mixed)

        return _bytes_to_string(self._last_cipherblock)

    def decrypt(self, ciphertext):
        '''Decrypt one 16-byte block, unchaining it from the previous block.'''
        if len(ciphertext) != 16:
            raise ValueError('ciphertext block must be 16 bytes')

        block = _string_to_bytes(ciphertext)
        decrypted = self._aes.decrypt(block)
        # XOR with the previous ciphertext block after the cipher.
        plain = [ (d ^ prev) for (d, prev) in zip(decrypted, self._last_cipherblock) ]
        self._last_cipherblock = block

        return _bytes_to_string(plain)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class AESModeOfOperationCFB(AESSegmentModeOfOperation):
    '''AES Cipher Feedback (CFB) Mode of Operation.

    o Stream-cipher: input need not be padded to 16-byte blocks, but it
      must be a multiple of segment_size.

    Also see:
      o https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_feedback_.28CFB.29
      o NIST SP800-38A (http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf); section 6.3'''

    name = "Cipher Feedback (CFB)"

    def __init__(self, key, iv, segment_size = 1):
        # A zero segment size is treated as one byte.
        if segment_size == 0: segment_size = 1

        # The shift register starts from the IV (or an all-zero block).
        if iv is None:
            self._shift_register = [ 0 ] * 16
        elif len(iv) != 16:
            raise ValueError('initialization vector must be 16 bytes')
        else:
            self._shift_register = _string_to_bytes(iv)

        self._segment_bytes = segment_size

        AESBlockModeOfOperation.__init__(self, key)

    segment_bytes = property(lambda s: s._segment_bytes)

    def encrypt(self, plaintext):
        '''Encrypt data whose length is a multiple of segment_size.'''
        if len(plaintext) % self._segment_bytes != 0:
            raise ValueError('plaintext block must be a multiple of segment_size')

        data = _string_to_bytes(plaintext)

        # Process the input one segment at a time.
        encrypted = [ ]
        for start in range(0, len(data), self._segment_bytes):
            segment = data[start: start + self._segment_bytes]
            keystream = self._aes.encrypt(self._shift_register)[:len(segment)]
            cipher_segment = [ (p ^ k) for (p, k) in zip(segment, keystream) ]

            # Shift the top bytes out and the ciphertext in.
            self._shift_register = _concat_list(self._shift_register[len(cipher_segment):], cipher_segment)

            encrypted.extend(cipher_segment)

        return _bytes_to_string(encrypted)

    def decrypt(self, ciphertext):
        '''Decrypt data whose length is a multiple of segment_size.'''
        if len(ciphertext) % self._segment_bytes != 0:
            raise ValueError('ciphertext block must be a multiple of segment_size')

        data = _string_to_bytes(ciphertext)

        # Process the input one segment at a time.
        decrypted = [ ]
        for start in range(0, len(data), self._segment_bytes):
            cipher_segment = data[start: start + self._segment_bytes]
            keystream = self._aes.encrypt(self._shift_register)[:len(cipher_segment)]
            plain_segment = [ (c ^ k) for (c, k) in zip(cipher_segment, keystream) ]

            # Ciphertext (not plaintext) feeds the shift register in CFB.
            self._shift_register = _concat_list(self._shift_register[len(cipher_segment):], cipher_segment)

            decrypted.extend(plain_segment)

        return _bytes_to_string(decrypted)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class AESModeOfOperationOFB(AESStreamModeOfOperation):
    '''AES Output Feedback (OFB) Mode of Operation.

    o Stream-cipher: arbitrary length input, no padding required.
    o A flipped bit in the ciphertext flips the same bit in the
      plaintext, which can be useful for error-correction techniques.

    Also see:
      o https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Output_feedback_.28OFB.29
      o NIST SP800-38A (http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf); section 6.4'''

    name = "Output Feedback (OFB)"

    def __init__(self, key, iv = None):
        # Keystream generation starts from the IV (or an all-zero block).
        if iv is None:
            self._last_precipherblock = [ 0 ] * 16
        elif len(iv) != 16:
            raise ValueError('initialization vector must be 16 bytes')
        else:
            self._last_precipherblock = _string_to_bytes(iv)

        # Unused keystream bytes left over from the previous call.
        self._remaining_block = [ ]

        AESBlockModeOfOperation.__init__(self, key)

    def encrypt(self, plaintext):
        '''Encrypt (or decrypt) an arbitrary-length string.'''
        output = [ ]
        for byte in _string_to_bytes(plaintext):
            # Refill the keystream by encrypting the previous keystream block.
            if not self._remaining_block:
                self._remaining_block = self._aes.encrypt(self._last_precipherblock)
                self._last_precipherblock = [ ]
            keystream_byte = self._remaining_block.pop(0)
            self._last_precipherblock.append(keystream_byte)
            output.append(byte ^ keystream_byte)

        return _bytes_to_string(output)

    def decrypt(self, ciphertext):
        # AES-OFB decryption is identical to encryption.
        return self.encrypt(ciphertext)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class AESModeOfOperationCTR(AESStreamModeOfOperation):
    '''AES Counter (CTR) Mode of Operation.

    o Stream-cipher: arbitrary length input, no padding required.
    o Blocks are independent; a corrupt byte does not damage later blocks.
    o Each block mixes in a unique counter value, so no data patterns leak.
    o Also known as: Counter Mode (CM), Integer Counter Mode (ICM) and
      Segmented Integer Counter (SIC).

    Security Notes:
      o This method (and CBC) ARE recommended.
      o The counter value for every block must be unique across ALL
        messages encrypted with the same key, or security is compromised.

    Also see:
      o https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_.28CTR.29
      o NIST SP800-38A (http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf);
        section 6.5 and Appendix B for managing the initial counter'''

    name = "Counter (CTR)"

    def __init__(self, key, counter = None):
        AESBlockModeOfOperation.__init__(self, key)

        # Default to a fresh big-endian counter starting at 1.
        self._counter = Counter() if counter is None else counter
        # Unused keystream bytes left over from the previous call.
        self._remaining_counter = [ ]

    def encrypt(self, plaintext):
        '''Encrypt (or decrypt) an arbitrary-length string.'''
        # Generate enough keystream to cover the whole input.
        while len(self._remaining_counter) < len(plaintext):
            self._remaining_counter += self._aes.encrypt(self._counter.value)
            self._counter.increment()

        data = _string_to_bytes(plaintext)

        output = [ (b ^ k) for (b, k) in zip(data, self._remaining_counter) ]
        self._remaining_counter = self._remaining_counter[len(output):]

        return _bytes_to_string(output)

    def decrypt(self, crypttext):
        # AES-CTR decryption is identical to encryption.
        return self.encrypt(crypttext)
|
||||||
|
|
||||||
|
|
||||||
|
# Simple lookup table mapping a short mode name to its implementation.
AESModesOfOperation = {
    "cbc": AESModeOfOperationCBC,
    "cfb": AESModeOfOperationCFB,
    "ctr": AESModeOfOperationCTR,
    "ecb": AESModeOfOperationECB,
    "ofb": AESModeOfOperationOFB,
}
|
|
@ -0,0 +1,227 @@
|
||||||
|
# The MIT License (MIT)
|
||||||
|
#
|
||||||
|
# Copyright (c) 2014 Richard Moore
|
||||||
|
#
|
||||||
|
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
# of this software and associated documentation files (the "Software"), to deal
|
||||||
|
# in the Software without restriction, including without limitation the rights
|
||||||
|
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
# copies of the Software, and to permit persons to whom the Software is
|
||||||
|
# furnished to do so, subject to the following conditions:
|
||||||
|
#
|
||||||
|
# The above copyright notice and this permission notice shall be included in
|
||||||
|
# all copies or substantial portions of the Software.
|
||||||
|
#
|
||||||
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
# THE SOFTWARE.
|
||||||
|
|
||||||
|
|
||||||
|
from .aes import AESBlockModeOfOperation, AESSegmentModeOfOperation, AESStreamModeOfOperation
|
||||||
|
from .util import append_PKCS7_padding, strip_PKCS7_padding, to_bufferable
|
||||||
|
|
||||||
|
|
||||||
|
# First we inject three functions to each of the modes of operations
|
||||||
|
#
|
||||||
|
# _can_consume(size)
|
||||||
|
# - Given a size, determine how many bytes could be consumed in
|
||||||
|
# a single call to either the decrypt or encrypt method
|
||||||
|
#
|
||||||
|
# _final_encrypt(data, padding = PADDING_DEFAULT)
|
||||||
|
# - call and return encrypt on this (last) chunk of data,
|
||||||
|
# padding as necessary; this will always be at least 16
|
||||||
|
# bytes unless the total incoming input was less than 16
|
||||||
|
# bytes
|
||||||
|
#
|
||||||
|
# _final_decrypt(data, padding = PADDING_DEFAULT)
|
||||||
|
# - same as _final_encrypt except for decrypt, for
|
||||||
|
# stripping off padding
|
||||||
|
#
|
||||||
|
|
||||||
|
PADDING_NONE = 'none'
|
||||||
|
PADDING_DEFAULT = 'default'
|
||||||
|
|
||||||
|
# @TODO: Ciphertext stealing and explicit PKCS#7
|
||||||
|
# PADDING_CIPHERTEXT_STEALING
|
||||||
|
# PADDING_PKCS7
|
||||||
|
|
||||||
|
# ECB and CBC are block-only ciphers
|
||||||
|
|
||||||
|
def _block_can_consume(self, size):
    """Block modes (ECB/CBC) can consume exactly one 16-byte block at a time."""
    return 16 if size >= 16 else 0
|
||||||
|
|
||||||
|
# After PKCS#7 padding the final chunk may grow to two blocks.
def _block_final_encrypt(self, data, padding=PADDING_DEFAULT):
    """Encrypt the final chunk for a block mode, applying padding."""
    if padding == PADDING_DEFAULT:
        data = append_PKCS7_padding(data)
    elif padding == PADDING_NONE:
        if len(data) != 16:
            raise Exception('invalid data length for final block')
    else:
        raise Exception('invalid padding option')

    if len(data) == 32:
        # Padding pushed us to two blocks; encrypt each separately.
        return self.encrypt(data[:16]) + self.encrypt(data[16:])
    return self.encrypt(data)
|
||||||
|
|
||||||
|
|
||||||
|
def _block_final_decrypt(self, data, padding=PADDING_DEFAULT):
    """Decrypt the final chunk for a block mode, stripping padding."""
    if padding == PADDING_DEFAULT:
        return strip_PKCS7_padding(self.decrypt(data))
    elif padding == PADDING_NONE:
        if len(data) != 16:
            raise Exception('invalid data length for final block')
        return self.decrypt(data)
    else:
        raise Exception('invalid padding option')
|
||||||
|
|
||||||
|
# Attach the chunking/padding hooks to the block modes (ECB, CBC).
AESBlockModeOfOperation._can_consume = _block_can_consume
AESBlockModeOfOperation._final_encrypt = _block_final_encrypt
AESBlockModeOfOperation._final_decrypt = _block_final_decrypt
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# CFB is a segment cipher
|
||||||
|
|
||||||
|
def _segment_can_consume(self, size):
    """Segment modes (CFB) consume whole segments only."""
    return (size // self.segment_bytes) * self.segment_bytes
|
||||||
|
|
||||||
|
# CFB can handle a non-segment-sized chunk at the end: pad with zero
# bytes, encrypt, then truncate back to the original length.
def _segment_final_encrypt(self, data, padding=PADDING_DEFAULT):
    """Encrypt the final (possibly partial) segment for CFB."""
    if padding != PADDING_DEFAULT:
        raise Exception('invalid padding option')

    pad_len = self.segment_bytes - (len(data) % self.segment_bytes)
    padded = data + to_bufferable(chr(0) * pad_len)
    return self.encrypt(padded)[:len(data)]
|
||||||
|
|
||||||
|
# CFB can handle a non-segment-sized chunk at the end: pad with zero
# bytes, decrypt, then truncate back to the original length.
def _segment_final_decrypt(self, data, padding=PADDING_DEFAULT):
    """Decrypt the final (possibly partial) segment for CFB."""
    if padding != PADDING_DEFAULT:
        raise Exception('invalid padding option')

    pad_len = self.segment_bytes - (len(data) % self.segment_bytes)
    padded = data + to_bufferable(chr(0) * pad_len)
    return self.decrypt(padded)[:len(data)]
|
||||||
|
|
||||||
|
# Attach the chunking hooks to the segment mode (CFB).
AESSegmentModeOfOperation._can_consume = _segment_can_consume
AESSegmentModeOfOperation._final_encrypt = _segment_final_encrypt
AESSegmentModeOfOperation._final_decrypt = _segment_final_decrypt
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# OFB and CTR are stream ciphers
|
||||||
|
|
||||||
|
def _stream_can_consume(self, size):
    """Stream modes (OFB, CTR) can consume any number of bytes."""
    return size
|
||||||
|
|
||||||
|
def _stream_final_encrypt(self, data, padding=PADDING_DEFAULT):
    """Encrypt the final chunk for a stream mode; no padding is applied."""
    if padding not in (PADDING_NONE, PADDING_DEFAULT):
        raise Exception('invalid padding option')
    return self.encrypt(data)
|
||||||
|
|
||||||
|
def _stream_final_decrypt(self, data, padding=PADDING_DEFAULT):
    """Decrypt the final chunk for a stream mode; no padding is stripped."""
    if padding not in (PADDING_NONE, PADDING_DEFAULT):
        raise Exception('invalid padding option')
    return self.decrypt(data)
|
||||||
|
|
||||||
|
# Attach the chunking hooks to the stream modes (OFB, CTR).
AESStreamModeOfOperation._can_consume = _stream_can_consume
AESStreamModeOfOperation._final_encrypt = _stream_final_encrypt
AESStreamModeOfOperation._final_decrypt = _stream_final_decrypt
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class BlockFeeder(object):
    '''Base class for incrementally chunking a stream of bytes into the
    sizes the underlying mode of operation can consume, applying (or
    stripping) padding as necessary.'''

    def __init__(self, mode, feed, final, padding=PADDING_DEFAULT):
        self._mode = mode
        self._feed = feed        # per-chunk transform (encrypt or decrypt)
        self._final = final      # finalizer responsible for padding
        self._buffer = to_bufferable("")
        self._padding = padding

    def feed(self, data=None):
        '''Provide bytes to encrypt (or decrypt), returning any bytes
        possible from this or any previous calls to feed.

        Call with None or an empty string to flush the mode of
        operation and return any final bytes; no further calls to
        feed may be made.'''
        if self._buffer is None:
            raise ValueError('already finished feeder')

        # Finalize; process the spare bytes we were keeping.
        if data is None:
            result = self._final(self._buffer, self._padding)
            self._buffer = None
            return result

        self._buffer += to_bufferable(data)

        # Always retain at least 16 bytes so the finalizer can resolve
        # padding on the very last block.
        output = to_bufferable('')
        while len(self._buffer) > 16:
            can_consume = self._mode._can_consume(len(self._buffer) - 16)
            if can_consume == 0:
                break
            output += self._feed(self._buffer[:can_consume])
            self._buffer = self._buffer[can_consume:]

        return output
|
||||||
|
|
||||||
|
|
||||||
|
class Encrypter(BlockFeeder):
    """Accepts bytes of plaintext and returns encrypted ciphertext."""

    def __init__(self, mode, padding=PADDING_DEFAULT):
        BlockFeeder.__init__(self, mode, mode.encrypt, mode._final_encrypt, padding)
|
||||||
|
|
||||||
|
|
||||||
|
class Decrypter(BlockFeeder):
    """Accepts bytes of ciphertext and returns decrypted plaintext."""

    def __init__(self, mode, padding=PADDING_DEFAULT):
        BlockFeeder.__init__(self, mode, mode.decrypt, mode._final_decrypt, padding)
|
||||||
|
|
||||||
|
|
||||||
|
# Streams are pumped in 8 KiB chunks.
BLOCK_SIZE = (1 << 13)
|
||||||
|
|
||||||
|
def _feed_stream(feeder, in_stream, out_stream, block_size=BLOCK_SIZE):
    """Pump *in_stream* through *feeder*, writing converted bytes to *out_stream*."""
    while True:
        chunk = in_stream.read(block_size)
        if not chunk:
            break
        out_stream.write(feeder.feed(chunk))
    # Flush the feeder to emit any buffered/final (padded) bytes.
    out_stream.write(feeder.feed())
|
||||||
|
|
||||||
|
|
||||||
|
def encrypt_stream(mode, in_stream, out_stream, block_size=BLOCK_SIZE, padding=PADDING_DEFAULT):
    """Encrypts a stream of bytes from in_stream to out_stream using mode."""
    _feed_stream(Encrypter(mode, padding=padding), in_stream, out_stream, block_size)
|
||||||
|
|
||||||
|
|
||||||
|
def decrypt_stream(mode, in_stream, out_stream, block_size=BLOCK_SIZE, padding=PADDING_DEFAULT):
    """Decrypts a stream of bytes from in_stream to out_stream using mode."""
    _feed_stream(Decrypter(mode, padding=padding), in_stream, out_stream, block_size)
|
|
@ -0,0 +1,60 @@
|
||||||
|
# The MIT License (MIT)
|
||||||
|
#
|
||||||
|
# Copyright (c) 2014 Richard Moore
|
||||||
|
#
|
||||||
|
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
# of this software and associated documentation files (the "Software"), to deal
|
||||||
|
# in the Software without restriction, including without limitation the rights
|
||||||
|
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
# copies of the Software, and to permit persons to whom the Software is
|
||||||
|
# furnished to do so, subject to the following conditions:
|
||||||
|
#
|
||||||
|
# The above copyright notice and this permission notice shall be included in
|
||||||
|
# all copies or substantial portions of the Software.
|
||||||
|
#
|
||||||
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
# THE SOFTWARE.
|
||||||
|
|
||||||
|
# Why to_bufferable?
|
||||||
|
# Python 3 is very different from Python 2.x when it comes to strings of text
|
||||||
|
# and strings of bytes; in Python 3, strings of bytes do not exist, instead to
|
||||||
|
# represent arbitrary binary data, we must use the "bytes" object. This method
|
||||||
|
# ensures the object behaves as we need it to.
|
||||||
|
|
||||||
|
def to_bufferable(binary):
    # Python 2 default: str already behaves as a byte buffer, so this
    # is the identity (replaced below on Python 3).
    return binary
|
||||||
|
|
||||||
|
def _get_byte(c):
    # Python 2: indexing a str yields a 1-char str; convert to its ordinal
    # (replaced below on Python 3, where indexing bytes yields an int).
    return ord(c)
|
||||||
|
|
||||||
|
try:
    # Python 2 exposes xrange; on Python 3 this raises NameError and we
    # install bytes-aware replacements below.
    xrange
except NameError:  # was a bare except; only NameError is expected here

    def to_bufferable(binary):
        """Return *binary* as bytes, converting a text string per-character."""
        if isinstance(binary, bytes):
            return binary
        return bytes(ord(b) for b in binary)

    def _get_byte(c):
        # Indexing bytes on Python 3 already yields an int.
        return c
|
||||||
|
|
||||||
|
def append_PKCS7_padding(data):
    """Pad *data* to a multiple of 16 bytes per PKCS#7.

    Always appends between 1 and 16 bytes, each equal to the pad length.
    """
    pad = 16 - (len(data) % 16)
    return data + to_bufferable(chr(pad) * pad)
|
||||||
|
|
||||||
|
def strip_PKCS7_padding(data):
    """Validate and remove PKCS#7 padding from *data*.

    Raises ValueError if the length is not a multiple of 16 or the
    padding byte is outside the valid 1..16 range.
    """
    if len(data) % 16 != 0:
        raise ValueError("invalid length")

    pad = _get_byte(data[-1])

    # A pad byte of 0 is invalid per PKCS#7. The previous check only
    # rejected pad > 16, so pad == 0 slipped through and data[:-0]
    # silently returned an empty string instead of raising.
    if pad < 1 or pad > 16:
        raise ValueError("invalid padding byte")

    return data[:-pad]
|
|
@ -0,0 +1,27 @@
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2019 Ivan Machugovskiy
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
|
||||||
|
|
||||||
|
Additionally, the following licenses must be preserved:
|
||||||
|
|
||||||
|
- ripemd implementation is licensed under BSD-3 by Markus Friedl, see `_ripemd.py`;
|
||||||
|
- jacobian curve implementation is dual-licensed under MIT or public domain license, see `_jacobian.py`.
|
|
@ -0,0 +1,6 @@
|
||||||
|
__all__ = ["aes", "ecc", "rsa"]
|
||||||
|
|
||||||
|
try:
|
||||||
|
from .openssl import aes, ecc, rsa
|
||||||
|
except OSError:
|
||||||
|
from .fallback import aes, ecc, rsa
|
|
@ -0,0 +1,53 @@
|
||||||
|
# pylint: disable=import-outside-toplevel
|
||||||
|
|
||||||
|
# pylint: disable=import-outside-toplevel

class AES:
    """Facade over an AES backend with an optional fallback used when the
    backend does not support a requested algorithm."""

    def __init__(self, backend, fallback=None):
        self._backend = backend
        self._fallback = fallback

    def get_algo_key_length(self, algo):
        """Return the key length in bytes for a name like 'aes-256-cbc'."""
        if algo.count("-") != 2:
            raise ValueError("Invalid algorithm name")
        try:
            # Middle component is the key size in bits.
            return int(algo.split("-")[1]) // 8
        except ValueError:
            raise ValueError("Invalid algorithm name") from None

    def new_key(self, algo="aes-256-cbc"):
        """Generate a random key of the right length for *algo*."""
        if not self._backend.is_algo_supported(algo):
            if self._fallback is None:
                raise ValueError("This algorithm is not supported")
            return self._fallback.new_key(algo)
        return self._backend.random(self.get_algo_key_length(algo))

    def encrypt(self, data, key, algo="aes-256-cbc"):
        """Encrypt *data* with *key* using the backend (or fallback)."""
        if not self._backend.is_algo_supported(algo):
            if self._fallback is None:
                raise ValueError("This algorithm is not supported")
            return self._fallback.encrypt(data, key, algo)

        expected = self.get_algo_key_length(algo)
        if len(key) != expected:
            raise ValueError("Expected key to be {} bytes, got {} bytes".format(expected, len(key)))

        return self._backend.encrypt(data, key, algo)

    def decrypt(self, ciphertext, iv, key, algo="aes-256-cbc"):
        """Decrypt *ciphertext* produced with *iv* and *key*."""
        if not self._backend.is_algo_supported(algo):
            if self._fallback is None:
                raise ValueError("This algorithm is not supported")
            return self._fallback.decrypt(ciphertext, iv, key, algo)

        expected = self.get_algo_key_length(algo)
        if len(key) != expected:
            raise ValueError("Expected key to be {} bytes, got {} bytes".format(expected, len(key)))

        return self._backend.decrypt(ciphertext, iv, key, algo)

    def get_backend(self):
        """Delegate to the backend's get_backend()."""
        return self._backend.get_backend()
|
|
@ -0,0 +1,506 @@
|
||||||
|
import hashlib
|
||||||
|
import struct
|
||||||
|
import hmac
|
||||||
|
import base58
|
||||||
|
|
||||||
|
|
||||||
|
try:
    hashlib.new("ripemd160")
except ValueError:
    # hashlib has no native ripemd160; fall back to the bundled
    # pure-Python implementation.
    from . import _ripemd

    def ripemd160(*args):
        return _ripemd.new(*args)
else:
    # Native (OpenSSL-backed) implementation is available.
    def ripemd160(*args):
        return hashlib.new("ripemd160", *args)
|
||||||
|
|
||||||
|
|
||||||
|
class ECC:
    """Registry of supported curves and factory for EllipticCurve objects."""
    # pylint: disable=line-too-long
    # name: (nid, p, n, a, b, (Gx, Gy)),
    CURVES = {
        "secp112r1": (
            704,
            0xDB7C2ABF62E35E668076BEAD208B,
            0xDB7C2ABF62E35E7628DFAC6561C5,
            0xDB7C2ABF62E35E668076BEAD2088,
            0x659EF8BA043916EEDE8911702B22,
            (
                0x09487239995A5EE76B55F9C2F098,
                0xA89CE5AF8724C0A23E0E0FF77500
            )
        ),
        "secp112r2": (
            705,
            0xDB7C2ABF62E35E668076BEAD208B,
            0x36DF0AAFD8B8D7597CA10520D04B,
            0x6127C24C05F38A0AAAF65C0EF02C,
            0x51DEF1815DB5ED74FCC34C85D709,
            (
                0x4BA30AB5E892B4E1649DD0928643,
                0xADCD46F5882E3747DEF36E956E97
            )
        ),
        "secp128r1": (
            706,
            0xFFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFF,
            0xFFFFFFFE0000000075A30D1B9038A115,
            0xFFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFC,
            0xE87579C11079F43DD824993C2CEE5ED3,
            (
                0x161FF7528B899B2D0C28607CA52C5B86,
                0xCF5AC8395BAFEB13C02DA292DDED7A83
            )
        ),
        "secp128r2": (
            707,
            0xFFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFF,
            0x3FFFFFFF7FFFFFFFBE0024720613B5A3,
            0xD6031998D1B3BBFEBF59CC9BBFF9AEE1,
            0x5EEEFCA380D02919DC2C6558BB6D8A5D,
            (
                0x7B6AA5D85E572983E6FB32A7CDEBC140,
                0x27B6916A894D3AEE7106FE805FC34B44
            )
        ),
        "secp160k1": (
            708,
            0x00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73,
            0x0100000000000000000001B8FA16DFAB9ACA16B6B3,
            0,
            7,
            (
                0x3B4C382CE37AA192A4019E763036F4F5DD4D7EBB,
                0x938CF935318FDCED6BC28286531733C3F03C4FEE
            )
        ),
        "secp160r1": (
            709,
            0x00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF7FFFFFFF,
            0x0100000000000000000001F4C8F927AED3CA752257,
            0x00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF7FFFFFFC,
            0x001C97BEFC54BD7A8B65ACF89F81D4D4ADC565FA45,
            (
                0x4A96B5688EF573284664698968C38BB913CBFC82,
                0x23A628553168947D59DCC912042351377AC5FB32
            )
        ),
        "secp160r2": (
            710,
            0x00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73,
            0x0100000000000000000000351EE786A818F3A1A16B,
            0x00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC70,
            0x00B4E134D3FB59EB8BAB57274904664D5AF50388BA,
            (
                0x52DCB034293A117E1F4FF11B30F7199D3144CE6D,
                0xFEAFFEF2E331F296E071FA0DF9982CFEA7D43F2E
            )
        ),
        "secp192k1": (
            711,
            0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37,
            0xFFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D,
            0,
            3,
            (
                0xDB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D,
                0x9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D
            )
        ),
        "prime192v1": (
            409,
            0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF,
            0xFFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831,
            0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC,
            0x64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1,
            (
                0x188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012,
                0x07192B95FFC8DA78631011ED6B24CDD573F977A11E794811
            )
        ),
        "secp224k1": (
            712,
            0x00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D,
            0x010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7,
            0,
            5,
            (
                0xA1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C,
                0x7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5
            )
        ),
        "secp224r1": (
            713,
            0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000001,
            0xFFFFFFFFFFFFFFFFFFFFFFFFFFFF16A2E0B8F03E13DD29455C5C2A3D,
            0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFE,
            0xB4050A850C04B3ABF54132565044B0B7D7BFD8BA270B39432355FFB4,
            (
                0xB70E0CBD6BB4BF7F321390B94A03C1D356C21122343280D6115C1D21,
                0xBD376388B5F723FB4C22DFE6CD4375A05A07476444D5819985007E34
            )
        ),
        "secp256k1": (
            714,
            0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F,
            0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141,
            0,
            7,
            (
                0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
                0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8
            )
        ),
        "prime256v1": (
            715,
            0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF,
            0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551,
            0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC,
            0x5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B,
            (
                0x6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296,
                0x4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5
            )
        ),
        "secp384r1": (
            716,
            0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFF0000000000000000FFFFFFFF,
            0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC7634D81F4372DDF581A0DB248B0A77AECEC196ACCC52973,
            0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFF0000000000000000FFFFFFFC,
            0xB3312FA7E23EE7E4988E056BE3F82D19181D9C6EFE8141120314088F5013875AC656398D8A2ED19D2A85C8EDD3EC2AEF,
            (
                0xAA87CA22BE8B05378EB1C71EF320AD746E1D3B628BA79B9859F741E082542A385502F25DBF55296C3A545E3872760AB7,
                0x3617DE4A96262C6F5D9E98BF9292DC29F8F41DBD289A147CE9DA3113B5F0B8C00A60B1CE1D7E819D7A431D7C90EA0E5F
            )
        ),
        "secp521r1": (
            717,
            0x01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,
            0x01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFA51868783BF2F966B7FCC0148F709A5D03BB5C9B8899C47AEBB6FB71E91386409,
            0x01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC,
            0x0051953EB9618E1C9A1F929A21A0B68540EEA2DA725B99B315F3B8B489918EF109E156193951EC7E937B1652C0BD3BB1BF073573DF883D2C34F1EF451FD46B503F00,
            (
                0x00C6858E06B70404E9CD9E3ECB662395B4429C648139053FB521F828AF606B4D3DBAA14B5E77EFE75928FE1DC127A2FFA8DE3348B3C1856A429BF97E7E31C2E5BD66,
                0x011839296A789A3BC0045C8A5FB42C7D1BD998F54449579B446817AFBD17273E662C97EE72995EF42640C550B9013FAD0761353C7086A272C24088BE94769FD16650
            )
        )
    }
    # pylint: enable=line-too-long

    def __init__(self, backend, aes):
        self._backend = backend
        self._aes = aes

    def get_curve(self, name):
        """Instantiate an EllipticCurve for the named curve."""
        if name not in self.CURVES:
            raise ValueError("Unknown curve {}".format(name))
        nid, p, n, a, b, g = self.CURVES[name]
        return EllipticCurve(self._backend(p, n, a, b, g), self._aes, nid)

    def get_backend(self):
        """Delegate to the backend's get_backend()."""
        return self._backend.get_backend()
|
||||||
|
|
||||||
|
|
||||||
|
class EllipticCurve:
|
||||||
|
def __init__(self, backend, aes, nid):
    # Curve arithmetic backend, AES facade, and OpenSSL-style curve nid.
    self._backend = backend
    self._aes = aes
    self.nid = nid
|
||||||
|
|
||||||
|
|
||||||
|
def _encode_public_key(self, x, y, is_compressed=True, raw=True):
    """Serialize a public key point given as (x, y) byte strings.

    raw=True produces SEC1 form: 0x02/0x03 prefix (parity of y) for
    compressed keys, 0x04 || x || y for uncompressed. raw=False produces
    this library's framed form: nid and per-coordinate lengths packed
    big-endian around the coordinates.
    """
    if raw:
        if is_compressed:
            # Prefix byte encodes the parity of y's last byte.
            return bytes([0x02 + (y[-1] % 2)]) + x
        return bytes([0x04]) + x + y
    return struct.pack("!HH", self.nid, len(x)) + x + struct.pack("!H", len(y)) + y
|
||||||
|
|
||||||
|
|
||||||
|
def _decode_public_key(self, public_key, partial=False):
    """Parse a SEC1-encoded public key into an (x, y) tuple.

    With partial=True, trailing bytes are tolerated and the number of
    bytes consumed is returned alongside the point.
    """
    if not public_key:
        raise ValueError("No public key")

    prefix = public_key[0]
    if prefix == 0x04:
        # Uncompressed: 0x04 || x || y
        expected_length = 1 + 2 * self._backend.public_key_length
        if partial:
            if len(public_key) < expected_length:
                raise ValueError("Invalid uncompressed public key length")
        else:
            if len(public_key) != expected_length:
                raise ValueError("Invalid uncompressed public key length")
        x = public_key[1:1 + self._backend.public_key_length]
        y = public_key[1 + self._backend.public_key_length:expected_length]
    elif prefix in (0x02, 0x03):
        # Compressed: 0x02/0x03 || x; y is recovered by the backend.
        expected_length = 1 + self._backend.public_key_length
        if partial:
            if len(public_key) < expected_length:
                raise ValueError("Invalid compressed public key length")
        else:
            if len(public_key) != expected_length:
                raise ValueError("Invalid compressed public key length")
        x, y = self._backend.decompress_point(public_key[:expected_length])
        # Sanity check: decompression must echo back the x we supplied.
        if x != public_key[1:expected_length]:
            raise ValueError("Incorrect compressed public key")
    else:
        raise ValueError("Invalid public key prefix")

    if partial:
        return (x, y), expected_length
    return x, y
|
||||||
|
|
||||||
|
|
||||||
|
def _decode_public_key_openssl(self, public_key, partial=False):
    """Parse a public key in this library's nid/length-framed format."""
    if not public_key:
        raise ValueError("No public key")

    offset = 0

    nid, = struct.unpack("!H", public_key[offset:offset + 2])
    offset += 2
    if nid != self.nid:
        raise ValueError("Wrong curve")

    xlen, = struct.unpack("!H", public_key[offset:offset + 2])
    offset += 2
    if len(public_key) - offset < xlen:
        raise ValueError("Too short public key")
    x = public_key[offset:offset + xlen]
    offset += xlen

    ylen, = struct.unpack("!H", public_key[offset:offset + 2])
    offset += 2
    if len(public_key) - offset < ylen:
        raise ValueError("Too short public key")
    y = public_key[offset:offset + ylen]
    offset += ylen

    if partial:
        return (x, y), offset
    if offset < len(public_key):
        raise ValueError("Too long public key")
    return x, y
|
||||||
|
|
||||||
|
|
||||||
|
def new_private_key(self, is_compressed=False):
    """Generate a raw private key; a trailing 0x01 marks compression."""
    suffix = b"\x01" if is_compressed else b""
    return self._backend.new_private_key() + suffix
|
||||||
|
|
||||||
|
|
||||||
|
def private_to_public(self, private_key):
    """Derive the public key, preserving the compression marker."""
    key_len = self._backend.public_key_length
    if len(private_key) == key_len:
        is_compressed = False
    elif len(private_key) == key_len + 1 and private_key[-1] == 1:
        # Trailing 0x01 marks a compressed key; strip it before use.
        is_compressed = True
        private_key = private_key[:-1]
    else:
        raise ValueError("Private key has invalid length")
    x, y = self._backend.private_to_public(private_key)
    return self._encode_public_key(x, y, is_compressed=is_compressed)
|
||||||
|
|
||||||
|
|
||||||
|
def private_to_wif(self, private_key):
    """Encode a private key in Wallet Import Format (mainnet 0x80 prefix)."""
    return base58.b58encode_check(b"\x80" + private_key)
|
||||||
|
|
||||||
|
|
||||||
|
def wif_to_private(self, wif):
    """Decode a WIF string back to the raw private key bytes."""
    decoded = base58.b58decode_check(wif)
    if decoded[0] != 0x80:
        raise ValueError("Invalid network (expected mainnet)")
    return decoded[1:]
|
||||||
|
|
||||||
|
|
||||||
|
def public_to_address(self, public_key):
    """Return the Base58Check address: 0x00 || RIPEMD160(SHA256(pubkey))."""
    hash160 = ripemd160(hashlib.sha256(public_key).digest()).digest()
    return base58.b58encode_check(b"\x00" + hash160)
|
||||||
|
|
||||||
|
|
||||||
|
def private_to_address(self, private_key):
    """Derive the public key, then its address.

    Kinda useless but left for quick migration from pybitcointools.
    """
    public_key = self.private_to_public(private_key)
    return self.public_to_address(public_key)
|
||||||
|
|
||||||
|
|
||||||
|
def derive(self, private_key, public_key):
    """Run ECDH between our private key and a peer public key.

    The public key may be a decoded (x, y) tuple or any serialized form
    understood by _decode_public_key.
    """
    key_len = self._backend.public_key_length
    # Strip the optional 0x01 compression marker.
    if len(private_key) == key_len + 1 and private_key[-1] == 1:
        private_key = private_key[:-1]
    if len(private_key) != key_len:
        raise ValueError("Private key has invalid length")
    if not isinstance(public_key, tuple):
        public_key = self._decode_public_key(public_key)
    return self._backend.ecdh(private_key, public_key)
|
||||||
|
|
||||||
|
|
||||||
|
def _digest(self, data, hash):
    """Apply the requested hash/derivation to *data*.

    *hash* may be None (identity), a callable, or one of the names
    "sha1", "sha256", "sha512".
    """
    if hash is None:
        return data
    if callable(hash):
        return hash(data)
    if hash == "sha1":
        return hashlib.sha1(data).digest()
    if hash == "sha256":
        return hashlib.sha256(data).digest()
    if hash == "sha512":
        return hashlib.sha512(data).digest()
    raise ValueError("Unknown hash/derivation method")
|
||||||
|
|
||||||
|
|
||||||
|
# High-level functions
|
||||||
|
def encrypt(self, data, public_key, algo="aes-256-cbc", derivation="sha256", mac="hmac-sha256", return_aes_key=False):
    """ECIES-style encryption of *data* to *public_key*.

    An ephemeral key pair is generated, an ECDH shared secret is derived
    and hashed with *derivation*, and the result is split into an AES key
    and a MAC key. Output layout: iv || ephemeral_pubkey || ciphertext || tag.
    If *return_aes_key* is true, the AES key is returned alongside so the
    caller can reuse it.
    """
    # Generate ephemeral private key
    private_key = self.new_private_key()

    # Derive key
    ecdh = self.derive(private_key, public_key)
    key = self._digest(ecdh, derivation)
    k_enc_len = self._aes.get_algo_key_length(algo)
    if len(key) < k_enc_len:
        raise ValueError("Too short digest")
    # First k_enc_len bytes feed AES; the remainder keys the MAC.
    k_enc, k_mac = key[:k_enc_len], key[k_enc_len:]

    # Encrypt
    ciphertext, iv = self._aes.encrypt(data, k_enc, algo=algo)
    # Re-encode the ephemeral public key in OpenSSL-compatible (raw=False) form
    # so decrypt() can parse it back out of the stream.
    ephem_public_key = self.private_to_public(private_key)
    ephem_public_key = self._decode_public_key(ephem_public_key)
    ephem_public_key = self._encode_public_key(*ephem_public_key, raw=False)
    ciphertext = iv + ephem_public_key + ciphertext

    # Add MAC tag; the tag covers iv + ephemeral key + ciphertext.
    if callable(mac):
        tag = mac(k_mac, ciphertext)
    elif mac == "hmac-sha256":
        h = hmac.new(k_mac, digestmod="sha256")
        h.update(ciphertext)
        tag = h.digest()
    elif mac == "hmac-sha512":
        h = hmac.new(k_mac, digestmod="sha512")
        h.update(ciphertext)
        tag = h.digest()
    elif mac is None:
        tag = b""
    else:
        raise ValueError("Unsupported MAC")

    if return_aes_key:
        return ciphertext + tag, k_enc
    else:
        return ciphertext + tag
|
||||||
|
|
||||||
|
|
||||||
|
def decrypt(self, ciphertext, private_key, algo="aes-256-cbc", derivation="sha256", mac="hmac-sha256"):
    """Decrypt an ECIES-style *ciphertext* produced by encrypt().

    Expects the layout iv || ephemeral_pubkey || ciphertext || tag.
    Raises ValueError on truncated input, an unsupported MAC name, or a
    MAC mismatch.
    """
    # Determine the MAC tag length so we can split it off the end.
    if callable(mac):
        tag_length = mac.digest_size
    elif mac == "hmac-sha256":
        tag_length = hmac.new(b"", digestmod="sha256").digest_size
    elif mac == "hmac-sha512":
        tag_length = hmac.new(b"", digestmod="sha512").digest_size
    elif mac is None:
        tag_length = 0
    else:
        raise ValueError("Unsupported MAC")

    if len(ciphertext) < tag_length:
        raise ValueError("Ciphertext is too small to contain MAC tag")
    if tag_length == 0:
        tag = b""
    else:
        ciphertext, tag = ciphertext[:-tag_length], ciphertext[-tag_length:]

    # Keep the MAC-covered portion (everything except the tag) for verification.
    orig_ciphertext = ciphertext

    if len(ciphertext) < 16:
        raise ValueError("Ciphertext is too small to contain IV")
    iv, ciphertext = ciphertext[:16], ciphertext[16:]

    # Parse the sender's ephemeral public key; partial=True also returns how
    # many bytes it consumed.
    public_key, pos = self._decode_public_key_openssl(ciphertext, partial=True)
    ciphertext = ciphertext[pos:]

    # Derive key (same split as in encrypt: AES key first, MAC key after).
    ecdh = self.derive(private_key, public_key)
    key = self._digest(ecdh, derivation)
    k_enc_len = self._aes.get_algo_key_length(algo)
    if len(key) < k_enc_len:
        raise ValueError("Too short digest")
    k_enc, k_mac = key[:k_enc_len], key[k_enc_len:]

    # Verify MAC tag before decrypting. (Unsupported mac values were already
    # rejected above, so no final else is needed here.)
    if callable(mac):
        expected_tag = mac(k_mac, orig_ciphertext)
    elif mac == "hmac-sha256":
        h = hmac.new(k_mac, digestmod="sha256")
        h.update(orig_ciphertext)
        expected_tag = h.digest()
    elif mac == "hmac-sha512":
        h = hmac.new(k_mac, digestmod="sha512")
        h.update(orig_ciphertext)
        expected_tag = h.digest()
    elif mac is None:
        expected_tag = b""

    # Constant-time comparison to avoid leaking tag bytes via timing.
    if not hmac.compare_digest(tag, expected_tag):
        raise ValueError("Invalid MAC tag")

    return self._aes.decrypt(ciphertext, iv, k_enc, algo=algo)
|
||||||
|
|
||||||
|
|
||||||
|
def sign(self, data, private_key, hash="sha256", recoverable=False, entropy=None):
    """Sign *data* (hashed with *hash*) using *private_key*.

    If *entropy* is not supplied, a deterministic nonce seed is derived
    from the key and message via an HMAC-SHA256 chain in the style of
    RFC 6979 (NOTE(review): simplified — exact RFC conformance not
    verified here). Returns whatever the backend's sign() produces.
    """
    # A trailing 0x01 byte marks a "compressed" private key.
    if len(private_key) == self._backend.public_key_length:
        is_compressed = False
    elif len(private_key) == self._backend.public_key_length + 1 and private_key[-1] == 1:
        is_compressed = True
        private_key = private_key[:-1]
    else:
        raise ValueError("Private key has invalid length")

    data = self._digest(data, hash)
    if not entropy:
        # Deterministic entropy: HMAC-DRBG-like chain keyed by (private_key, data).
        v = b"\x01" * len(data)
        k = b"\x00" * len(data)
        k = hmac.new(k, v + b"\x00" + private_key + data, "sha256").digest()
        v = hmac.new(k, v, "sha256").digest()
        k = hmac.new(k, v + b"\x01" + private_key + data, "sha256").digest()
        v = hmac.new(k, v, "sha256").digest()
        entropy = hmac.new(k, v, "sha256").digest()
    return self._backend.sign(data, private_key, recoverable, is_compressed, entropy=entropy)
|
||||||
|
|
||||||
|
|
||||||
|
def recover(self, signature, data, hash="sha256"):
    """Recover the signer's public key from a recoverable signature."""
    # Recoverable signatures carry one extra header byte before r || s.
    if len(signature) != 1 + 2 * self._backend.public_key_length:
        raise ValueError("Cannot recover an unrecoverable signature")
    x, y = self._backend.recover(signature, self._digest(data, hash))
    # Header bytes >= 31 indicate the key should be serialized compressed.
    compressed = signature[0] >= 31
    return self._encode_public_key(x, y, is_compressed=compressed)
|
||||||
|
|
||||||
|
|
||||||
|
def verify(self, signature, data, public_key, hash="sha256"):
    """Verify *signature* over *data*; raises ValueError on malformed input."""
    rs_length = 2 * self._backend.public_key_length
    if len(signature) == rs_length + 1:
        # Recoverable signature: drop the recovery header byte.
        signature = signature[1:]
    if len(signature) != rs_length:
        raise ValueError("Invalid signature format")
    if not isinstance(public_key, tuple):
        public_key = self._decode_public_key(public_key)
    return self._backend.verify(signature, self._digest(data, hash), public_key)
|
||||||
|
|
||||||
|
|
||||||
|
def derive_child(self, seed, child):
    """Derive the BIP32-based child key at index *child* from *seed*."""
    # Only non-hardened indices (0 .. 2**31 - 1) are accepted.
    if child < 0 or child >= 2 ** 31:
        raise ValueError("Invalid child index")
    return self._backend.derive_child(seed, child)
|
|
@ -0,0 +1,375 @@
|
||||||
|
# Copyright (c) 2001 Markus Friedl. All rights reserved.
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions
|
||||||
|
# are met:
|
||||||
|
# 1. Redistributions of source code must retain the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer.
|
||||||
|
# 2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer in the
|
||||||
|
# documentation and/or other materials provided with the distribution.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
||||||
|
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||||
|
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||||
|
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||||
|
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||||
|
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||||
|
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
# pylint: skip-file
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
# Size of a RIPEMD-160 digest in bytes (both spellings kept for
# compatibility with hashlib-style module interfaces).
digest_size = 20
digestsize = 20
|
||||||
|
|
||||||
|
class RIPEMD160:
    """
    Return a new RIPEMD160 object. An optional string argument
    may be provided; if present, this string will be automatically
    hashed.
    """

    def __init__(self, arg=None):
        self.ctx = RMDContext()
        self.dig = None  # cached digest, cleared on update()
        if arg:
            self.update(arg)

    def update(self, arg):
        # Feeding new data invalidates any cached digest.
        RMD160Update(self.ctx, arg, len(arg))
        self.dig = None

    def digest(self):
        if self.dig:
            return self.dig
        # RMD160Final mutates the context, so finalize the live context and
        # restore a saved copy afterwards to keep update() usable.
        saved = self.ctx.copy()
        self.dig = RMD160Final(self.ctx)
        self.ctx = saved
        return self.dig

    def hexdigest(self):
        return "".join("%02x" % byte for byte in self.digest())

    def copy(self):
        import copy
        return copy.deepcopy(self)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def new(arg=None):
    """
    Return a new RIPEMD160 object. An optional string argument
    may be provided; if present, this string will be automatically
    hashed.
    """
    hasher = RIPEMD160(arg)
    return hasher
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Private.
|
||||||
|
#
|
||||||
|
|
||||||
|
class RMDContext:
    """Mutable RIPEMD-160 state: chaining words, bit count and block buffer."""

    # Initial chaining values from the RIPEMD-160 specification.
    _INITIAL_STATE = (0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0)

    def __init__(self):
        self.state = list(self._INITIAL_STATE)  # five uint32 chaining words
        self.count = 0  # total message length in bits (uint64)
        self.buffer = [0] * 64  # pending input block, one int per byte

    def copy(self):
        # Deep-enough copy: state and buffer lists are duplicated so the
        # clone can be mutated independently.
        duplicate = RMDContext()
        duplicate.state = list(self.state)
        duplicate.count = self.count
        duplicate.buffer = list(self.buffer)
        return duplicate
|
||||||
|
|
||||||
|
# Round constants for the left line of RIPEMD-160 (one per group of 16 steps).
K0 = 0x00000000
K1 = 0x5A827999
K2 = 0x6ED9EBA1
K3 = 0x8F1BBCDC
K4 = 0xA953FD4E

# Round constants for the right ("parallel") line.
KK0 = 0x50A28BE6
KK1 = 0x5C4DD124
KK2 = 0x6D703EF3
KK3 = 0x7A6D76E9
KK4 = 0x00000000
|
||||||
|
|
||||||
|
def ROL(n, x):
    """Rotate the 32-bit value *x* left by *n* bits (0 < n < 32)."""
    high = (x << n) & 0xFFFFFFFF
    low = x >> (32 - n)
    return high | low


def F0(x, y, z):
    """Selection function for steps 0-15: plain XOR."""
    return x ^ y ^ z


def F1(x, y, z):
    """Steps 16-31: bitwise mux — picks bits of y where x is set, else z."""
    return (x & y) | ((~x & 0xFFFFFFFF) & z)


def F2(x, y, z):
    """Steps 32-47."""
    return (x | (~y & 0xFFFFFFFF)) ^ z


def F3(x, y, z):
    """Steps 48-63: bitwise mux on z."""
    return (x & z) | ((~z & 0xFFFFFFFF) & y)


def F4(x, y, z):
    """Steps 64-79."""
    return x ^ (y | (~z & 0xFFFFFFFF))


def R(a, b, c, d, e, Fj, Kj, sj, rj, X):
    """One RIPEMD-160 step: returns the updated (a, c) pair.

    Fj/Kj are the round's selection function and constant, sj the rotate
    amount, rj the message-word index into X.
    """
    mixed = (a + Fj(b, c, d) + X[rj] + Kj) % 0x100000000
    new_a = (ROL(sj, mixed) + e) % 0x100000000
    return new_a, ROL(10, c)


# Message padding: a single 0x80 marker byte followed by zeroes.
PADDING = [0x80] + [0] * 63
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import struct
|
||||||
|
|
||||||
|
def RMD160Transform(state, block):  # uint32 state[5], uchar block[64]
    """Compress one 64-byte block into the five-word chaining *state*.

    Runs the two RIPEMD-160 lines (left and "parallel" right, 80 steps
    each) and folds both results back into *state* in place. The step
    schedule (rotate amounts and message-word indices) is hard-coded per
    the algorithm's specification.
    """
    x = [0] * 16
    if sys.byteorder == "little":
        # Interpret the block as sixteen little-endian uint32 message words.
        x = struct.unpack("<16L", bytes(block[0:64]))
    else:
        raise ValueError("Big-endian platforms are not supported")
    a = state[0]
    b = state[1]
    c = state[2]
    d = state[3]
    e = state[4]

    # Round 1
    a, c = R(a, b, c, d, e, F0, K0, 11, 0, x)
    e, b = R(e, a, b, c, d, F0, K0, 14, 1, x)
    d, a = R(d, e, a, b, c, F0, K0, 15, 2, x)
    c, e = R(c, d, e, a, b, F0, K0, 12, 3, x)
    b, d = R(b, c, d, e, a, F0, K0, 5, 4, x)
    a, c = R(a, b, c, d, e, F0, K0, 8, 5, x)
    e, b = R(e, a, b, c, d, F0, K0, 7, 6, x)
    d, a = R(d, e, a, b, c, F0, K0, 9, 7, x)
    c, e = R(c, d, e, a, b, F0, K0, 11, 8, x)
    b, d = R(b, c, d, e, a, F0, K0, 13, 9, x)
    a, c = R(a, b, c, d, e, F0, K0, 14, 10, x)
    e, b = R(e, a, b, c, d, F0, K0, 15, 11, x)
    d, a = R(d, e, a, b, c, F0, K0, 6, 12, x)
    c, e = R(c, d, e, a, b, F0, K0, 7, 13, x)
    b, d = R(b, c, d, e, a, F0, K0, 9, 14, x)
    a, c = R(a, b, c, d, e, F0, K0, 8, 15, x)  # #15
    # Round 2
    e, b = R(e, a, b, c, d, F1, K1, 7, 7, x)
    d, a = R(d, e, a, b, c, F1, K1, 6, 4, x)
    c, e = R(c, d, e, a, b, F1, K1, 8, 13, x)
    b, d = R(b, c, d, e, a, F1, K1, 13, 1, x)
    a, c = R(a, b, c, d, e, F1, K1, 11, 10, x)
    e, b = R(e, a, b, c, d, F1, K1, 9, 6, x)
    d, a = R(d, e, a, b, c, F1, K1, 7, 15, x)
    c, e = R(c, d, e, a, b, F1, K1, 15, 3, x)
    b, d = R(b, c, d, e, a, F1, K1, 7, 12, x)
    a, c = R(a, b, c, d, e, F1, K1, 12, 0, x)
    e, b = R(e, a, b, c, d, F1, K1, 15, 9, x)
    d, a = R(d, e, a, b, c, F1, K1, 9, 5, x)
    c, e = R(c, d, e, a, b, F1, K1, 11, 2, x)
    b, d = R(b, c, d, e, a, F1, K1, 7, 14, x)
    a, c = R(a, b, c, d, e, F1, K1, 13, 11, x)
    e, b = R(e, a, b, c, d, F1, K1, 12, 8, x)  # #31
    # Round 3
    d, a = R(d, e, a, b, c, F2, K2, 11, 3, x)
    c, e = R(c, d, e, a, b, F2, K2, 13, 10, x)
    b, d = R(b, c, d, e, a, F2, K2, 6, 14, x)
    a, c = R(a, b, c, d, e, F2, K2, 7, 4, x)
    e, b = R(e, a, b, c, d, F2, K2, 14, 9, x)
    d, a = R(d, e, a, b, c, F2, K2, 9, 15, x)
    c, e = R(c, d, e, a, b, F2, K2, 13, 8, x)
    b, d = R(b, c, d, e, a, F2, K2, 15, 1, x)
    a, c = R(a, b, c, d, e, F2, K2, 14, 2, x)
    e, b = R(e, a, b, c, d, F2, K2, 8, 7, x)
    d, a = R(d, e, a, b, c, F2, K2, 13, 0, x)
    c, e = R(c, d, e, a, b, F2, K2, 6, 6, x)
    b, d = R(b, c, d, e, a, F2, K2, 5, 13, x)
    a, c = R(a, b, c, d, e, F2, K2, 12, 11, x)
    e, b = R(e, a, b, c, d, F2, K2, 7, 5, x)
    d, a = R(d, e, a, b, c, F2, K2, 5, 12, x)  # #47
    # Round 4
    c, e = R(c, d, e, a, b, F3, K3, 11, 1, x)
    b, d = R(b, c, d, e, a, F3, K3, 12, 9, x)
    a, c = R(a, b, c, d, e, F3, K3, 14, 11, x)
    e, b = R(e, a, b, c, d, F3, K3, 15, 10, x)
    d, a = R(d, e, a, b, c, F3, K3, 14, 0, x)
    c, e = R(c, d, e, a, b, F3, K3, 15, 8, x)
    b, d = R(b, c, d, e, a, F3, K3, 9, 12, x)
    a, c = R(a, b, c, d, e, F3, K3, 8, 4, x)
    e, b = R(e, a, b, c, d, F3, K3, 9, 13, x)
    d, a = R(d, e, a, b, c, F3, K3, 14, 3, x)
    c, e = R(c, d, e, a, b, F3, K3, 5, 7, x)
    b, d = R(b, c, d, e, a, F3, K3, 6, 15, x)
    a, c = R(a, b, c, d, e, F3, K3, 8, 14, x)
    e, b = R(e, a, b, c, d, F3, K3, 6, 5, x)
    d, a = R(d, e, a, b, c, F3, K3, 5, 6, x)
    c, e = R(c, d, e, a, b, F3, K3, 12, 2, x)  # #63
    # Round 5
    b, d = R(b, c, d, e, a, F4, K4, 9, 4, x)
    a, c = R(a, b, c, d, e, F4, K4, 15, 0, x)
    e, b = R(e, a, b, c, d, F4, K4, 5, 5, x)
    d, a = R(d, e, a, b, c, F4, K4, 11, 9, x)
    c, e = R(c, d, e, a, b, F4, K4, 6, 7, x)
    b, d = R(b, c, d, e, a, F4, K4, 8, 12, x)
    a, c = R(a, b, c, d, e, F4, K4, 13, 2, x)
    e, b = R(e, a, b, c, d, F4, K4, 12, 10, x)
    d, a = R(d, e, a, b, c, F4, K4, 5, 14, x)
    c, e = R(c, d, e, a, b, F4, K4, 12, 1, x)
    b, d = R(b, c, d, e, a, F4, K4, 13, 3, x)
    a, c = R(a, b, c, d, e, F4, K4, 14, 8, x)
    e, b = R(e, a, b, c, d, F4, K4, 11, 11, x)
    d, a = R(d, e, a, b, c, F4, K4, 8, 6, x)
    c, e = R(c, d, e, a, b, F4, K4, 5, 15, x)
    b, d = R(b, c, d, e, a, F4, K4, 6, 13, x)  # #79

    # Save the left line's result before running the right line.
    aa = a
    bb = b
    cc = c
    dd = d
    ee = e

    a = state[0]
    b = state[1]
    c = state[2]
    d = state[3]
    e = state[4]

    # Parallel round 1
    a, c = R(a, b, c, d, e, F4, KK0, 8, 5, x)
    e, b = R(e, a, b, c, d, F4, KK0, 9, 14, x)
    d, a = R(d, e, a, b, c, F4, KK0, 9, 7, x)
    c, e = R(c, d, e, a, b, F4, KK0, 11, 0, x)
    b, d = R(b, c, d, e, a, F4, KK0, 13, 9, x)
    a, c = R(a, b, c, d, e, F4, KK0, 15, 2, x)
    e, b = R(e, a, b, c, d, F4, KK0, 15, 11, x)
    d, a = R(d, e, a, b, c, F4, KK0, 5, 4, x)
    c, e = R(c, d, e, a, b, F4, KK0, 7, 13, x)
    b, d = R(b, c, d, e, a, F4, KK0, 7, 6, x)
    a, c = R(a, b, c, d, e, F4, KK0, 8, 15, x)
    e, b = R(e, a, b, c, d, F4, KK0, 11, 8, x)
    d, a = R(d, e, a, b, c, F4, KK0, 14, 1, x)
    c, e = R(c, d, e, a, b, F4, KK0, 14, 10, x)
    b, d = R(b, c, d, e, a, F4, KK0, 12, 3, x)
    a, c = R(a, b, c, d, e, F4, KK0, 6, 12, x)  # #15
    # Parallel round 2
    e, b = R(e, a, b, c, d, F3, KK1, 9, 6, x)
    d, a = R(d, e, a, b, c, F3, KK1, 13, 11, x)
    c, e = R(c, d, e, a, b, F3, KK1, 15, 3, x)
    b, d = R(b, c, d, e, a, F3, KK1, 7, 7, x)
    a, c = R(a, b, c, d, e, F3, KK1, 12, 0, x)
    e, b = R(e, a, b, c, d, F3, KK1, 8, 13, x)
    d, a = R(d, e, a, b, c, F3, KK1, 9, 5, x)
    c, e = R(c, d, e, a, b, F3, KK1, 11, 10, x)
    b, d = R(b, c, d, e, a, F3, KK1, 7, 14, x)
    a, c = R(a, b, c, d, e, F3, KK1, 7, 15, x)
    e, b = R(e, a, b, c, d, F3, KK1, 12, 8, x)
    d, a = R(d, e, a, b, c, F3, KK1, 7, 12, x)
    c, e = R(c, d, e, a, b, F3, KK1, 6, 4, x)
    b, d = R(b, c, d, e, a, F3, KK1, 15, 9, x)
    a, c = R(a, b, c, d, e, F3, KK1, 13, 1, x)
    e, b = R(e, a, b, c, d, F3, KK1, 11, 2, x)  # #31
    # Parallel round 3
    d, a = R(d, e, a, b, c, F2, KK2, 9, 15, x)
    c, e = R(c, d, e, a, b, F2, KK2, 7, 5, x)
    b, d = R(b, c, d, e, a, F2, KK2, 15, 1, x)
    a, c = R(a, b, c, d, e, F2, KK2, 11, 3, x)
    e, b = R(e, a, b, c, d, F2, KK2, 8, 7, x)
    d, a = R(d, e, a, b, c, F2, KK2, 6, 14, x)
    c, e = R(c, d, e, a, b, F2, KK2, 6, 6, x)
    b, d = R(b, c, d, e, a, F2, KK2, 14, 9, x)
    a, c = R(a, b, c, d, e, F2, KK2, 12, 11, x)
    e, b = R(e, a, b, c, d, F2, KK2, 13, 8, x)
    d, a = R(d, e, a, b, c, F2, KK2, 5, 12, x)
    c, e = R(c, d, e, a, b, F2, KK2, 14, 2, x)
    b, d = R(b, c, d, e, a, F2, KK2, 13, 10, x)
    a, c = R(a, b, c, d, e, F2, KK2, 13, 0, x)
    e, b = R(e, a, b, c, d, F2, KK2, 7, 4, x)
    d, a = R(d, e, a, b, c, F2, KK2, 5, 13, x)  # #47
    # Parallel round 4
    c, e = R(c, d, e, a, b, F1, KK3, 15, 8, x)
    b, d = R(b, c, d, e, a, F1, KK3, 5, 6, x)
    a, c = R(a, b, c, d, e, F1, KK3, 8, 4, x)
    e, b = R(e, a, b, c, d, F1, KK3, 11, 1, x)
    d, a = R(d, e, a, b, c, F1, KK3, 14, 3, x)
    c, e = R(c, d, e, a, b, F1, KK3, 14, 11, x)
    b, d = R(b, c, d, e, a, F1, KK3, 6, 15, x)
    a, c = R(a, b, c, d, e, F1, KK3, 14, 0, x)
    e, b = R(e, a, b, c, d, F1, KK3, 6, 5, x)
    d, a = R(d, e, a, b, c, F1, KK3, 9, 12, x)
    c, e = R(c, d, e, a, b, F1, KK3, 12, 2, x)
    b, d = R(b, c, d, e, a, F1, KK3, 9, 13, x)
    a, c = R(a, b, c, d, e, F1, KK3, 12, 9, x)
    e, b = R(e, a, b, c, d, F1, KK3, 5, 7, x)
    d, a = R(d, e, a, b, c, F1, KK3, 15, 10, x)
    c, e = R(c, d, e, a, b, F1, KK3, 8, 14, x)  # #63
    # Parallel round 5
    b, d = R(b, c, d, e, a, F0, KK4, 8, 12, x)
    a, c = R(a, b, c, d, e, F0, KK4, 5, 15, x)
    e, b = R(e, a, b, c, d, F0, KK4, 12, 10, x)
    d, a = R(d, e, a, b, c, F0, KK4, 9, 4, x)
    c, e = R(c, d, e, a, b, F0, KK4, 12, 1, x)
    b, d = R(b, c, d, e, a, F0, KK4, 5, 5, x)
    a, c = R(a, b, c, d, e, F0, KK4, 14, 8, x)
    e, b = R(e, a, b, c, d, F0, KK4, 6, 7, x)
    d, a = R(d, e, a, b, c, F0, KK4, 8, 6, x)
    c, e = R(c, d, e, a, b, F0, KK4, 13, 2, x)
    b, d = R(b, c, d, e, a, F0, KK4, 6, 13, x)
    a, c = R(a, b, c, d, e, F0, KK4, 5, 14, x)
    e, b = R(e, a, b, c, d, F0, KK4, 15, 0, x)
    d, a = R(d, e, a, b, c, F0, KK4, 13, 3, x)
    c, e = R(c, d, e, a, b, F0, KK4, 11, 9, x)
    b, d = R(b, c, d, e, a, F0, KK4, 11, 11, x)  # #79

    # Combine both lines into the chaining state (mod 2**32), with the
    # rotating word assignment specified by the algorithm.
    t = (state[1] + cc + d) % 0x100000000
    state[1] = (state[2] + dd + e) % 0x100000000
    state[2] = (state[3] + ee + a) % 0x100000000
    state[3] = (state[4] + aa + b) % 0x100000000
    state[4] = (state[0] + bb + c) % 0x100000000
    state[0] = t % 0x100000000
|
||||||
|
|
||||||
|
|
||||||
|
def RMD160Update(ctx, inp, inplen):
    """Absorb *inplen* bytes of *inp* into the hashing context *ctx*.

    *inp* may be a str (each code point truncated to its low byte, as the
    original implementation did), bytes, or a list of ints. Partial
    blocks are buffered; every complete 64-byte block is compressed with
    RMD160Transform.
    """
    # Fix: use isinstance() instead of the `type(inp) == str` anti-pattern
    # (also accepts str subclasses).
    if isinstance(inp, str):
        inp = [ord(ch) & 0xff for ch in inp]

    have = int((ctx.count // 8) % 64)  # bytes already buffered
    inplen = int(inplen)
    need = 64 - have  # bytes required to complete the buffered block
    ctx.count += 8 * inplen  # total length is tracked in bits
    off = 0
    if inplen >= need:
        if have:
            # Top up the partially filled buffer and compress it first.
            for i in range(need):
                ctx.buffer[have + i] = inp[i]
            RMD160Transform(ctx.state, ctx.buffer)
            off = need
            have = 0
        # Compress remaining full 64-byte blocks straight from the input.
        while off + 64 <= inplen:
            RMD160Transform(ctx.state, inp[off:])
            off += 64
    if off < inplen:
        # Stash the tail for the next update/final call.
        # memcpy(ctx->buffer + have, input+off, len-off)
        for i in range(inplen - off):
            ctx.buffer[have + i] = inp[off + i]
|
||||||
|
|
||||||
|
def RMD160Final(ctx):
    """Pad the message, append the bit length, and return the 20-byte digest."""
    length_bytes = struct.pack("<Q", ctx.count)  # message length in bits, little-endian
    # Pad so the 8 length bytes end exactly on a 64-byte block boundary;
    # at least one padding byte (the 0x80 marker) must always fit.
    padlen = 64 - ((ctx.count // 8) % 64)
    if padlen < 9:
        padlen += 64
    RMD160Update(ctx, PADDING, padlen - 8)
    RMD160Update(ctx, length_bytes, 8)
    return struct.pack("<5L", *ctx.state)
|
||||||
|
|
||||||
|
|
||||||
|
# Module self-test: known-answer vectors from the RIPEMD-160 publication.
assert "37f332f68db77bd9d7edd4969571ad671cf9dd3b" == new("The quick brown fox jumps over the lazy dog").hexdigest()
assert "132072df690933835eb8b6ad0b77e7b6f14acad7" == new("The quick brown fox jumps over the lazy cog").hexdigest()
assert "9c1185a5c5e9fc54612808977ee8f548b2258d31" == new("").hexdigest()
|
|
@ -0,0 +1,3 @@
|
||||||
|
from .aes import aes
|
||||||
|
from .ecc import ecc
|
||||||
|
from .rsa import rsa
|
|
@ -0,0 +1,159 @@
|
||||||
|
"""
|
||||||
|
This code is public domain. Everyone has the right to do whatever they want
|
||||||
|
with it for any purpose.
|
||||||
|
|
||||||
|
In case your jurisdiction does not consider the above disclaimer valid or
|
||||||
|
enforceable, here's an MIT license for you:
|
||||||
|
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2013 Vitalik Buterin
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
from ._util import inverse
|
||||||
|
|
||||||
|
|
||||||
|
class JacobianCurve:
    """Short Weierstrass curve arithmetic (y^2 = x^3 + ax + b mod p) using
    Jacobian projective coordinates to avoid per-step modular inversions.

    Affine points are (x, y) pairs; Jacobian points are (X, Y, Z) with
    x = X/Z^2, y = Y/Z^3. Points with X == Y == 0 represent infinity.
    """

    def __init__(self, p, n, a, b, g):
        # p: field prime, n: group order, a/b: curve coefficients, g: base point.
        self.p = p
        self.n = n
        self.a = a
        self.b = b
        self.g = g
        # Bit length of n; used as the ladder length in jacobian_shamir.
        self.n_length = len(bin(self.n).replace("0b", ""))

    def isinf(self, p):
        """Return True if *p* encodes the point at infinity."""
        return p[0] == 0 and p[1] == 0

    def to_jacobian(self, p):
        """Lift an affine point to Jacobian coordinates (Z = 1)."""
        return p[0], p[1], 1

    def jacobian_double(self, p):
        """Return 2*p in Jacobian coordinates."""
        if not p[1]:
            return 0, 0, 0
        ysq = (p[1] ** 2) % self.p
        s = (4 * p[0] * ysq) % self.p
        m = (3 * p[0] ** 2 + self.a * p[2] ** 4) % self.p
        nx = (m ** 2 - 2 * s) % self.p
        ny = (m * (s - nx) - 8 * ysq ** 2) % self.p
        nz = (2 * p[1] * p[2]) % self.p
        return nx, ny, nz

    def jacobian_add(self, p, q):
        """Return p + q in Jacobian coordinates."""
        if not p[1]:
            return q
        if not q[1]:
            return p
        u1 = (p[0] * q[2] ** 2) % self.p
        u2 = (q[0] * p[2] ** 2) % self.p
        s1 = (p[1] * q[2] ** 3) % self.p
        s2 = (q[1] * p[2] ** 3) % self.p
        if u1 == u2:
            # Same x: either opposite points (sum is infinity) or a doubling.
            if s1 != s2:
                return (0, 0, 1)
            return self.jacobian_double(p)
        h = u2 - u1
        r = s2 - s1
        h2 = (h * h) % self.p
        h3 = (h * h2) % self.p
        u1h2 = (u1 * h2) % self.p
        nx = (r ** 2 - h3 - 2 * u1h2) % self.p
        ny = (r * (u1h2 - nx) - s1 * h3) % self.p
        nz = (h * p[2] * q[2]) % self.p
        return (nx, ny, nz)

    def from_jacobian(self, p):
        """Project a Jacobian point back to affine coordinates."""
        z = inverse(p[2], self.p)
        return (p[0] * z ** 2) % self.p, (p[1] * z ** 3) % self.p

    def jacobian_multiply(self, a, n, secret=False):
        """Return n*a via recursive double-and-add.

        With secret=True both add results are computed each step to reduce
        timing leakage (NOTE(review): best-effort, not strictly
        constant-time in Python).
        """
        if a[1] == 0 or n == 0:
            return 0, 0, 1
        if n == 1:
            return a
        if n < 0 or n >= self.n:
            return self.jacobian_multiply(a, n % self.n, secret)
        half = self.jacobian_multiply(a, n // 2, secret)
        half_sq = self.jacobian_double(half)
        if secret:
            # A constant-time implementation
            half_sq_a = self.jacobian_add(half_sq, a)
            if n % 2 == 0:
                result = half_sq
            if n % 2 == 1:
                result = half_sq_a
            return result
        else:
            if n % 2 == 0:
                return half_sq
            return self.jacobian_add(half_sq, a)

    def jacobian_shamir(self, a, n, b, m):
        """Return n*a + m*b with one shared ladder (Shamir's trick)."""
        ab = self.jacobian_add(a, b)
        if n < 0 or n >= self.n:
            n %= self.n
        if m < 0 or m >= self.n:
            m %= self.n
        res = 0, 0, 1  # point on infinity
        for i in range(self.n_length - 1, -1, -1):
            res = self.jacobian_double(res)
            has_n = n & (1 << i)
            has_m = m & (1 << i)
            if has_n:
                if has_m == 0:
                    res = self.jacobian_add(res, a)
                if has_m != 0:
                    res = self.jacobian_add(res, ab)
            else:
                if has_m == 0:
                    res = self.jacobian_add(res, (0, 0, 1))  # Try not to leak
                if has_m != 0:
                    res = self.jacobian_add(res, b)
        return res

    def fast_multiply(self, a, n, secret=False):
        """Scalar multiplication on affine points: returns affine n*a."""
        return self.from_jacobian(self.jacobian_multiply(self.to_jacobian(a), n, secret))

    def fast_add(self, a, b):
        """Point addition on affine points."""
        return self.from_jacobian(self.jacobian_add(self.to_jacobian(a), self.to_jacobian(b)))

    def fast_shamir(self, a, n, b, m):
        """Affine n*a + m*b via jacobian_shamir."""
        return self.from_jacobian(self.jacobian_shamir(self.to_jacobian(a), n, self.to_jacobian(b), m))

    def is_on_curve(self, a):
        """Check that affine point *a* satisfies the curve equation and has order n."""
        x, y = a
        # Simple arithmetic check
        if (pow(x, 3, self.p) + self.a * x + self.b) % self.p != y * y % self.p:
            return False
        # nP = point-at-infinity
        return self.isinf(self.jacobian_multiply(self.to_jacobian(a), self.n))
|
|
@ -0,0 +1,79 @@
|
||||||
|
def int_to_bytes(raw, length):
    """Serialize *raw* as exactly *length* big-endian bytes.

    High-order bits that do not fit are silently discarded.
    """
    out = bytearray(length)
    for pos in range(length - 1, -1, -1):
        out[pos] = raw % 256
        raw //= 256
    return bytes(out)
|
||||||
|
|
||||||
|
|
||||||
|
def bytes_to_int(data):
    """Interpret *data* (bytes or iterable of byte values) as a big-endian unsigned int."""
    value = 0
    for byte in data:
        value = (value << 8) | byte
    return value
|
||||||
|
|
||||||
|
|
||||||
|
def legendre(a, p):
    """Legendre symbol via Euler's criterion.

    Returns 1 if *a* is a quadratic residue mod the odd prime *p*,
    -1 if it is a non-residue, and 0 if p divides a.
    """
    symbol = pow(a, (p - 1) // 2, p)
    return -1 if symbol == p - 1 else symbol
|
||||||
|
|
||||||
|
|
||||||
|
def inverse(a, n):
    """Modular inverse of *a* mod *n* via the extended Euclidean algorithm.

    Returns 0 for a == 0 (library convention rather than raising).
    """
    if a == 0:
        return 0
    coeff, prev_coeff = 1, 0
    rem, prev_rem = a % n, n
    while rem > 1:
        quotient = prev_rem // rem
        coeff, prev_coeff = prev_coeff - coeff * quotient, coeff
        rem, prev_rem = prev_rem - rem * quotient, rem
    return coeff % n
|
||||||
|
|
||||||
|
|
||||||
|
def square_root_mod_prime(n, p):
    """Return a square root of *n* modulo the prime *p* (Tonelli-Shanks).

    Raises ValueError when *n* is a quadratic non-residue. Which of the
    two roots is returned is an implementation detail.
    """
    if n == 0:
        return 0
    if p == 2:
        return n  # We should never get here but it might be useful
    if legendre(n, p) != 1:
        raise ValueError("No square root")
    # Optimizations: primes with p % 4 == 3 admit a direct closed-form root.
    if p % 4 == 3:
        return pow(n, (p + 1) // 4, p)
    # 1. By factoring out powers of 2, find Q and S such that p - 1 =
    # Q * 2 ** S with Q odd
    q = p - 1
    s = 0
    while q % 2 == 0:
        q //= 2
        s += 1
    # 2. Search for z in Z/pZ which is a quadratic non-residue
    z = 1
    while legendre(z, p) != -1:
        z += 1
    # Loop invariants (Tonelli-Shanks): c has order 2**m, t*r**-2 == n**-1 ... etc.
    m, c, t, r = s, pow(z, q, p), pow(n, q, p), pow(n, (q + 1) // 2, p)
    while True:
        if t == 0:
            return 0
        elif t == 1:
            return r
        # Use repeated squaring to find the least i, 0 < i < M, such
        # that t ** (2 ** i) = 1
        t_sq = t
        i = 0
        for i in range(1, m):
            t_sq = t_sq * t_sq % p
            if t_sq == 1:
                break
        else:
            # Unreachable when p is prime and legendre(n, p) == 1.
            raise ValueError("Should never get here")
        # Let b = c ** (2 ** (m - i - 1))
        b = pow(c, 2 ** (m - i - 1), p)
        m = i
        c = b * b % p
        t = t * b * b % p
        r = r * b % p
    return r
|
|
@ -0,0 +1,101 @@
|
||||||
|
import os
|
||||||
|
import pyaes
|
||||||
|
from .._aes import AES
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ["aes"]
|
||||||
|
|
||||||
|
class AESBackend:
|
||||||
|
def _get_algo_cipher_type(self, algo):
|
||||||
|
if not algo.startswith("aes-") or algo.count("-") != 2:
|
||||||
|
raise ValueError("Unknown cipher algorithm {}".format(algo))
|
||||||
|
key_length, cipher_type = algo[4:].split("-")
|
||||||
|
if key_length not in ("128", "192", "256"):
|
||||||
|
raise ValueError("Unknown cipher algorithm {}".format(algo))
|
||||||
|
if cipher_type not in ("cbc", "ctr", "cfb", "ofb"):
|
||||||
|
raise ValueError("Unknown cipher algorithm {}".format(algo))
|
||||||
|
return cipher_type
|
||||||
|
|
||||||
|
|
||||||
|
def is_algo_supported(self, algo):
    """Return True when *algo* names a cipher this backend can run."""
    try:
        self._get_algo_cipher_type(algo)
    except ValueError:
        return False
    return True
|
||||||
|
|
||||||
|
|
||||||
|
def random(self, length):
    """Return *length* cryptographically secure random bytes (os.urandom)."""
    buf = os.urandom(length)
    return buf
|
||||||
|
|
||||||
|
|
||||||
|
def encrypt(self, data, key, algo="aes-256-cbc"):
|
||||||
|
cipher_type = self._get_algo_cipher_type(algo)
|
||||||
|
|
||||||
|
# Generate random IV
|
||||||
|
iv = os.urandom(16)
|
||||||
|
|
||||||
|
if cipher_type == "cbc":
|
||||||
|
cipher = pyaes.AESModeOfOperationCBC(key, iv=iv)
|
||||||
|
elif cipher_type == "ctr":
|
||||||
|
# The IV is actually a counter, not an IV but it does almost the
|
||||||
|
# same. Notice: pyaes always uses 1 as initial counter! Make sure
|
||||||
|
# not to call pyaes directly.
|
||||||
|
|
||||||
|
# We kinda do two conversions here: from byte array to int here, and
|
||||||
|
# from int to byte array in pyaes internals. It's possible to fix that
|
||||||
|
# but I didn't notice any performance changes so I'm keeping clean code.
|
||||||
|
iv_int = 0
|
||||||
|
for byte in iv:
|
||||||
|
iv_int = (iv_int * 256) + byte
|
||||||
|
counter = pyaes.Counter(iv_int)
|
||||||
|
cipher = pyaes.AESModeOfOperationCTR(key, counter=counter)
|
||||||
|
elif cipher_type == "cfb":
|
||||||
|
# Change segment size from default 8 bytes to 16 bytes for OpenSSL
|
||||||
|
# compatibility
|
||||||
|
cipher = pyaes.AESModeOfOperationCFB(key, iv, segment_size=16)
|
||||||
|
elif cipher_type == "ofb":
|
||||||
|
cipher = pyaes.AESModeOfOperationOFB(key, iv)
|
||||||
|
|
||||||
|
encrypter = pyaes.Encrypter(cipher)
|
||||||
|
ciphertext = encrypter.feed(data)
|
||||||
|
ciphertext += encrypter.feed()
|
||||||
|
return ciphertext, iv
|
||||||
|
|
||||||
|
|
||||||
|
def decrypt(self, ciphertext, iv, key, algo="aes-256-cbc"):
|
||||||
|
cipher_type = self._get_algo_cipher_type(algo)
|
||||||
|
|
||||||
|
if cipher_type == "cbc":
|
||||||
|
cipher = pyaes.AESModeOfOperationCBC(key, iv=iv)
|
||||||
|
elif cipher_type == "ctr":
|
||||||
|
# The IV is actually a counter, not an IV but it does almost the
|
||||||
|
# same. Notice: pyaes always uses 1 as initial counter! Make sure
|
||||||
|
# not to call pyaes directly.
|
||||||
|
|
||||||
|
# We kinda do two conversions here: from byte array to int here, and
|
||||||
|
# from int to byte array in pyaes internals. It's possible to fix that
|
||||||
|
# but I didn't notice any performance changes so I'm keeping clean code.
|
||||||
|
iv_int = 0
|
||||||
|
for byte in iv:
|
||||||
|
iv_int = (iv_int * 256) + byte
|
||||||
|
counter = pyaes.Counter(iv_int)
|
||||||
|
cipher = pyaes.AESModeOfOperationCTR(key, counter=counter)
|
||||||
|
elif cipher_type == "cfb":
|
||||||
|
# Change segment size from default 8 bytes to 16 bytes for OpenSSL
|
||||||
|
# compatibility
|
||||||
|
cipher = pyaes.AESModeOfOperationCFB(key, iv, segment_size=16)
|
||||||
|
elif cipher_type == "ofb":
|
||||||
|
cipher = pyaes.AESModeOfOperationOFB(key, iv)
|
||||||
|
|
||||||
|
decrypter = pyaes.Decrypter(cipher)
|
||||||
|
data = decrypter.feed(ciphertext)
|
||||||
|
data += decrypter.feed()
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
def get_backend(self):
|
||||||
|
return "fallback"
|
||||||
|
|
||||||
|
|
||||||
|
# Module-level singleton: the AES frontend wired to the pure-Python backend.
aes = AES(AESBackend())
|
|
@ -0,0 +1,199 @@
|
||||||
|
import hmac
|
||||||
|
import os
|
||||||
|
from ._jacobian import JacobianCurve
|
||||||
|
from .._ecc import ECC
|
||||||
|
from .aes import aes
|
||||||
|
from ._util import int_to_bytes, bytes_to_int, inverse, square_root_mod_prime
|
||||||
|
|
||||||
|
|
||||||
|
class EllipticCurveBackend:
    """Pure-Python ECC backend over a short Weierstrass curve.

    Curve equation: y^2 = x^3 + a*x + b (mod p). Point arithmetic is
    delegated to JacobianCurve; byte/int conversions use the _util helpers.
    """

    def __init__(self, p, n, a, b, g):
        # p: field prime, n: group order, a/b: curve coefficients,
        # g: generator point (presumably an (x, y) tuple of ints — matches
        # how it is passed to jacobian.fast_multiply below)
        self.p, self.n, self.a, self.b, self.g = p, n, a, b, g
        self.jacobian = JacobianCurve(p, n, a, b, g)

        # Byte length of a coordinate / private key, derived from bit_length(p)
        self.public_key_length = (len(bin(p).replace("0b", "")) + 7) // 8
        self.order_bitlength = len(bin(n).replace("0b", ""))

    def _int_to_bytes(self, raw, len=None):
        # NOTE(review): parameter "len" shadows the builtin; kept as-is since
        # it is part of the internal call convention (e.g. derive_child).
        return int_to_bytes(raw, len or self.public_key_length)

    def decompress_point(self, public_key):
        """Expand a compressed SEC1 public key (0x02/0x03 prefix + X) to (x, y) bytes."""
        # Parse & load data
        x = bytes_to_int(public_key[1:])
        # Calculate Y from the curve equation
        y_square = (pow(x, 3, self.p) + self.a * x + self.b) % self.p
        try:
            y = square_root_mod_prime(y_square, self.p)
        except Exception:
            raise ValueError("Invalid public key") from None
        # Prefix 0x02 means even Y, 0x03 means odd Y; flip if parity mismatches
        if y % 2 != public_key[0] - 0x02:
            y = self.p - y
        return self._int_to_bytes(x), self._int_to_bytes(y)

    def new_private_key(self):
        """Generate a uniformly random private key below the group order."""
        while True:
            private_key = os.urandom(self.public_key_length)
            # Rejection sampling keeps the distribution uniform in [0, n)
            if bytes_to_int(private_key) >= self.n:
                continue
            return private_key

    def private_to_public(self, private_key):
        """Return the public key point (x, y) as two byte strings."""
        raw = bytes_to_int(private_key)
        x, y = self.jacobian.fast_multiply(self.g, raw)
        return self._int_to_bytes(x), self._int_to_bytes(y)

    def ecdh(self, private_key, public_key):
        """ECDH: return the X coordinate of private_key * public_key as bytes."""
        x, y = public_key
        x, y = bytes_to_int(x), bytes_to_int(y)
        private_key = bytes_to_int(private_key)
        # secret=True requests constant-time-ish multiplication from JacobianCurve
        x, _ = self.jacobian.fast_multiply((x, y), private_key, secret=True)
        return self._int_to_bytes(x)

    def _subject_to_int(self, subject):
        # Truncate the hash/subject to the order's byte length (ECDSA convention)
        return bytes_to_int(subject[:(self.order_bitlength + 7) // 8])

    def sign(self, subject, raw_private_key, recoverable, is_compressed, entropy):
        """ECDSA-sign *subject* with deterministic/externally supplied nonce *entropy*.

        Returns r||s, optionally prefixed with a recovery-ID byte when
        *recoverable* is set (27..30 uncompressed, 31+ compressed).
        Raises ValueError when the nonce produces a degenerate signature.
        """
        z = self._subject_to_int(subject)
        private_key = bytes_to_int(raw_private_key)
        k = bytes_to_int(entropy)

        # Fix k length to prevent Minerva. Increasing multiplier by a
        # multiple of order doesn't break anything. This fix was ported
        # from python-ecdsa
        ks = k + self.n
        kt = ks + self.n
        ks_len = len(bin(ks).replace("0b", "")) // 8
        kt_len = len(bin(kt).replace("0b", "")) // 8
        if ks_len == kt_len:
            k = kt
        else:
            k = ks
        px, py = self.jacobian.fast_multiply(self.g, k, secret=True)

        r = px % self.n
        if r == 0:
            # Invalid k
            raise ValueError("Invalid k")

        # s = k^-1 * (z + r * d) mod n
        s = (inverse(k, self.n) * (z + (private_key * r))) % self.n
        if s == 0:
            # Invalid k
            raise ValueError("Invalid k")

        # Enforce low-S form (BIP-62 style); remember the flip for recovery
        inverted = False
        if s * 2 >= self.n:
            s = self.n - s
            inverted = True
        rs_buf = self._int_to_bytes(r) + self._int_to_bytes(s)

        if recoverable:
            # Recovery ID encodes R's Y parity (xor'ed with the low-S flip)
            # plus which multiple of n the X coordinate fell into
            recid = (py % 2) ^ inverted
            recid += 2 * int(px // self.n)
            if is_compressed:
                return bytes([31 + recid]) + rs_buf
            else:
                if recid >= 4:
                    raise ValueError("Too big recovery ID, use compressed address instead")
                return bytes([27 + recid]) + rs_buf
        else:
            return rs_buf

    def recover(self, signature, subject):
        """Recover the signer's public key (x, y) from a recoverable signature."""
        z = self._subject_to_int(subject)

        # Header byte: 27..30 = uncompressed, 31+ = compressed
        recid = signature[0] - 27 if signature[0] < 31 else signature[0] - 31
        r = bytes_to_int(signature[1:self.public_key_length + 1])
        s = bytes_to_int(signature[self.public_key_length + 1:])

        # Verify bounds
        if not 0 <= recid < 2 * (self.p // self.n + 1):
            raise ValueError("Invalid recovery ID")
        if r >= self.n:
            raise ValueError("r is out of bounds")
        if s >= self.n:
            raise ValueError("s is out of bounds")

        rinv = inverse(r, self.n)
        u1 = (-z * rinv) % self.n
        u2 = (s * rinv) % self.n

        # Recover R: undo the "px // n" component folded into recid
        rx = r + (recid // 2) * self.n
        if rx >= self.p:
            raise ValueError("Rx is out of bounds")

        # Almost copied from decompress_point
        ry_square = (pow(rx, 3, self.p) + self.a * rx + self.b) % self.p
        try:
            ry = square_root_mod_prime(ry_square, self.p)
        except Exception:
            raise ValueError("Invalid recovered public key") from None

        # Ensure the point is correct: low bit of recid is R's Y parity
        if ry % 2 != recid % 2:
            # Fix Ry sign
            ry = self.p - ry

        # Q = u1*G + u2*R via Shamir's trick
        x, y = self.jacobian.fast_shamir(self.g, u1, (rx, ry), u2)
        return self._int_to_bytes(x), self._int_to_bytes(y)

    def verify(self, signature, subject, public_key):
        """Verify a plain r||s signature; returns True or raises ValueError."""
        z = self._subject_to_int(subject)

        r = bytes_to_int(signature[:self.public_key_length])
        s = bytes_to_int(signature[self.public_key_length:])

        # Verify bounds
        if r >= self.n:
            raise ValueError("r is out of bounds")
        if s >= self.n:
            raise ValueError("s is out of bounds")

        public_key = [bytes_to_int(c) for c in public_key]

        # Ensure that the public key is correct
        if not self.jacobian.is_on_curve(public_key):
            raise ValueError("Public key is not on curve")

        sinv = inverse(s, self.n)
        u1 = (z * sinv) % self.n
        u2 = (r * sinv) % self.n

        x1, _ = self.jacobian.fast_shamir(self.g, u1, public_key, u2)
        if r != x1 % self.n:
            raise ValueError("Invalid signature")

        return True

    def derive_child(self, seed, child):
        """Derive a child private key from *seed* and index *child* (BIP32-like,
        two HMAC-SHA512 rounds keyed with b"Bitcoin seed")."""
        # Round 1: master key + chain code from the seed
        h = hmac.new(key=b"Bitcoin seed", msg=seed, digestmod="sha512").digest()
        private_key1 = h[:32]
        x, y = self.private_to_public(private_key1)
        # Compressed public key: parity prefix + X coordinate
        public_key1 = bytes([0x02 + (y[-1] % 2)]) + x
        private_key1 = bytes_to_int(private_key1)

        # Round 2: HMAC over compressed pubkey || 4-byte child index
        msg = public_key1 + self._int_to_bytes(child, 4)
        h = hmac.new(key=h[32:], msg=msg, digestmod="sha512").digest()
        private_key2 = bytes_to_int(h[:32])

        # Child key is the sum of both keys modulo the group order
        return self._int_to_bytes((private_key1 + private_key2) % self.n)

    @classmethod
    def get_backend(cls):
        """Identify this implementation ("fallback" = pure Python)."""
        return "fallback"
|
||||||
|
|
||||||
|
|
||||||
|
# Module-level singleton: ECC frontend using the pure-Python curve backend
# (note the class itself is passed, one backend is instantiated per curve).
ecc = ECC(EllipticCurveBackend, aes)
|
|
@ -0,0 +1,8 @@
|
||||||
|
# pylint: disable=too-few-public-methods
|
||||||
|
|
||||||
|
class RSA:
    """Stub RSA backend.

    The fallback implementation provides no RSA primitives; it only
    reports which backend is active.
    """

    def get_backend(self):
        """Identify this implementation ("fallback" = pure Python)."""
        return "fallback"


# Module-level singleton mirroring the aes/ecc pattern.
rsa = RSA()
|
|
@ -0,0 +1,3 @@
|
||||||
|
from .aes import aes
|
||||||
|
from .ecc import ecc
|
||||||
|
from .rsa import rsa
|
|
@ -0,0 +1,156 @@
|
||||||
|
import ctypes
|
||||||
|
import threading
|
||||||
|
from .._aes import AES
|
||||||
|
from ..fallback.aes import aes as fallback_aes
|
||||||
|
from .library import lib, openssl_backend
|
||||||
|
|
||||||
|
|
||||||
|
# Initialize functions: declare pointer return types so ctypes does not
# truncate them to a C int (the default restype).
try:
    lib.EVP_CIPHER_CTX_new.restype = ctypes.POINTER(ctypes.c_char)
except AttributeError:
    # EVP_CIPHER_CTX_new does not exist in very old OpenSSL builds;
    # AESBackend falls back to a caller-allocated context buffer then.
    pass
lib.EVP_get_cipherbyname.restype = ctypes.POINTER(ctypes.c_char)


# Per-thread cache for the EVP cipher context (EVP contexts are not
# thread-safe, so each thread gets its own).
thread_local = threading.local()
|
||||||
|
|
||||||
|
|
||||||
|
class Context:
    """Holds an EVP_CIPHER_CTX pointer and frees it when garbage-collected.

    do_free is False when the pointer is a caller-allocated buffer
    (old OpenSSL without EVP_CIPHER_CTX_new) that must not be freed here.
    """

    def __init__(self, ptr, do_free):
        # Keep our own reference to lib so the finalizer can still reach it
        # during interpreter shutdown, when module globals may already be gone.
        self.lib = lib
        self.ptr = ptr
        self.do_free = do_free

    def __del__(self):
        if self.do_free:
            self.lib.EVP_CIPHER_CTX_free(self.ptr)
|
||||||
|
|
||||||
|
|
||||||
|
class AESBackend:
    """AES backend backed by OpenSSL's EVP interface via ctypes.

    Uses a per-thread cached EVP_CIPHER_CTX; supports the same algorithm
    names as the pure-Python fallback ("aes-<bits>-<mode>").
    """

    # Whitelist of algorithm names accepted before asking OpenSSL for them
    ALGOS = (
        "aes-128-cbc", "aes-192-cbc", "aes-256-cbc",
        "aes-128-ctr", "aes-192-ctr", "aes-256-ctr",
        "aes-128-cfb", "aes-192-cfb", "aes-256-cfb",
        "aes-128-ofb", "aes-192-ofb", "aes-256-ofb"
    )

    def __init__(self):
        # Feature-detect the OpenSSL version: 1.1.0+ has ctx_new/ctx_reset,
        # older releases require init/cleanup on a caller-owned buffer.
        self.is_supported_ctx_new = hasattr(lib, "EVP_CIPHER_CTX_new")
        self.is_supported_ctx_reset = hasattr(lib, "EVP_CIPHER_CTX_reset")

    def _get_ctx(self):
        """Return this thread's cached EVP_CIPHER_CTX pointer, creating it lazily."""
        if not hasattr(thread_local, "ctx"):
            if self.is_supported_ctx_new:
                thread_local.ctx = Context(lib.EVP_CIPHER_CTX_new(), True)
            else:
                # 1 KiB ought to be enough for everybody. We don't know the real
                # size of the context buffer because we are unsure about padding and
                # pointer size
                thread_local.ctx = Context(ctypes.create_string_buffer(1024), False)
        return thread_local.ctx.ptr

    def get_backend(self):
        """Identify this implementation (the detected OpenSSL backend name)."""
        return openssl_backend

    def _get_cipher(self, algo):
        """Resolve *algo* to an OpenSSL EVP_CIPHER pointer or raise ValueError."""
        if algo not in self.ALGOS:
            raise ValueError("Unknown cipher algorithm {}".format(algo))
        cipher = lib.EVP_get_cipherbyname(algo.encode())
        if not cipher:
            # Known name but this OpenSSL build does not provide it
            raise ValueError("Unknown cipher algorithm {}".format(algo))
        return cipher

    def is_algo_supported(self, algo):
        """Return True when this OpenSSL build supports *algo*."""
        try:
            self._get_cipher(algo)
            return True
        except ValueError:
            return False

    def random(self, length):
        """Return *length* random bytes from OpenSSL's CSPRNG (RAND_bytes)."""
        entropy = ctypes.create_string_buffer(length)
        lib.RAND_bytes(entropy, length)
        return bytes(entropy)

    def encrypt(self, data, key, algo="aes-256-cbc"):
        """Encrypt *data* with *key*; return (ciphertext, iv).

        A fresh random 16-byte IV is generated per call. The context is
        reset/cleaned in the finally block so it can be safely reused.
        """
        # Initialize context
        ctx = self._get_ctx()
        if not self.is_supported_ctx_new:
            lib.EVP_CIPHER_CTX_init(ctx)
        try:
            # First init selects the cipher; key/IV are supplied separately below
            lib.EVP_EncryptInit_ex(ctx, self._get_cipher(algo), None, None, None)

            # Generate random IV
            iv_length = 16
            iv = self.random(iv_length)

            # Set key and IV
            lib.EVP_EncryptInit_ex(ctx, None, None, key, iv)

            # Actually encrypt. Output may grow by up to one block of padding.
            block_size = 16
            output = ctypes.create_string_buffer((len(data) // block_size + 1) * block_size)
            output_len = ctypes.c_int()

            if not lib.EVP_CipherUpdate(ctx, output, ctypes.byref(output_len), data, len(data)):
                raise ValueError("Could not feed cipher with data")

            # Final (padding) block is written right after the update output
            new_output = ctypes.byref(output, output_len.value)
            output_len2 = ctypes.c_int()
            if not lib.EVP_CipherFinal_ex(ctx, new_output, ctypes.byref(output_len2)):
                raise ValueError("Could not finalize cipher")

            ciphertext = output[:output_len.value + output_len2.value]
            return ciphertext, iv
        finally:
            if self.is_supported_ctx_reset:
                lib.EVP_CIPHER_CTX_reset(ctx)
            else:
                lib.EVP_CIPHER_CTX_cleanup(ctx)

    def decrypt(self, ciphertext, iv, key, algo="aes-256-cbc"):
        """Decrypt *ciphertext* produced by encrypt() and return the plaintext."""
        # Initialize context
        ctx = self._get_ctx()
        if not self.is_supported_ctx_new:
            lib.EVP_CIPHER_CTX_init(ctx)
        try:
            lib.EVP_DecryptInit_ex(ctx, self._get_cipher(algo), None, None, None)

            # Make sure IV length is correct
            iv_length = 16
            if len(iv) != iv_length:
                raise ValueError("Expected IV to be {} bytes, got {} bytes".format(iv_length, len(iv)))

            # Set key and IV
            lib.EVP_DecryptInit_ex(ctx, None, None, key, iv)

            # Actually decrypt; plaintext is never longer than the ciphertext
            output = ctypes.create_string_buffer(len(ciphertext))
            output_len = ctypes.c_int()

            if not lib.EVP_DecryptUpdate(ctx, output, ctypes.byref(output_len), ciphertext, len(ciphertext)):
                raise ValueError("Could not feed decipher with ciphertext")

            new_output = ctypes.byref(output, output_len.value)
            output_len2 = ctypes.c_int()
            # Fails on bad padding, which usually means a wrong key/IV
            if not lib.EVP_DecryptFinal_ex(ctx, new_output, ctypes.byref(output_len2)):
                raise ValueError("Could not finalize decipher")

            return output[:output_len.value + output_len2.value]
        finally:
            if self.is_supported_ctx_reset:
                lib.EVP_CIPHER_CTX_reset(ctx)
            else:
                lib.EVP_CIPHER_CTX_cleanup(ctx)
|
||||||
|
|
||||||
|
|
||||||
|
# Module-level singleton: OpenSSL-backed AES with the pure-Python
# implementation as a fallback for unsupported algorithms.
aes = AES(AESBackend(), fallback_aes)
|
|
@ -0,0 +1,3 @@
|
||||||
|
# Can be redefined by user
def discover():
    """Hook for locating the OpenSSL library.

    The default implementation is a no-op returning None; applications may
    replace it to point the loader at a specific library.
    """
|
|
@ -0,0 +1,583 @@
|
||||||
|
import ctypes
|
||||||
|
import hmac
|
||||||
|
import threading
|
||||||
|
from .._ecc import ECC
|
||||||
|
from .aes import aes
|
||||||
|
from .library import lib, openssl_backend
|
||||||
|
|
||||||
|
|
||||||
|
# Initialize functions
|
||||||
|
lib.BN_new.restype = ctypes.POINTER(ctypes.c_char)
|
||||||
|
lib.BN_bin2bn.restype = ctypes.POINTER(ctypes.c_char)
|
||||||
|
lib.BN_CTX_new.restype = ctypes.POINTER(ctypes.c_char)
|
||||||
|
lib.EC_GROUP_new_curve_GFp.restype = ctypes.POINTER(ctypes.c_char)
|
||||||
|
lib.EC_KEY_new.restype = ctypes.POINTER(ctypes.c_char)
|
||||||
|
lib.EC_POINT_new.restype = ctypes.POINTER(ctypes.c_char)
|
||||||
|
lib.EC_KEY_get0_private_key.restype = ctypes.POINTER(ctypes.c_char)
|
||||||
|
lib.EVP_PKEY_new.restype = ctypes.POINTER(ctypes.c_char)
|
||||||
|
try:
|
||||||
|
lib.EVP_PKEY_CTX_new.restype = ctypes.POINTER(ctypes.c_char)
|
||||||
|
except AttributeError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
thread_local = threading.local()
|
||||||
|
|
||||||
|
|
||||||
|
# This lock is required to keep ECC thread-safe. Old OpenSSL versions (before
|
||||||
|
# 1.1.0) use global objects so they aren't thread safe. Fortunately we can check
|
||||||
|
# the code to find out which functions are thread safe.
|
||||||
|
#
|
||||||
|
# For example, EC_GROUP_new_curve_GFp checks global error code to initialize
|
||||||
|
# the group, so if two errors happen at once or two threads read the error code,
|
||||||
|
# or the codes are read in the wrong order, the group is initialized in a wrong
|
||||||
|
# way.
|
||||||
|
#
|
||||||
|
# EC_KEY_new_by_curve_name calls EC_GROUP_new_curve_GFp so it's not thread
|
||||||
|
# safe. We can't use the lock because it would be too slow; instead, we use
|
||||||
|
# EC_KEY_new and then EC_KEY_set_group which calls EC_GROUP_copy instead which
|
||||||
|
# is thread safe.
|
||||||
|
lock = threading.Lock()
|
||||||
|
|
||||||
|
|
||||||
|
class BN:
    """Thin wrapper around OpenSSL BIGNUMs with Pythonic arithmetic operators.

    Owns (and frees) the underlying BIGNUM unless constructed with
    link_only=True, in which case it merely references an existing one.
    """

    # BN_CTX: scratch-space context required by many BN_* functions
    class Context:
        def __init__(self):
            self.ptr = lib.BN_CTX_new()
            self.lib = lib  # For finalizer

        def __del__(self):
            self.lib.BN_CTX_free(self.ptr)

        @classmethod
        def get(cls):
            # Get a thread-safe context (one BN_CTX per thread, created lazily)
            if not hasattr(thread_local, "bn_ctx"):
                thread_local.bn_ctx = cls()
            return thread_local.bn_ctx.ptr

    def __init__(self, value=None, link_only=False):
        # value may be: None (fresh zero BN), a small int (< 256), a larger
        # int (serialized via to_bytes), raw bytes, or an existing BN pointer
        # when link_only=True.
        if link_only:
            # Borrowed pointer — do not free on GC
            self.bn = value
            self._free = False
        else:
            if value is None:
                self.bn = lib.BN_new()
                self._free = True
            elif isinstance(value, int) and value < 256:
                # Small ints fit in a single BN_add_word call
                self.bn = lib.BN_new()
                lib.BN_clear(self.bn)
                lib.BN_add_word(self.bn, value)
                self._free = True
            else:
                if isinstance(value, int):
                    # NOTE(review): caps ints at 1024 bits and assumes they are
                    # non-negative (to_bytes raises otherwise) — confirm callers
                    # never pass bigger/negative values.
                    value = value.to_bytes(128, "big")
                self.bn = lib.BN_bin2bn(value, len(value), None)
                self._free = True

    def __del__(self):
        if self._free:
            lib.BN_free(self.bn)

    def bytes(self, length=None):
        """Serialize to big-endian bytes, optionally zero-padded to *length*."""
        buf = ctypes.create_string_buffer((len(self) + 7) // 8)
        lib.BN_bn2bin(self.bn, buf)
        buf = bytes(buf)
        if length is None:
            return buf
        else:
            if length < len(buf):
                raise ValueError("Too little space for BN")
            # Left-pad with zero bytes to the requested fixed width
            return b"\x00" * (length - len(buf)) + buf

    def __int__(self):
        value = 0
        for byte in self.bytes():
            value = value * 256 + byte
        return value

    def __len__(self):
        # Length is measured in BITS (BN_num_bits), not bytes
        return lib.BN_num_bits(self.bn)

    def inverse(self, modulo):
        """Return the modular inverse of self mod *modulo* as a new BN."""
        result = BN()
        if not lib.BN_mod_inverse(result.bn, self.bn, modulo.bn, BN.Context.get()):
            raise ValueError("Could not compute inverse")
        return result

    def __floordiv__(self, other):
        if not isinstance(other, BN):
            raise TypeError("Can only divide BN by BN, not {}".format(other))
        result = BN()
        # BN_div(quotient, remainder, ...) — remainder discarded here
        if not lib.BN_div(result.bn, None, self.bn, other.bn, BN.Context.get()):
            raise ZeroDivisionError("Division by zero")
        return result

    def __mod__(self, other):
        if not isinstance(other, BN):
            raise TypeError("Can only divide BN by BN, not {}".format(other))
        result = BN()
        # BN_div with quotient discarded — only the remainder is kept
        if not lib.BN_div(None, result.bn, self.bn, other.bn, BN.Context.get()):
            raise ZeroDivisionError("Division by zero")
        return result

    def __add__(self, other):
        if not isinstance(other, BN):
            raise TypeError("Can only sum BN's, not BN and {}".format(other))
        result = BN()
        if not lib.BN_add(result.bn, self.bn, other.bn):
            raise ValueError("Could not sum two BN's")
        return result

    def __sub__(self, other):
        if not isinstance(other, BN):
            raise TypeError("Can only subtract BN's, not BN and {}".format(other))
        result = BN()
        if not lib.BN_sub(result.bn, self.bn, other.bn):
            raise ValueError("Could not subtract BN from BN")
        return result

    def __mul__(self, other):
        if not isinstance(other, BN):
            raise TypeError("Can only multiply BN by BN, not {}".format(other))
        result = BN()
        if not lib.BN_mul(result.bn, self.bn, other.bn, BN.Context.get()):
            raise ValueError("Could not multiply two BN's")
        return result

    def __neg__(self):
        return BN(0) - self

    # A dirty but nice way to update current BN and free old BN at the same time:
    # compute into a temporary, then swap pointers so the temporary's finalizer
    # frees our old BIGNUM.
    def __imod__(self, other):
        res = self % other
        self.bn, res.bn = res.bn, self.bn
        return self

    def __iadd__(self, other):
        res = self + other
        self.bn, res.bn = res.bn, self.bn
        return self

    def __isub__(self, other):
        res = self - other
        self.bn, res.bn = res.bn, self.bn
        return self

    def __imul__(self, other):
        res = self * other
        self.bn, res.bn = res.bn, self.bn
        return self

    def cmp(self, other):
        """Three-way compare via BN_cmp: negative, zero or positive."""
        if not isinstance(other, BN):
            raise TypeError("Can only compare BN with BN, not {}".format(other))
        return lib.BN_cmp(self.bn, other.bn)

    # Rich comparisons all delegate to cmp().
    # NOTE(review): defining __eq__ without __hash__ makes BN unhashable —
    # presumably intentional since the wrapped value is mutable in place.
    def __eq__(self, other):
        return self.cmp(other) == 0

    def __lt__(self, other):
        return self.cmp(other) < 0

    def __gt__(self, other):
        return self.cmp(other) > 0

    def __ne__(self, other):
        return self.cmp(other) != 0

    def __le__(self, other):
        return self.cmp(other) <= 0

    def __ge__(self, other):
        return self.cmp(other) >= 0

    def __repr__(self):
        return "<BN {}>".format(int(self))

    def __str__(self):
        return str(int(self))
|
||||||
|
|
||||||
|
|
||||||
|
class EllipticCurveBackend:
|
||||||
|
def __init__(self, p, n, a, b, g):
|
||||||
|
bn_ctx = BN.Context.get()
|
||||||
|
|
||||||
|
self.lib = lib # For finalizer
|
||||||
|
|
||||||
|
self.p = BN(p)
|
||||||
|
self.order = BN(n)
|
||||||
|
self.a = BN(a)
|
||||||
|
self.b = BN(b)
|
||||||
|
self.h = BN((p + n // 2) // n)
|
||||||
|
|
||||||
|
with lock:
|
||||||
|
# Thread-safety
|
||||||
|
self.group = lib.EC_GROUP_new_curve_GFp(self.p.bn, self.a.bn, self.b.bn, bn_ctx)
|
||||||
|
if not self.group:
|
||||||
|
raise ValueError("Could not create group object")
|
||||||
|
generator = self._public_key_to_point(g)
|
||||||
|
lib.EC_GROUP_set_generator(self.group, generator, self.order.bn, self.h.bn)
|
||||||
|
if not self.group:
|
||||||
|
raise ValueError("The curve is not supported by OpenSSL")
|
||||||
|
|
||||||
|
self.public_key_length = (len(self.p) + 7) // 8
|
||||||
|
|
||||||
|
self.is_supported_evp_pkey_ctx = hasattr(lib, "EVP_PKEY_CTX_new")
|
||||||
|
|
||||||
|
|
||||||
|
def __del__(self):
|
||||||
|
self.lib.EC_GROUP_free(self.group)
|
||||||
|
|
||||||
|
|
||||||
|
def _private_key_to_ec_key(self, private_key):
|
||||||
|
# Thread-safety
|
||||||
|
eckey = lib.EC_KEY_new()
|
||||||
|
lib.EC_KEY_set_group(eckey, self.group)
|
||||||
|
if not eckey:
|
||||||
|
raise ValueError("Failed to allocate EC_KEY")
|
||||||
|
private_key = BN(private_key)
|
||||||
|
if not lib.EC_KEY_set_private_key(eckey, private_key.bn):
|
||||||
|
lib.EC_KEY_free(eckey)
|
||||||
|
raise ValueError("Invalid private key")
|
||||||
|
return eckey, private_key
|
||||||
|
|
||||||
|
|
||||||
|
def _public_key_to_point(self, public_key):
|
||||||
|
x = BN(public_key[0])
|
||||||
|
y = BN(public_key[1])
|
||||||
|
# EC_KEY_set_public_key_affine_coordinates is not supported by
|
||||||
|
# OpenSSL 1.0.0 so we can't use it
|
||||||
|
point = lib.EC_POINT_new(self.group)
|
||||||
|
if not lib.EC_POINT_set_affine_coordinates_GFp(self.group, point, x.bn, y.bn, BN.Context.get()):
|
||||||
|
raise ValueError("Could not set public key affine coordinates")
|
||||||
|
return point
|
||||||
|
|
||||||
|
|
||||||
|
def _public_key_to_ec_key(self, public_key):
|
||||||
|
# Thread-safety
|
||||||
|
eckey = lib.EC_KEY_new()
|
||||||
|
lib.EC_KEY_set_group(eckey, self.group)
|
||||||
|
if not eckey:
|
||||||
|
raise ValueError("Failed to allocate EC_KEY")
|
||||||
|
try:
|
||||||
|
# EC_KEY_set_public_key_affine_coordinates is not supported by
|
||||||
|
# OpenSSL 1.0.0 so we can't use it
|
||||||
|
point = self._public_key_to_point(public_key)
|
||||||
|
if not lib.EC_KEY_set_public_key(eckey, point):
|
||||||
|
raise ValueError("Could not set point")
|
||||||
|
lib.EC_POINT_free(point)
|
||||||
|
return eckey
|
||||||
|
except Exception as e:
|
||||||
|
lib.EC_KEY_free(eckey)
|
||||||
|
raise e from None
|
||||||
|
|
||||||
|
|
||||||
|
def _point_to_affine(self, point):
|
||||||
|
# Convert to affine coordinates
|
||||||
|
x = BN()
|
||||||
|
y = BN()
|
||||||
|
if lib.EC_POINT_get_affine_coordinates_GFp(self.group, point, x.bn, y.bn, BN.Context.get()) != 1:
|
||||||
|
raise ValueError("Failed to convert public key to affine coordinates")
|
||||||
|
# Convert to binary
|
||||||
|
if (len(x) + 7) // 8 > self.public_key_length:
|
||||||
|
raise ValueError("Public key X coordinate is too large")
|
||||||
|
if (len(y) + 7) // 8 > self.public_key_length:
|
||||||
|
raise ValueError("Public key Y coordinate is too large")
|
||||||
|
return x.bytes(self.public_key_length), y.bytes(self.public_key_length)
|
||||||
|
|
||||||
|
|
||||||
|
def decompress_point(self, public_key):
|
||||||
|
point = lib.EC_POINT_new(self.group)
|
||||||
|
if not point:
|
||||||
|
raise ValueError("Could not create point")
|
||||||
|
try:
|
||||||
|
if not lib.EC_POINT_oct2point(self.group, point, public_key, len(public_key), BN.Context.get()):
|
||||||
|
raise ValueError("Invalid compressed public key")
|
||||||
|
return self._point_to_affine(point)
|
||||||
|
finally:
|
||||||
|
lib.EC_POINT_free(point)
|
||||||
|
|
||||||
|
|
||||||
|
def new_private_key(self):
|
||||||
|
# Create random key
|
||||||
|
# Thread-safety
|
||||||
|
eckey = lib.EC_KEY_new()
|
||||||
|
lib.EC_KEY_set_group(eckey, self.group)
|
||||||
|
lib.EC_KEY_generate_key(eckey)
|
||||||
|
# To big integer
|
||||||
|
private_key = BN(lib.EC_KEY_get0_private_key(eckey), link_only=True)
|
||||||
|
# To binary
|
||||||
|
private_key_buf = private_key.bytes(self.public_key_length)
|
||||||
|
# Cleanup
|
||||||
|
lib.EC_KEY_free(eckey)
|
||||||
|
return private_key_buf
|
||||||
|
|
||||||
|
|
||||||
|
def private_to_public(self, private_key):
|
||||||
|
eckey, private_key = self._private_key_to_ec_key(private_key)
|
||||||
|
try:
|
||||||
|
# Derive public key
|
||||||
|
point = lib.EC_POINT_new(self.group)
|
||||||
|
try:
|
||||||
|
if not lib.EC_POINT_mul(self.group, point, private_key.bn, None, None, BN.Context.get()):
|
||||||
|
raise ValueError("Failed to derive public key")
|
||||||
|
return self._point_to_affine(point)
|
||||||
|
finally:
|
||||||
|
lib.EC_POINT_free(point)
|
||||||
|
finally:
|
||||||
|
lib.EC_KEY_free(eckey)
|
||||||
|
|
||||||
|
|
||||||
|
def ecdh(self, private_key, public_key):
|
||||||
|
if not self.is_supported_evp_pkey_ctx:
|
||||||
|
# Use ECDH_compute_key instead
|
||||||
|
# Create EC_KEY from private key
|
||||||
|
eckey, _ = self._private_key_to_ec_key(private_key)
|
||||||
|
try:
|
||||||
|
# Create EC_POINT from public key
|
||||||
|
point = self._public_key_to_point(public_key)
|
||||||
|
try:
|
||||||
|
key = ctypes.create_string_buffer(self.public_key_length)
|
||||||
|
if lib.ECDH_compute_key(key, self.public_key_length, point, eckey, None) == -1:
|
||||||
|
raise ValueError("Could not compute shared secret")
|
||||||
|
return bytes(key)
|
||||||
|
finally:
|
||||||
|
lib.EC_POINT_free(point)
|
||||||
|
finally:
|
||||||
|
lib.EC_KEY_free(eckey)
|
||||||
|
|
||||||
|
# Private key:
|
||||||
|
# Create EC_KEY
|
||||||
|
eckey, _ = self._private_key_to_ec_key(private_key)
|
||||||
|
try:
|
||||||
|
# Convert to EVP_PKEY
|
||||||
|
pkey = lib.EVP_PKEY_new()
|
||||||
|
if not pkey:
|
||||||
|
raise ValueError("Could not create private key object")
|
||||||
|
try:
|
||||||
|
lib.EVP_PKEY_set1_EC_KEY(pkey, eckey)
|
||||||
|
|
||||||
|
# Public key:
|
||||||
|
# Create EC_KEY
|
||||||
|
peer_eckey = self._public_key_to_ec_key(public_key)
|
||||||
|
try:
|
||||||
|
# Convert to EVP_PKEY
|
||||||
|
peer_pkey = lib.EVP_PKEY_new()
|
||||||
|
if not peer_pkey:
|
||||||
|
raise ValueError("Could not create public key object")
|
||||||
|
try:
|
||||||
|
lib.EVP_PKEY_set1_EC_KEY(peer_pkey, peer_eckey)
|
||||||
|
|
||||||
|
# Create context
|
||||||
|
ctx = lib.EVP_PKEY_CTX_new(pkey, None)
|
||||||
|
if not ctx:
|
||||||
|
raise ValueError("Could not create EVP context")
|
||||||
|
try:
|
||||||
|
if lib.EVP_PKEY_derive_init(ctx) != 1:
|
||||||
|
raise ValueError("Could not initialize key derivation")
|
||||||
|
if not lib.EVP_PKEY_derive_set_peer(ctx, peer_pkey):
|
||||||
|
raise ValueError("Could not set peer")
|
||||||
|
|
||||||
|
# Actually derive
|
||||||
|
key_len = ctypes.c_int(0)
|
||||||
|
lib.EVP_PKEY_derive(ctx, None, ctypes.byref(key_len))
|
||||||
|
key = ctypes.create_string_buffer(key_len.value)
|
||||||
|
lib.EVP_PKEY_derive(ctx, key, ctypes.byref(key_len))
|
||||||
|
|
||||||
|
return bytes(key)
|
||||||
|
finally:
|
||||||
|
lib.EVP_PKEY_CTX_free(ctx)
|
||||||
|
finally:
|
||||||
|
lib.EVP_PKEY_free(peer_pkey)
|
||||||
|
finally:
|
||||||
|
lib.EC_KEY_free(peer_eckey)
|
||||||
|
finally:
|
||||||
|
lib.EVP_PKEY_free(pkey)
|
||||||
|
finally:
|
||||||
|
lib.EC_KEY_free(eckey)
|
||||||
|
|
||||||
|
|
||||||
|
def _subject_to_bn(self, subject):
|
||||||
|
return BN(subject[:(len(self.order) + 7) // 8])
|
||||||
|
|
||||||
|
|
||||||
|
def sign(self, subject, private_key, recoverable, is_compressed, entropy):
    """ECDSA-sign *subject* with *private_key* using *entropy* as the nonce.

    All three byte-string arguments are raw bytes; *entropy* is the
    per-signature nonce k and MUST be unique and uniformly random for
    every signature.  Returns ``r || s`` (fixed-width), prefixed with a
    recovery byte when *recoverable* is true (31+recid for compressed
    addresses, 27+recid otherwise).  Raises ValueError when the nonce
    produces a degenerate signature (caller should retry with new entropy).
    """
    z = self._subject_to_bn(subject)
    private_key = BN(private_key)
    k = BN(entropy)

    rp = lib.EC_POINT_new(self.group)
    bn_ctx = BN.Context.get()
    try:
        # Fix Minerva: blind the scalar by adding the group order once or
        # twice, picking whichever sum keeps the bit length constant
        # (len(k1) == len(k2)), so the scalar multiplication below does not
        # leak the nonce length through timing.
        k1 = k + self.order
        k2 = k1 + self.order
        if len(k1) == len(k2):
            k = k2
        else:
            k = k1
        # R = k * G (generator multiplication; k.bn in the "generator" slot)
        if not lib.EC_POINT_mul(self.group, rp, k.bn, None, None, bn_ctx):
            raise ValueError("Could not generate R")
        # Convert to affine coordinates
        rx = BN()
        ry = BN()
        if lib.EC_POINT_get_affine_coordinates_GFp(self.group, rp, rx.bn, ry.bn, bn_ctx) != 1:
            raise ValueError("Failed to convert R to affine coordinates")
        r = rx % self.order
        if r == BN(0):
            raise ValueError("Invalid k")
        # Calculate s = k^-1 * (z + r * private_key) mod n
        s = (k.inverse(self.order) * (z + r * private_key)) % self.order
        if s == BN(0):
            raise ValueError("Invalid k")

        # Canonicalize to the low-s form (s <= n/2); remember the flip so
        # the recovery id below can compensate for it.
        inverted = False
        if s * BN(2) >= self.order:
            s = self.order - s
            inverted = True

        r_buf = r.bytes(self.public_key_length)
        s_buf = s.bytes(self.public_key_length)
        if recoverable:
            # Generate recid: parity of Ry, xor'ed with the low-s flip above
            recid = int(ry % BN(2)) ^ inverted
            # The line below is highly unlikely to matter in case of
            # secp256k1 but might make sense for other curves
            recid += 2 * int(rx // self.order)
            if is_compressed:
                return bytes([31 + recid]) + r_buf + s_buf
            else:
                # Uncompressed-address headers only encode recid 0..3
                if recid >= 4:
                    raise ValueError("Too big recovery ID, use compressed address instead")
                return bytes([27 + recid]) + r_buf + s_buf
        else:
            return r_buf + s_buf
    finally:
        # Always release the OpenSSL point, even on error paths
        lib.EC_POINT_free(rp)
|
||||||
|
|
||||||
|
|
||||||
|
def recover(self, signature, subject):
    """Recover the signer's public key from a recoverable ECDSA signature.

    *signature* is ``header || r || s`` where the header byte is
    27+recid (uncompressed) or 31+recid (compressed); *subject* is the
    signed message bytes.  Returns the affine public key (as produced by
    ``_point_to_affine``).  Raises ValueError on out-of-range components
    or when the embedded R cannot be reconstructed.
    """
    # Decode recid from the header byte (27-based vs 31-based encoding)
    recid = signature[0] - 27 if signature[0] < 31 else signature[0] - 31
    r = BN(signature[1:self.public_key_length + 1])
    s = BN(signature[self.public_key_length + 1:])

    # Verify bounds
    if r >= self.order:
        raise ValueError("r is out of bounds")
    if s >= self.order:
        raise ValueError("s is out of bounds")

    bn_ctx = BN.Context.get()

    z = self._subject_to_bn(subject)

    # Q = u1*G + u2*R  with  u1 = -z * r^-1,  u2 = s * r^-1  (mod n)
    rinv = r.inverse(self.order)
    u1 = (-z * rinv) % self.order
    u2 = (s * rinv) % self.order

    # Recover R: its x coordinate is r, possibly shifted by the order
    # (recid // 2 encodes how many times rx wrapped past n)
    rx = r + BN(recid // 2) * self.order
    if rx >= self.p:
        raise ValueError("Rx is out of bounds")
    rp = lib.EC_POINT_new(self.group)
    if not rp:
        raise ValueError("Could not create R")
    try:
        # Decompress with the even-y prefix (0x02) first ...
        init_buf = b"\x02" + rx.bytes(self.public_key_length)
        if not lib.EC_POINT_oct2point(self.group, rp, init_buf, len(init_buf), bn_ctx):
            raise ValueError("Could not use Rx to initialize point")
        ry = BN()
        if lib.EC_POINT_get_affine_coordinates_GFp(self.group, rp, None, ry.bn, bn_ctx) != 1:
            raise ValueError("Failed to convert R to affine coordinates")
        if int(ry % BN(2)) != recid % 2:
            # ... then fix Ry sign when its parity disagrees with recid
            ry = self.p - ry
            if lib.EC_POINT_set_affine_coordinates_GFp(self.group, rp, rx.bn, ry.bn, bn_ctx) != 1:
                raise ValueError("Failed to update R coordinates")

        # Recover public key: result = u1*G + u2*R
        result = lib.EC_POINT_new(self.group)
        if not result:
            raise ValueError("Could not create point")
        try:
            if not lib.EC_POINT_mul(self.group, result, u1.bn, rp, u2.bn, bn_ctx):
                raise ValueError("Could not recover public key")
            return self._point_to_affine(result)
        finally:
            lib.EC_POINT_free(result)
    finally:
        lib.EC_POINT_free(rp)
|
||||||
|
|
||||||
|
|
||||||
|
def verify(self, signature, subject, public_key):
    """Verify a plain (non-recoverable) ECDSA signature ``r || s``.

    *public_key* is a 2-tuple of (x, y) coordinate byte strings;
    *subject* is the signed message bytes.  Returns True on success and
    raises ValueError when the signature is malformed or does not match.
    """
    r_raw = signature[:self.public_key_length]
    r = BN(r_raw)
    s = BN(signature[self.public_key_length:])
    # Both components must be reduced mod the group order
    if r >= self.order:
        raise ValueError("r is out of bounds")
    if s >= self.order:
        raise ValueError("s is out of bounds")

    bn_ctx = BN.Context.get()

    z = self._subject_to_bn(subject)

    pub_p = lib.EC_POINT_new(self.group)
    if not pub_p:
        raise ValueError("Could not create public key point")
    try:
        # 0x04 prefix == uncompressed SEC1 point encoding (x || y)
        init_buf = b"\x04" + public_key[0] + public_key[1]
        if not lib.EC_POINT_oct2point(self.group, pub_p, init_buf, len(init_buf), bn_ctx):
            raise ValueError("Could initialize point")

        # u1 = z * s^-1,  u2 = r * s^-1  (mod n)
        sinv = s.inverse(self.order)
        u1 = (z * sinv) % self.order
        u2 = (r * sinv) % self.order

        # Recover public key:
        # compute R' = u1*G + u2*Q and compare its x coordinate with r
        result = lib.EC_POINT_new(self.group)
        if not result:
            raise ValueError("Could not create point")
        try:
            if not lib.EC_POINT_mul(self.group, result, u1.bn, pub_p, u2.bn, bn_ctx):
                raise ValueError("Could not recover public key")
            if BN(self._point_to_affine(result)[0]) % self.order != r:
                raise ValueError("Invalid signature")
            return True
        finally:
            lib.EC_POINT_free(result)
    finally:
        lib.EC_POINT_free(pub_p)
|
||||||
|
|
||||||
|
|
||||||
|
def derive_child(self, seed, child):
    """Derive a child private key from *seed* and 32-bit index *child*.

    BIP32-style two-round HMAC-SHA512 derivation: round one produces a
    master key from the seed, round two tweaks it with the compressed
    master public key and the big-endian child index.  Returns the
    resulting private key as fixed-width bytes.
    """
    # Round 1: master key material from the seed
    digest = hmac.new(key=b"Bitcoin seed", msg=seed, digestmod="sha512").digest()
    master_key = digest[:32]
    x, y = self.private_to_public(master_key)
    # SEC1 compressed encoding: 0x02/0x03 prefix chosen by y parity
    compressed_pub = bytes([0x02 + (y[-1] % 2)]) + x
    master_bn = BN(master_key)

    # Round 2: tweak keyed by the chain code (second half of the digest),
    # over the compressed public key plus the index as 4 big-endian bytes
    # (masking reproduces the original loop's truncation to 32 bits)
    index_bytes = (child & 0xFFFFFFFF).to_bytes(4, "big")
    digest = hmac.new(
        key=digest[32:], msg=compressed_pub + index_bytes, digestmod="sha512"
    ).digest()
    tweak_bn = BN(digest[:32])

    return ((master_bn + tweak_bn) % self.order).bytes(self.public_key_length)
|
||||||
|
|
||||||
|
|
||||||
|
@classmethod
def get_backend(cls):
    """Return the human-readable identifier of the loaded OpenSSL backend."""
    return openssl_backend
|
||||||
|
|
||||||
|
|
||||||
|
# Module-level singleton wiring the OpenSSL elliptic-curve backend into ECC.
ecc = ECC(EllipticCurveBackend, aes)
|
|
@ -0,0 +1,98 @@
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import ctypes
|
||||||
|
import ctypes.util
|
||||||
|
from .discovery import discover as user_discover
|
||||||
|
|
||||||
|
|
||||||
|
# Disable false-positive _MEIPASS
|
||||||
|
# pylint: disable=no-member,protected-access
|
||||||
|
|
||||||
|
# Discover OpenSSL library
def discover_paths():
    """Return candidate paths/names for the OpenSSL crypto shared library.

    User-supplied paths (from .discovery) come first so they take
    precedence over the platform defaults.  Entries may be None when
    ``ctypes.util.find_library`` finds nothing; callers must skip falsy
    entries.
    """
    # Search local files first
    # NOTE: sys.platform is "darwin" on macOS, which *contains* "win" --
    # a bare substring test would send macOS down the Windows branch, so
    # use startswith instead (bug fix).
    if sys.platform.startswith("win") or "cygwin" in sys.platform:
        # Windows
        names = [
            "libeay32.dll"
        ]
        openssl_paths = [os.path.abspath(path) for path in names]
        # Bare names too, so ctypes can search the DLL path
        openssl_paths += names
        if hasattr(sys, "_MEIPASS"):
            # PyInstaller bundle: look next to the extracted binaries.
            # Must join with the bare names, not the already-absolute
            # paths -- os.path.join discards its left operand when the
            # right one is absolute (bug fix).
            openssl_paths += [os.path.join(sys._MEIPASS, path) for path in names]
        openssl_paths.append(ctypes.util.find_library("libeay32"))
    elif "darwin" in sys.platform:
        # Mac OS
        names = [
            "libcrypto.dylib",
            "libcrypto.1.1.0.dylib",
            "libcrypto.1.0.2.dylib",
            "libcrypto.1.0.1.dylib",
            "libcrypto.1.0.0.dylib",
            "libcrypto.0.9.8.dylib"
        ]
        openssl_paths = [os.path.abspath(path) for path in names]
        openssl_paths += names
        openssl_paths += [
            "/usr/local/opt/openssl/lib/libcrypto.dylib"
        ]
        if hasattr(sys, "_MEIPASS") and "RESOURCEPATH" in os.environ:
            # py2app/PyInstaller .app bundle: Frameworks directory
            openssl_paths += [
                os.path.join(os.environ["RESOURCEPATH"], "..", "Frameworks", name)
                for name in names
            ]
        openssl_paths.append(ctypes.util.find_library("ssl"))
    else:
        # Linux, BSD and such
        names = [
            "libcrypto.so",
            "libssl.so",
            "libcrypto.so.1.1.0",
            "libssl.so.1.1.0",
            "libcrypto.so.1.0.2",
            "libssl.so.1.0.2",
            "libcrypto.so.1.0.1",
            "libssl.so.1.0.1",
            "libcrypto.so.1.0.0",
            "libssl.so.1.0.0",
            "libcrypto.so.0.9.8",
            "libssl.so.0.9.8"
        ]
        openssl_paths = [os.path.abspath(path) for path in names]
        openssl_paths += names
        if hasattr(sys, "_MEIPASS"):
            openssl_paths += [os.path.join(sys._MEIPASS, path) for path in names]
        openssl_paths.append(ctypes.util.find_library("ssl"))
    # User-provided candidates take precedence; normalize str/None/empty
    lst = user_discover()
    if isinstance(lst, str):
        lst = [lst]
    elif not lst:
        lst = []
    return lst + openssl_paths
|
||||||
|
|
||||||
|
|
||||||
|
def discover_library():
    """Load and return the first OpenSSL library that ctypes can open.

    Iterates the candidates from discover_paths(), skipping falsy
    entries and load failures; raises OSError when none loads.
    """
    candidates = (path for path in discover_paths() if path)
    for candidate in candidates:
        try:
            return ctypes.CDLL(candidate)
        except OSError:
            continue
    raise OSError("OpenSSL is unavailable")
|
||||||
|
|
||||||
|
|
||||||
|
# Load the OpenSSL shared library once at import time.
lib = discover_library()

# Initialize internal state
try:
    # OpenSSL < 1.1 requires explicit algorithm-table initialization;
    # the symbol no longer exists in 1.1+, where init is automatic.
    lib.OPENSSL_add_all_algorithms_conf()
except AttributeError:
    pass

try:
    # OpenSSL >= 1.1 exposes OpenSSL_version()
    lib.OpenSSL_version.restype = ctypes.c_char_p
    openssl_backend = lib.OpenSSL_version(0).decode()
except AttributeError:
    # Older OpenSSL / LibreSSL keep the SSLeay naming
    lib.SSLeay_version.restype = ctypes.c_char_p
    openssl_backend = lib.SSLeay_version(0).decode()

# Record which shared object was actually loaded (ctypes private attr)
openssl_backend += " at " + lib._name
|
|
@ -0,0 +1,11 @@
|
||||||
|
# pylint: disable=too-few-public-methods
|
||||||
|
|
||||||
|
from .library import openssl_backend
|
||||||
|
|
||||||
|
|
||||||
|
class RSA:
    """Stub RSA interface; only reports the backend identifier."""

    def get_backend(self):
        """Return the human-readable identifier of the OpenSSL backend."""
        return openssl_backend
|
||||||
|
|
||||||
|
|
||||||
|
# Module-level singleton mirroring the ecc/aes backend objects.
rsa = RSA()
|
|
@ -0,0 +1,23 @@
|
||||||
|
Copyright (c) 2012, Packetloop. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are met:
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in the
|
||||||
|
documentation and/or other materials provided with the distribution.
|
||||||
|
* Neither the name of Packetloop nor the names of its contributors may be
|
||||||
|
used to endorse or promote products derived from this software without
|
||||||
|
specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||||
|
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
|
||||||
|
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||||
|
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||||
|
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||||
|
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,28 @@
|
||||||
|
# subtl
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
SUBTL is a **s**imple **U**DP **B**itTorrent **t**racker **l**ibrary for Python, licensed under the modified BSD license.
|
||||||
|
|
||||||
|
## Example
|
||||||
|
|
||||||
|
This short example will list a few IP Addresses from a certain hash:
|
||||||
|
|
||||||
|
from subtl import UdpTrackerClient
|
||||||
|
utc = UdpTrackerClient('tracker.openbittorrent.com', 80)
|
||||||
|
utc.connect()
|
||||||
|
if not utc.poll_once():
|
||||||
|
raise Exception('Could not connect')
|
||||||
|
print('Success!')
|
||||||
|
|
||||||
|
utc.announce(info_hash='089184ED52AA37F71801391C451C5D5ADD0D9501')
|
||||||
|
data = utc.poll_once()
|
||||||
|
if not data:
|
||||||
|
raise Exception('Could not announce')
|
||||||
|
for a in data['response']['peers']:
|
||||||
|
print(a)
|
||||||
|
|
||||||
|
## Caveats
|
||||||
|
|
||||||
|
* There is no automatic retrying of sending packets yet.
|
||||||
|
* This library won't download torrent files--it is simply a tracker client.
|
|
@ -0,0 +1,209 @@
|
||||||
|
'''
|
||||||
|
Based on the specification at http://bittorrent.org/beps/bep_0015.html
|
||||||
|
'''
|
||||||
|
import binascii
|
||||||
|
import random
|
||||||
|
import struct
|
||||||
|
import time
|
||||||
|
import socket
|
||||||
|
from collections import defaultdict
|
||||||
|
|
||||||
|
|
||||||
|
__version__ = '0.0.1'

# UDP tracker protocol action ids (BEP 15)
CONNECT = 0
ANNOUNCE = 1
SCRAPE = 2
ERROR = 3


class UdpTrackerClientException(Exception):
    """Raised when the tracker client is misused or given bad arguments."""
    pass


class UdpTrackerClient:
    """Minimal UDP BitTorrent tracker client (BEP 15).

    Usage: call connect() then poll_once() to obtain a connection id,
    then announce()/scrape() followed by poll_once() per request.
    Requests and responses are matched through random transaction ids.
    """

    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.peer_port = 6881
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Magic initial connection id mandated by BEP 15
        self.conn_id = 0x41727101980
        # trans_id -> transaction dict, removed once completed
        self.transactions = {}
        self.peer_id = self._generate_peer_id()
        # Receive timeout in seconds for poll_once()
        self.timeout = 9

    def connect(self):
        """Send a connect request; the response carries a new conn_id."""
        return self._send(CONNECT)

    def announce(self, **kwargs):
        """Send an announce request.

        info_hash (20 raw bytes) is required; other BEP 15 announce
        fields default sensibly and can be overridden via kwargs.
        Returns the pending transaction dict.
        """
        if not kwargs:
            raise UdpTrackerClientException('arguments missing')
        args = {
            'peer_id': self.peer_id,
            'downloaded': 0,
            'left': 0,
            'uploaded': 0,
            'event': 0,
            'key': 0,
            'num_want': 10,
            'ip_address': 0,
            'port': self.peer_port,
        }
        args.update(kwargs)

        fields = 'info_hash peer_id downloaded left uploaded event ' \
                 'ip_address key num_want port'

        # Check and raise if missing fields
        self._check_fields(args, fields)

        values = [args[a] for a in fields.split()]
        # peer_id is generated as str; struct '20s' needs bytes
        values[1] = values[1].encode("utf8")
        payload = struct.pack('!20s20sQQQLLLLH', *values)
        return self._send(ANNOUNCE, payload)

    def scrape(self, info_hash_list):
        """Send a scrape request for up to 74 info hashes (raw bytes each)."""
        if len(info_hash_list) > 74:
            raise UdpTrackerClientException('Max info_hashes is 74')

        # Bug fix: the payload must be bytes -- building a str here made
        # `header + payload` in _send() raise TypeError on Python 3.
        payload = b''.join(info_hash_list)

        trans = self._send(SCRAPE, payload)
        trans['sent_hashes'] = info_hash_list
        return trans

    def poll_once(self):
        """Wait up to self.timeout for one response.

        Returns the completed transaction dict (with 'response' filled
        in), or None on timeout.
        """
        self.sock.settimeout(self.timeout)
        try:
            response = self.sock.recv(10240)
        except socket.timeout:
            return

        # Response layout: 4-byte action, 4-byte transaction id, payload
        header = response[:8]
        payload = response[8:]
        action, trans_id = struct.unpack('!LL', header)
        try:
            trans = self.transactions[trans_id]
        except KeyError:
            self.error('transaction_id not found')
            return
        trans['response'] = self._process_response(action, payload, trans)
        trans['completed'] = True
        del self.transactions[trans_id]
        return trans

    def error(self, message):
        """Report a protocol-level error by raising."""
        raise Exception('error: {}'.format(message))

    def _send(self, action, payload=None):
        # Register the transaction, then fire the datagram at the tracker.
        if not payload:
            payload = b''
        trans_id, header = self._request_header(action)
        self.transactions[trans_id] = trans = {
            'action': action,
            'time': time.time(),
            'payload': payload,
            'completed': False,
        }
        self.sock.connect((self.host, self.port))
        self.sock.send(header + payload)
        return trans

    def _request_header(self, action):
        # 8-byte conn_id, 4-byte action, 4-byte random transaction id
        trans_id = random.randint(0, (1 << 32) - 1)
        return trans_id, struct.pack('!QLL', self.conn_id, action, trans_id)

    def _process_response(self, action, payload, trans):
        # Dispatch on the action id echoed by the tracker.
        if action == CONNECT:
            return self._process_connect(payload, trans)
        elif action == ANNOUNCE:
            return self._process_announce(payload, trans)
        elif action == SCRAPE:
            return self._process_scrape(payload, trans)
        elif action == ERROR:
            return self._process_error(payload, trans)
        else:
            raise UdpTrackerClientException(
                'Unknown action response: {}'.format(action))

    def _process_connect(self, payload, trans):
        # The connect response replaces the magic conn_id for all
        # subsequent requests.
        self.conn_id = struct.unpack('!Q', payload)[0]
        return self.conn_id

    def _process_announce(self, payload, trans):
        # Fixed header: interval, leechers, seeders (all 32-bit).
        info_struct = '!LLL'
        info_size = struct.calcsize(info_struct)
        info = payload[:info_size]
        interval, leechers, seeders = struct.unpack(info_struct, info)

        # Remainder is a packed list of (IPv4, port) peer entries.
        peer_data = payload[info_size:]
        peer_struct = '!LH'
        peer_size = struct.calcsize(peer_struct)
        peer_count = len(peer_data) // peer_size
        peers = []

        for peer_offset in range(peer_count):
            off = peer_size * peer_offset
            peer = peer_data[off:off + peer_size]
            addr, port = struct.unpack(peer_struct, peer)
            peers.append({
                'addr': socket.inet_ntoa(struct.pack('!L', addr)),
                'port': port,
            })

        return {
            'interval': interval,
            'leechers': leechers,
            'seeders': seeders,
            'peers': peers,
        }

    def _process_scrape(self, payload, trans):
        # One (seeders, completed, leechers) triple per requested hash,
        # in the order the hashes were sent.
        info_struct = '!LLL'
        info_size = struct.calcsize(info_struct)
        # Bug fix: floor division -- "/" yields a float on Python 3 and
        # range(float) raises TypeError.
        info_count = len(payload) // info_size
        hashes = trans['sent_hashes']
        response = {}
        for info_offset in range(info_count):
            off = info_size * info_offset
            info = payload[off:off + info_size]
            seeders, completed, leechers = struct.unpack(info_struct, info)
            response[hashes[info_offset]] = {
                'seeders': seeders,
                'completed': completed,
                'leechers': leechers,
            }
        return response

    def _process_error(self, payload, trans):
        '''
        I haven't seen this action type be sent from a tracker, but I've left
        it here for the possibility.
        '''
        self.error(payload)
        return False

    def _generate_peer_id(self):
        '''http://www.bittorrent.org/beps/bep_0020.html'''
        peer_id = '-PU' + __version__.replace('.', '-') + '-'
        remaining = 20 - len(peer_id)
        numbers = [str(random.randint(0, 9)) for _ in range(remaining)]
        peer_id += ''.join(numbers)
        assert(len(peer_id) == 20)
        return peer_id

    def _check_fields(self, args, fields):
        # Bug fix: the original iterated the *characters* of the fields
        # string and used dict.get(), which never raises KeyError -- the
        # validation was a no-op. Split into field names and test
        # membership instead.
        for f in fields.split():
            if f not in args:
                raise UdpTrackerClientException('field missing: {}'.format(f))
Loading…
Reference in New Issue