diff --git a/plugins/AnnounceBitTorrent/AnnounceBitTorrentPlugin.py b/plugins/AnnounceBitTorrent/AnnounceBitTorrentPlugin.py
new file mode 100644
index 00000000..734070dd
--- /dev/null
+++ b/plugins/AnnounceBitTorrent/AnnounceBitTorrentPlugin.py
@@ -0,0 +1,148 @@
+import time
+import urllib.request
+import urllib.parse
+import struct
+import socket
+
+import lib.bencode_open as bencode_open
+from lib.subtl.subtl import UdpTrackerClient
+import socks
+import sockshandler
+import gevent
+
+from Plugin import PluginManager
+from Config import config
+from Debug import Debug
+from util import helper
+
+
+# We can only import plugin host classes after the plugins are loaded
+@PluginManager.afterLoad
+def importHostClasses():
+ global Peer, AnnounceError
+ from Peer import Peer
+ from Site.SiteAnnouncer import AnnounceError
+
+
+@PluginManager.registerTo("SiteAnnouncer")
+class SiteAnnouncerPlugin(object):
+ def getSupportedTrackers(self):
+ trackers = super(SiteAnnouncerPlugin, self).getSupportedTrackers()
+ if config.disable_udp or config.trackers_proxy != "disable":
+ trackers = [tracker for tracker in trackers if not tracker.startswith("udp://")]
+
+ return trackers
+
+ def getTrackerHandler(self, protocol):
+ if protocol == "udp":
+ handler = self.announceTrackerUdp
+ elif protocol == "http":
+ handler = self.announceTrackerHttp
+ elif protocol == "https":
+ handler = self.announceTrackerHttps
+ else:
+ handler = super(SiteAnnouncerPlugin, self).getTrackerHandler(protocol)
+ return handler
+
+ def announceTrackerUdp(self, tracker_address, mode="start", num_want=10):
+ s = time.time()
+ if config.disable_udp:
+ raise AnnounceError("Udp disabled by config")
+ if config.trackers_proxy != "disable":
+ raise AnnounceError("Udp trackers not available with proxies")
+
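+        # tracker_address is expected as "host:port" (optionally with a "/path" suffix,
+        # which is ignored); only the host and port parts are used for the UDP client.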
+ ip, port = tracker_address.split("/")[0].split(":")
+ tracker = UdpTrackerClient(ip, int(port))
+ if self.connection_server.getIpType(ip) in self.getOpenedServiceTypes():
+ tracker.peer_port = self.fileserver_port
+ else:
+ tracker.peer_port = 0
+ tracker.connect()
+ if not tracker.poll_once():
+ raise AnnounceError("Could not connect")
+ tracker.announce(info_hash=self.site.address_sha1, num_want=num_want, left=431102370)
+ back = tracker.poll_once()
+ if not back:
+ raise AnnounceError("No response after %.0fs" % (time.time() - s))
+ elif type(back) is dict and "response" in back:
+ peers = back["response"]["peers"]
+ else:
+ raise AnnounceError("Invalid response: %r" % back)
+
+ return peers
+
+ def httpRequest(self, url):
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+ 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
+ 'Accept-Encoding': 'none',
+ 'Accept-Language': 'en-US,en;q=0.8',
+ 'Connection': 'keep-alive'
+ }
+
+ req = urllib.request.Request(url, headers=headers)
+
+ if config.trackers_proxy == "tor":
+ tor_manager = self.site.connection_server.tor_manager
+ handler = sockshandler.SocksiPyHandler(socks.SOCKS5, tor_manager.proxy_ip, tor_manager.proxy_port)
+ opener = urllib.request.build_opener(handler)
+ return opener.open(req, timeout=50)
+ elif config.trackers_proxy == "disable":
+ return urllib.request.urlopen(req, timeout=25)
+ else:
+ proxy_ip, proxy_port = config.trackers_proxy.split(":")
+ handler = sockshandler.SocksiPyHandler(socks.SOCKS5, proxy_ip, int(proxy_port))
+ opener = urllib.request.build_opener(handler)
+ return opener.open(req, timeout=50)
+
+ def announceTrackerHttps(self, *args, **kwargs):
+ kwargs["protocol"] = "https"
+ return self.announceTrackerHttp(*args, **kwargs)
+
+ def announceTrackerHttp(self, tracker_address, mode="start", num_want=10, protocol="http"):
+ tracker_ip, tracker_port = tracker_address.rsplit(":", 1)
+ if self.connection_server.getIpType(tracker_ip) in self.getOpenedServiceTypes():
+ port = self.fileserver_port
+ else:
+ port = 1
+ params = {
+ 'info_hash': self.site.address_sha1,
+ 'peer_id': self.peer_id, 'port': port,
+ 'uploaded': 0, 'downloaded': 0, 'left': 431102370, 'compact': 1, 'numwant': num_want,
+ 'event': 'started'
+ }
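+        # Standard BitTorrent HTTP announce query fields (BEP 3): info_hash is the
+        # site's address_sha1 and compact=1 asks the tracker for the packed
+        # 6-byte-per-peer list that is decoded further below.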
+
+ url = protocol + "://" + tracker_address + "?" + urllib.parse.urlencode(params)
+
+ s = time.time()
+ response = None
+ # Load url
+ if config.tor == "always" or config.trackers_proxy != "disable":
+ timeout = 60
+ else:
+ timeout = 30
+
+ with gevent.Timeout(timeout, False): # Make sure of timeout
+ req = self.httpRequest(url)
+ response = req.read()
+ req.close()
+ req = None
+
+ if not response:
+ raise AnnounceError("No response after %.0fs" % (time.time() - s))
+
+ # Decode peers
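+        # Compact peer format: each entry is 6 bytes, a 4-byte big-endian IPv4 address
+        # followed by a 2-byte port; e.g. b"\x7f\x00\x00\x01\x1e\xd2" -> 127.0.0.1:7890 (illustrative).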
+ try:
+ peer_data = bencode_open.loads(response)[b"peers"]
+ response = None
+ peer_count = int(len(peer_data) / 6)
+ peers = []
+ for peer_offset in range(peer_count):
+ off = 6 * peer_offset
+ peer = peer_data[off:off + 6]
+ addr, port = struct.unpack('!LH', peer)
+ peers.append({"addr": socket.inet_ntoa(struct.pack('!L', addr)), "port": port})
+ except Exception as err:
+ raise AnnounceError("Invalid response: %r (%s)" % (response, Debug.formatException(err)))
+
+ return peers
diff --git a/plugins/AnnounceBitTorrent/__init__.py b/plugins/AnnounceBitTorrent/__init__.py
new file mode 100644
index 00000000..c7422855
--- /dev/null
+++ b/plugins/AnnounceBitTorrent/__init__.py
@@ -0,0 +1 @@
+from . import AnnounceBitTorrentPlugin
\ No newline at end of file
diff --git a/plugins/AnnounceBitTorrent/plugin_info.json b/plugins/AnnounceBitTorrent/plugin_info.json
new file mode 100644
index 00000000..824749ee
--- /dev/null
+++ b/plugins/AnnounceBitTorrent/plugin_info.json
@@ -0,0 +1,5 @@
+{
+ "name": "AnnounceBitTorrent",
+ "description": "Discover new peers using BitTorrent trackers.",
+ "default": "enabled"
+}
\ No newline at end of file
diff --git a/plugins/AnnounceLocal/AnnounceLocalPlugin.py b/plugins/AnnounceLocal/AnnounceLocalPlugin.py
new file mode 100644
index 00000000..01202774
--- /dev/null
+++ b/plugins/AnnounceLocal/AnnounceLocalPlugin.py
@@ -0,0 +1,147 @@
+import time
+
+import gevent
+
+from Plugin import PluginManager
+from Config import config
+from . import BroadcastServer
+
+
+@PluginManager.registerTo("SiteAnnouncer")
+class SiteAnnouncerPlugin(object):
+ def announce(self, force=False, *args, **kwargs):
+ local_announcer = self.site.connection_server.local_announcer
+
+ thread = None
+ if local_announcer and (force or time.time() - local_announcer.last_discover > 5 * 60):
+ thread = gevent.spawn(local_announcer.discover, force=force)
+ back = super(SiteAnnouncerPlugin, self).announce(force=force, *args, **kwargs)
+
+ if thread:
+ thread.join()
+
+ return back
+
+
+class LocalAnnouncer(BroadcastServer.BroadcastServer):
+ def __init__(self, server, listen_port):
+ super(LocalAnnouncer, self).__init__("zeronet", listen_port=listen_port)
+ self.server = server
+
+ self.sender_info["peer_id"] = self.server.peer_id
+ self.sender_info["port"] = self.server.port
+ self.sender_info["broadcast_port"] = listen_port
+ self.sender_info["rev"] = config.rev
+
+ self.known_peers = {}
+ self.last_discover = 0
+
+ def discover(self, force=False):
+ self.log.debug("Sending discover request (force: %s)" % force)
+ self.last_discover = time.time()
+ if force: # Probably new site added, clean cache
+ self.known_peers = {}
+
+ for peer_id, known_peer in list(self.known_peers.items()):
+ if time.time() - known_peer["found"] > 20 * 60:
+ del(self.known_peers[peer_id])
+ self.log.debug("Timeout, removing from known_peers: %s" % peer_id)
+ self.broadcast({"cmd": "discoverRequest", "params": {}}, port=self.listen_port)
+
+ def actionDiscoverRequest(self, sender, params):
+ back = {
+ "cmd": "discoverResponse",
+ "params": {
+ "sites_changed": self.server.site_manager.sites_changed
+ }
+ }
+
+ if sender["peer_id"] not in self.known_peers:
+ self.known_peers[sender["peer_id"]] = {"added": time.time(), "sites_changed": 0, "updated": 0, "found": time.time()}
+ self.log.debug("Got discover request from unknown peer %s (%s), time to refresh known peers" % (sender["ip"], sender["peer_id"]))
+ gevent.spawn_later(1.0, self.discover) # Let the response arrive first to the requester
+
+ return back
+
+ def actionDiscoverResponse(self, sender, params):
+ if sender["peer_id"] in self.known_peers:
+ self.known_peers[sender["peer_id"]]["found"] = time.time()
+ if params["sites_changed"] != self.known_peers.get(sender["peer_id"], {}).get("sites_changed"):
+ # Peer's site list changed, request the list of new sites
+ return {"cmd": "siteListRequest"}
+ else:
+ # Peer's site list is the same
+ for site in self.server.sites.values():
+ peer = site.peers.get("%s:%s" % (sender["ip"], sender["port"]))
+ if peer:
+ peer.found("local")
+
+ def actionSiteListRequest(self, sender, params):
+ back = []
+ sites = list(self.server.sites.values())
+
+        # Split addresses into groups of 100 to avoid the UDP size limit
+ site_groups = [sites[i:i + 100] for i in range(0, len(sites), 100)]
+ for site_group in site_groups:
+ res = {}
+ res["sites_changed"] = self.server.site_manager.sites_changed
+ res["sites"] = [site.address_hash for site in site_group]
+ back.append({"cmd": "siteListResponse", "params": res})
+ return back
+
+ def actionSiteListResponse(self, sender, params):
+ s = time.time()
+ peer_sites = set(params["sites"])
+ num_found = 0
+ added_sites = []
+ for site in self.server.sites.values():
+ if site.address_hash in peer_sites:
+ added = site.addPeer(sender["ip"], sender["port"], source="local")
+ num_found += 1
+ if added:
+ site.worker_manager.onPeers()
+ site.updateWebsocket(peers_added=1)
+ added_sites.append(site)
+
+ # Save sites changed value to avoid unnecessary site list download
+ if sender["peer_id"] not in self.known_peers:
+ self.known_peers[sender["peer_id"]] = {"added": time.time()}
+
+ self.known_peers[sender["peer_id"]]["sites_changed"] = params["sites_changed"]
+ self.known_peers[sender["peer_id"]]["updated"] = time.time()
+ self.known_peers[sender["peer_id"]]["found"] = time.time()
+
+ self.log.debug(
+ "Tracker result: Discover from %s response parsed in %.3fs, found: %s added: %s of %s" %
+ (sender["ip"], time.time() - s, num_found, added_sites, len(peer_sites))
+ )
+
+
+@PluginManager.registerTo("FileServer")
+class FileServerPlugin(object):
+ def __init__(self, *args, **kwargs):
+ super(FileServerPlugin, self).__init__(*args, **kwargs)
+ if config.broadcast_port and config.tor != "always" and not config.disable_udp:
+ self.local_announcer = LocalAnnouncer(self, config.broadcast_port)
+ else:
+ self.local_announcer = None
+
+ def start(self, *args, **kwargs):
+ if self.local_announcer:
+ gevent.spawn(self.local_announcer.start)
+ return super(FileServerPlugin, self).start(*args, **kwargs)
+
+ def stop(self, ui_websocket=None):
+ if self.local_announcer:
+ self.local_announcer.stop()
+ res = super(FileServerPlugin, self).stop(ui_websocket=ui_websocket)
+ return res
+
+
+@PluginManager.registerTo("ConfigPlugin")
+class ConfigPlugin(object):
+ def createArguments(self):
+ group = self.parser.add_argument_group("AnnounceLocal plugin")
+ group.add_argument('--broadcast_port', help='UDP broadcasting port for local peer discovery', default=1544, type=int, metavar='port')
+
+ return super(ConfigPlugin, self).createArguments()
diff --git a/plugins/AnnounceLocal/BroadcastServer.py b/plugins/AnnounceLocal/BroadcastServer.py
new file mode 100644
index 00000000..74678896
--- /dev/null
+++ b/plugins/AnnounceLocal/BroadcastServer.py
@@ -0,0 +1,139 @@
+import socket
+import logging
+import time
+from contextlib import closing
+
+from Debug import Debug
+from util import UpnpPunch
+from util import Msgpack
+
+
+class BroadcastServer(object):
+ def __init__(self, service_name, listen_port=1544, listen_ip=''):
+ self.log = logging.getLogger("BroadcastServer")
+ self.listen_port = listen_port
+ self.listen_ip = listen_ip
+
+ self.running = False
+ self.sock = None
+ self.sender_info = {"service": service_name}
+
+ def createBroadcastSocket(self):
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ if hasattr(socket, 'SO_REUSEPORT'):
+ try:
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ except Exception as err:
+ self.log.warning("Error setting SO_REUSEPORT: %s" % err)
+
+ binded = False
+ for retry in range(3):
+ try:
+ sock.bind((self.listen_ip, self.listen_port))
+ binded = True
+ break
+ except Exception as err:
+ self.log.error(
+ "Socket bind to %s:%s error: %s, retry #%s" %
+ (self.listen_ip, self.listen_port, Debug.formatException(err), retry)
+ )
+ time.sleep(retry)
+
+ if binded:
+ return sock
+ else:
+ return False
+
+ def start(self): # Listens for discover requests
+ self.sock = self.createBroadcastSocket()
+ if not self.sock:
+ self.log.error("Unable to listen on port %s" % self.listen_port)
+ return
+
+ self.log.debug("Started on port %s" % self.listen_port)
+
+ self.running = True
+
+ while self.running:
+ try:
+ data, addr = self.sock.recvfrom(8192)
+ except Exception as err:
+ if self.running:
+ self.log.error("Listener receive error: %s" % err)
+ continue
+
+ if not self.running:
+ break
+
+ try:
+ message = Msgpack.unpack(data)
+ response_addr, message = self.handleMessage(addr, message)
+ if message:
+ self.send(response_addr, message)
+ except Exception as err:
+ self.log.error("Handlemessage error: %s" % Debug.formatException(err))
+ self.log.debug("Stopped listening on port %s" % self.listen_port)
+
+ def stop(self):
+ self.log.debug("Stopping, socket: %s" % self.sock)
+ self.running = False
+ if self.sock:
+ self.sock.close()
+
+ def send(self, addr, message):
+ if type(message) is not list:
+ message = [message]
+
+ for message_part in message:
+ message_part["sender"] = self.sender_info
+
+ self.log.debug("Send to %s: %s" % (addr, message_part["cmd"]))
+ with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:
+ sock.sendto(Msgpack.pack(message_part), addr)
+
+ def getMyIps(self):
+ return UpnpPunch._get_local_ips()
+
+ def broadcast(self, message, port=None):
+ if not port:
+ port = self.listen_port
+
+ my_ips = self.getMyIps()
+ addr = ("255.255.255.255", port)
+
+ message["sender"] = self.sender_info
+ self.log.debug("Broadcast using ips %s on port %s: %s" % (my_ips, port, message["cmd"]))
+
+ for my_ip in my_ips:
+ try:
+ with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+ sock.bind((my_ip, 0))
+ sock.sendto(Msgpack.pack(message), addr)
+ except Exception as err:
+ self.log.warning("Error sending broadcast using ip %s: %s" % (my_ip, err))
+
+ def handleMessage(self, addr, message):
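+        # Every datagram is a msgpack-encoded dict of the form
+        # {"cmd": <name>, "params": {...}, "sender": <sender_info>}; the cmd is
+        # dispatched to the matching action<Cmd> method and the reply is sent back
+        # to the sender's broadcast_port.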
+ self.log.debug("Got from %s: %s" % (addr, message["cmd"]))
+ cmd = message["cmd"]
+ params = message.get("params", {})
+ sender = message["sender"]
+ sender["ip"] = addr[0]
+
+ func_name = "action" + cmd[0].upper() + cmd[1:]
+ func = getattr(self, func_name, None)
+
+ if sender["service"] != "zeronet" or sender["peer_id"] == self.sender_info["peer_id"]:
+ # Skip messages not for us or sent by us
+ message = None
+ elif func:
+ message = func(sender, params)
+ else:
+ self.log.debug("Unknown cmd: %s" % cmd)
+ message = None
+
+ return (sender["ip"], sender["broadcast_port"]), message
diff --git a/plugins/AnnounceLocal/Test/TestAnnounce.py b/plugins/AnnounceLocal/Test/TestAnnounce.py
new file mode 100644
index 00000000..4def02ed
--- /dev/null
+++ b/plugins/AnnounceLocal/Test/TestAnnounce.py
@@ -0,0 +1,113 @@
+import time
+import copy
+
+import gevent
+import pytest
+import mock
+
+from AnnounceLocal import AnnounceLocalPlugin
+from File import FileServer
+from Test import Spy
+
+@pytest.fixture
+def announcer(file_server, site):
+ file_server.sites[site.address] = site
+ announcer = AnnounceLocalPlugin.LocalAnnouncer(file_server, listen_port=1100)
+ file_server.local_announcer = announcer
+ announcer.listen_port = 1100
+ announcer.sender_info["broadcast_port"] = 1100
+ announcer.getMyIps = mock.MagicMock(return_value=["127.0.0.1"])
+ announcer.discover = mock.MagicMock(return_value=False) # Don't send discover requests automatically
+ gevent.spawn(announcer.start)
+ time.sleep(0.5)
+
+ assert file_server.local_announcer.running
+ return file_server.local_announcer
+
+@pytest.fixture
+def announcer_remote(request, site_temp):
+ file_server_remote = FileServer("127.0.0.1", 1545)
+ file_server_remote.sites[site_temp.address] = site_temp
+ announcer = AnnounceLocalPlugin.LocalAnnouncer(file_server_remote, listen_port=1101)
+ file_server_remote.local_announcer = announcer
+ announcer.listen_port = 1101
+ announcer.sender_info["broadcast_port"] = 1101
+ announcer.getMyIps = mock.MagicMock(return_value=["127.0.0.1"])
+ announcer.discover = mock.MagicMock(return_value=False) # Don't send discover requests automatically
+ gevent.spawn(announcer.start)
+ time.sleep(0.5)
+
+ assert file_server_remote.local_announcer.running
+
+ def cleanup():
+ file_server_remote.stop()
+ request.addfinalizer(cleanup)
+
+
+ return file_server_remote.local_announcer
+
+@pytest.mark.usefixtures("resetSettings")
+@pytest.mark.usefixtures("resetTempSettings")
+class TestAnnounce:
+ def testSenderInfo(self, announcer):
+ sender_info = announcer.sender_info
+ assert sender_info["port"] > 0
+ assert len(sender_info["peer_id"]) == 20
+ assert sender_info["rev"] > 0
+
+ def testIgnoreSelfMessages(self, announcer):
+        # No response to messages that have the same peer_id as the server
+ assert not announcer.handleMessage(("0.0.0.0", 123), {"cmd": "discoverRequest", "sender": announcer.sender_info, "params": {}})[1]
+
+ # Response to messages with different peer id
+ sender_info = copy.copy(announcer.sender_info)
+ sender_info["peer_id"] += "-"
+ addr, res = announcer.handleMessage(("0.0.0.0", 123), {"cmd": "discoverRequest", "sender": sender_info, "params": {}})
+ assert res["params"]["sites_changed"] > 0
+
+ def testDiscoverRequest(self, announcer, announcer_remote):
+ assert len(announcer_remote.known_peers) == 0
+ with Spy.Spy(announcer_remote, "handleMessage") as responses:
+ announcer_remote.broadcast({"cmd": "discoverRequest", "params": {}}, port=announcer.listen_port)
+ time.sleep(0.1)
+
+ response_cmds = [response[1]["cmd"] for response in responses]
+ assert response_cmds == ["discoverResponse", "siteListResponse"]
+ assert len(responses[-1][1]["params"]["sites"]) == 1
+
+        # It should only request siteList if the sites_changed value differs from the last response
+ with Spy.Spy(announcer_remote, "handleMessage") as responses:
+ announcer_remote.broadcast({"cmd": "discoverRequest", "params": {}}, port=announcer.listen_port)
+ time.sleep(0.1)
+
+ response_cmds = [response[1]["cmd"] for response in responses]
+ assert response_cmds == ["discoverResponse"]
+
+ def testPeerDiscover(self, announcer, announcer_remote, site):
+ assert announcer.server.peer_id != announcer_remote.server.peer_id
+ assert len(list(announcer.server.sites.values())[0].peers) == 0
+ announcer.broadcast({"cmd": "discoverRequest"}, port=announcer_remote.listen_port)
+ time.sleep(0.1)
+ assert len(list(announcer.server.sites.values())[0].peers) == 1
+
+ def testRecentPeerList(self, announcer, announcer_remote, site):
+ assert len(site.peers_recent) == 0
+ assert len(site.peers) == 0
+ with Spy.Spy(announcer, "handleMessage") as responses:
+ announcer.broadcast({"cmd": "discoverRequest", "params": {}}, port=announcer_remote.listen_port)
+ time.sleep(0.1)
+ assert [response[1]["cmd"] for response in responses] == ["discoverResponse", "siteListResponse"]
+ assert len(site.peers_recent) == 1
+ assert len(site.peers) == 1
+
+        # It should update the peer without a siteListResponse
+ last_time_found = list(site.peers.values())[0].time_found
+ site.peers_recent.clear()
+ with Spy.Spy(announcer, "handleMessage") as responses:
+ announcer.broadcast({"cmd": "discoverRequest", "params": {}}, port=announcer_remote.listen_port)
+ time.sleep(0.1)
+ assert [response[1]["cmd"] for response in responses] == ["discoverResponse"]
+ assert len(site.peers_recent) == 1
+ assert list(site.peers.values())[0].time_found > last_time_found
+
+
diff --git a/plugins/AnnounceLocal/Test/conftest.py b/plugins/AnnounceLocal/Test/conftest.py
new file mode 100644
index 00000000..a88c642c
--- /dev/null
+++ b/plugins/AnnounceLocal/Test/conftest.py
@@ -0,0 +1,4 @@
+from src.Test.conftest import *
+
+from Config import config
+config.broadcast_port = 0
diff --git a/plugins/AnnounceLocal/Test/pytest.ini b/plugins/AnnounceLocal/Test/pytest.ini
new file mode 100644
index 00000000..d09210d1
--- /dev/null
+++ b/plugins/AnnounceLocal/Test/pytest.ini
@@ -0,0 +1,5 @@
+[pytest]
+python_files = Test*.py
+addopts = -rsxX -v --durations=6
+markers =
+ webtest: mark a test as a webtest.
\ No newline at end of file
diff --git a/plugins/AnnounceLocal/__init__.py b/plugins/AnnounceLocal/__init__.py
new file mode 100644
index 00000000..5b80abd2
--- /dev/null
+++ b/plugins/AnnounceLocal/__init__.py
@@ -0,0 +1 @@
+from . import AnnounceLocalPlugin
\ No newline at end of file
diff --git a/plugins/AnnounceLocal/plugin_info.json b/plugins/AnnounceLocal/plugin_info.json
new file mode 100644
index 00000000..2908cbf1
--- /dev/null
+++ b/plugins/AnnounceLocal/plugin_info.json
@@ -0,0 +1,5 @@
+{
+ "name": "AnnounceLocal",
+ "description": "Discover LAN clients using UDP broadcasting.",
+ "default": "enabled"
+}
\ No newline at end of file
diff --git a/plugins/AnnounceZero/AnnounceZeroPlugin.py b/plugins/AnnounceZero/AnnounceZeroPlugin.py
new file mode 100644
index 00000000..e71e221e
--- /dev/null
+++ b/plugins/AnnounceZero/AnnounceZeroPlugin.py
@@ -0,0 +1,143 @@
+import logging
+import time
+import itertools
+
+from Plugin import PluginManager
+from util import helper
+from Crypt import CryptRsa
+from Crypt import CryptEd25519
+
+allow_reload = False # No source reload supported in this plugin
+time_full_announced = {}  # Tracker address: last time all sites were announced to the tracker
+connection_pool = {} # Tracker address: Peer object
+
+
+# We can only import plugin host classes after the plugins are loaded
+@PluginManager.afterLoad
+def importHostClasses():
+ global Peer, AnnounceError
+ from Peer import Peer
+ from Site.SiteAnnouncer import AnnounceError
+
+
+# Process the result received from the tracker
+def processPeerRes(tracker_address, site, peers):
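+    # "peers" is one site's entry from the tracker response: a dict of packed addresses
+    # keyed by type ("onion" plus "ip4"/"ipv4"/"ipv6"); helper.unpackOnionAddress /
+    # helper.unpackAddress turn each packed entry back into an (address, port) pair.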
+ added = 0
+
+ # Onion
+ found_onion = 0
+ for packed_address in peers["onion"]:
+ found_onion += 1
+ peer_onion, peer_port = helper.unpackOnionAddress(packed_address)
+ if site.addPeer(peer_onion, peer_port, source="tracker"):
+ added += 1
+
+    # IPv4 / IPv6
+ found_ipv4 = 0
+ peers_normal = itertools.chain(peers.get("ip4", []), peers.get("ipv4", []), peers.get("ipv6", []))
+ for packed_address in peers_normal:
+ found_ipv4 += 1
+ peer_ip, peer_port = helper.unpackAddress(packed_address)
+ if site.addPeer(peer_ip, peer_port, source="tracker"):
+ added += 1
+
+ if added:
+ site.worker_manager.onPeers()
+ site.updateWebsocket(peers_added=added)
+ return added
+
+
+@PluginManager.registerTo("SiteAnnouncer")
+class SiteAnnouncerPlugin(object):
+ def getTrackerHandler(self, protocol):
+ if protocol == "zero":
+ return self.announceTrackerZero
+ else:
+ return super(SiteAnnouncerPlugin, self).getTrackerHandler(protocol)
+
+ def announceTrackerZero(self, tracker_address, mode="start", num_want=10):
+ global time_full_announced
+ s = time.time()
+
+ need_types = ["ip4"] # ip4 for backward compatibility reasons
+ need_types += self.site.connection_server.supported_ip_types
+ if self.site.connection_server.tor_manager.enabled:
+ need_types.append("onion")
+
+ if mode == "start" or mode == "more": # Single: Announce only this site
+ sites = [self.site]
+ full_announce = False
+        else:  # Multi: Announce all currently served sites
+ full_announce = True
+            if time.time() - time_full_announced.get(tracker_address, 0) < 60 * 15:  # Don't re-announce all sites within a short time
+ return None
+ time_full_announced[tracker_address] = time.time()
+ from Site import SiteManager
+ sites = [site for site in SiteManager.site_manager.sites.values() if site.isServing()]
+
+ # Create request
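+        # One request covers every site in "sites": its address_hash goes into "hashes"
+        # (and its .onion into "onions" when an onion service is available), "add" lists
+        # the address types we can serve and "need_types" the peer types we want back.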
+ add_types = self.getOpenedServiceTypes()
+ request = {
+ "hashes": [], "onions": [], "port": self.fileserver_port, "need_types": need_types, "need_num": 20, "add": add_types
+ }
+ for site in sites:
+ if "onion" in add_types:
+ onion = self.site.connection_server.tor_manager.getOnion(site.address)
+ request["onions"].append(onion)
+ request["hashes"].append(site.address_hash)
+
+ # Tracker can remove sites that we don't announce
+ if full_announce:
+ request["delete"] = True
+
+        # Send the request to the tracker
+ tracker_peer = connection_pool.get(tracker_address) # Re-use tracker connection if possible
+ if not tracker_peer:
+ tracker_ip, tracker_port = tracker_address.rsplit(":", 1)
+ tracker_peer = Peer(str(tracker_ip), int(tracker_port), connection_server=self.site.connection_server)
+ tracker_peer.is_tracker_connection = True
+ #tracker_peer.log_level = logging.INFO
+ connection_pool[tracker_address] = tracker_peer
+
+ res = tracker_peer.request("announce", request)
+
+ if not res or "peers" not in res:
+ if full_announce:
+ time_full_announced[tracker_address] = 0
+ raise AnnounceError("Invalid response: %s" % res)
+
+ # Add peers from response to site
+ site_index = 0
+ peers_added = 0
+ for site_res in res["peers"]:
+ site = sites[site_index]
+ peers_added += processPeerRes(tracker_address, site, site_res)
+ site_index += 1
+
+        # Check if we need to sign to prove ownership of the onion addresses
+ if "onion_sign_this" in res:
+ self.site.log.debug("Signing %s for %s to add %s onions" % (res["onion_sign_this"], tracker_address, len(sites)))
+ request["onion_signs"] = {}
+ request["onion_sign_this"] = res["onion_sign_this"]
+ request["need_num"] = 0
+ for site in sites:
+ onion = self.site.connection_server.tor_manager.getOnion(site.address)
+ publickey = self.site.connection_server.tor_manager.getPublickey(onion)
+ if publickey not in request["onion_signs"]:
+ sign = CryptRsa.sign(res["onion_sign_this"].encode("utf8"), self.site.connection_server.tor_manager.getPrivatekey(onion))
+ request["onion_signs"][publickey] = sign
+ res = tracker_peer.request("announce", request)
+ if not res or "onion_sign_this" in res:
+ if full_announce:
+ time_full_announced[tracker_address] = 0
+ raise AnnounceError("Announce onion address to failed: %s" % res)
+
+ if full_announce:
+            tracker_peer.remove()  # Close the connection, we don't need it in the next 5 minutes
+
+ self.site.log.debug(
+ "Tracker announce result: zero://%s (sites: %s, new peers: %s, add: %s, mode: %s) in %.3fs" %
+ (tracker_address, site_index, peers_added, add_types, mode, time.time() - s)
+ )
+
+ return True
diff --git a/plugins/AnnounceZero/__init__.py b/plugins/AnnounceZero/__init__.py
new file mode 100644
index 00000000..8aec5ddb
--- /dev/null
+++ b/plugins/AnnounceZero/__init__.py
@@ -0,0 +1 @@
+from . import AnnounceZeroPlugin
\ No newline at end of file
diff --git a/plugins/AnnounceZero/plugin_info.json b/plugins/AnnounceZero/plugin_info.json
new file mode 100644
index 00000000..50e7cf7f
--- /dev/null
+++ b/plugins/AnnounceZero/plugin_info.json
@@ -0,0 +1,5 @@
+{
+ "name": "AnnounceZero",
+ "description": "Announce using ZeroNet protocol.",
+ "default": "enabled"
+}
\ No newline at end of file
diff --git a/plugins/Benchmark/BenchmarkDb.py b/plugins/Benchmark/BenchmarkDb.py
new file mode 100644
index 00000000..a767a3f4
--- /dev/null
+++ b/plugins/Benchmark/BenchmarkDb.py
@@ -0,0 +1,143 @@
+import os
+import json
+import contextlib
+import time
+
+from Plugin import PluginManager
+from Config import config
+
+
+@PluginManager.registerTo("Actions")
+class ActionsPlugin:
+ def getBenchmarkTests(self, online=False):
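+        # Each entry: "func" is a generator yielding progress marks, "num" the default
+        # run count and "time_standard" the reference wall time used for the reported
+        # speed factor.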
+ tests = super().getBenchmarkTests(online)
+ tests.extend([
+ {"func": self.testDbConnect, "num": 10, "time_standard": 0.27},
+ {"func": self.testDbInsert, "num": 10, "time_standard": 0.91},
+ {"func": self.testDbInsertMultiuser, "num": 1, "time_standard": 0.57},
+ {"func": self.testDbQueryIndexed, "num": 1000, "time_standard": 0.84},
+ {"func": self.testDbQueryNotIndexed, "num": 1000, "time_standard": 1.30}
+ ])
+ return tests
+
+
+ @contextlib.contextmanager
+ def getTestDb(self):
+ from Db import Db
+ path = "%s/benchmark.db" % config.data_dir
+ if os.path.isfile(path):
+ os.unlink(path)
+ schema = {
+ "db_name": "TestDb",
+ "db_file": path,
+ "maps": {
+ ".*": {
+ "to_table": {
+ "test": "test"
+ }
+ }
+ },
+ "tables": {
+ "test": {
+ "cols": [
+ ["test_id", "INTEGER"],
+ ["title", "TEXT"],
+ ["json_id", "INTEGER REFERENCES json (json_id)"]
+ ],
+ "indexes": ["CREATE UNIQUE INDEX test_key ON test(test_id, json_id)"],
+ "schema_changed": 1426195822
+ }
+ }
+ }
+
+ db = Db.Db(schema, path)
+
+ yield db
+
+ db.close()
+ if os.path.isfile(path):
+ os.unlink(path)
+
+ def testDbConnect(self, num_run=1):
+ import sqlite3
+ for i in range(num_run):
+ with self.getTestDb() as db:
+ db.checkTables()
+ yield "."
+ yield "(SQLite version: %s, API: %s)" % (sqlite3.sqlite_version, sqlite3.version)
+
+ def testDbInsert(self, num_run=1):
+ yield "x 1000 lines "
+ for u in range(num_run):
+ with self.getTestDb() as db:
+ db.checkTables()
+ data = {"test": []}
+                for i in range(1000):  # 1000 lines of data
+ data["test"].append({"test_id": i, "title": "Testdata for %s message %s" % (u, i)})
+ json.dump(data, open("%s/test_%s.json" % (config.data_dir, u), "w"))
+ db.updateJson("%s/test_%s.json" % (config.data_dir, u))
+ os.unlink("%s/test_%s.json" % (config.data_dir, u))
+ assert db.execute("SELECT COUNT(*) FROM test").fetchone()[0] == 1000
+ yield "."
+
+ def fillTestDb(self, db):
+ db.checkTables()
+ cur = db.getCursor()
+ cur.logging = False
+        for u in range(100, 200):  # 100 users
+            data = {"test": []}
+            for i in range(100):  # 100 lines of data per user
+ data["test"].append({"test_id": i, "title": "Testdata for %s message %s" % (u, i)})
+ json.dump(data, open("%s/test_%s.json" % (config.data_dir, u), "w"))
+ db.updateJson("%s/test_%s.json" % (config.data_dir, u), cur=cur)
+ os.unlink("%s/test_%s.json" % (config.data_dir, u))
+ if u % 10 == 0:
+ yield "."
+
+ def testDbInsertMultiuser(self, num_run=1):
+ yield "x 100 users x 100 lines "
+ for u in range(num_run):
+ with self.getTestDb() as db:
+ for progress in self.fillTestDb(db):
+ yield progress
+ num_rows = db.execute("SELECT COUNT(*) FROM test").fetchone()[0]
+ assert num_rows == 10000, "%s != 10000" % num_rows
+
+ def testDbQueryIndexed(self, num_run=1):
+ s = time.time()
+ with self.getTestDb() as db:
+ for progress in self.fillTestDb(db):
+ pass
+ yield " (Db warmup done in %.3fs) " % (time.time() - s)
+ found_total = 0
+ for i in range(num_run): # 1000x by test_id
+ found = 0
+ res = db.execute("SELECT * FROM test WHERE test_id = %s" % (i % 100))
+ for row in res:
+ found_total += 1
+ found += 1
+ del(res)
+ yield "."
+ assert found == 100, "%s != 100 (i: %s)" % (found, i)
+ yield "Found: %s" % found_total
+
+ def testDbQueryNotIndexed(self, num_run=1):
+ s = time.time()
+ with self.getTestDb() as db:
+ for progress in self.fillTestDb(db):
+ pass
+ yield " (Db warmup done in %.3fs) " % (time.time() - s)
+ found_total = 0
+            for i in range(num_run):  # 1000x by json_id (not indexed)
+ found = 0
+ res = db.execute("SELECT * FROM test WHERE json_id = %s" % i)
+ for row in res:
+ found_total += 1
+ found += 1
+ yield "."
+ del(res)
+ if i == 0 or i > 100:
+ assert found == 0, "%s != 0 (i: %s)" % (found, i)
+ else:
+ assert found == 100, "%s != 100 (i: %s)" % (found, i)
+ yield "Found: %s" % found_total
diff --git a/plugins/Benchmark/BenchmarkPack.py b/plugins/Benchmark/BenchmarkPack.py
new file mode 100644
index 00000000..6b92e43a
--- /dev/null
+++ b/plugins/Benchmark/BenchmarkPack.py
@@ -0,0 +1,183 @@
+import os
+import io
+from collections import OrderedDict
+
+from Plugin import PluginManager
+from Config import config
+from util import Msgpack
+
+
+@PluginManager.registerTo("Actions")
+class ActionsPlugin:
+ def createZipFile(self, path):
+ import zipfile
+ test_data = b"Test" * 1024
+ file_name = b"\xc3\x81rv\xc3\xadzt\xc5\xb1r\xc5\x91%s.txt".decode("utf8")
+ with zipfile.ZipFile(path, 'w') as archive:
+ for y in range(100):
+ zip_info = zipfile.ZipInfo(file_name % y, (1980, 1, 1, 0, 0, 0))
+ zip_info.compress_type = zipfile.ZIP_DEFLATED
+ zip_info.create_system = 3
+ zip_info.flag_bits = 0
+ zip_info.external_attr = 25165824
+ archive.writestr(zip_info, test_data)
+
+ def testPackZip(self, num_run=1):
+ """
+        Test zip file creation
+ """
+ yield "x 100 x 5KB "
+ from Crypt import CryptHash
+ zip_path = '%s/test.zip' % config.data_dir
+ for i in range(num_run):
+ self.createZipFile(zip_path)
+ yield "."
+
+ archive_size = os.path.getsize(zip_path) / 1024
+ yield "(Generated file size: %.2fkB)" % archive_size
+
+ hash = CryptHash.sha512sum(open(zip_path, "rb"))
+ valid = "cb32fb43783a1c06a2170a6bc5bb228a032b67ff7a1fd7a5efb9b467b400f553"
+        assert hash == valid, "Invalid hash: %s != %s\n" % (hash, valid)
+ os.unlink(zip_path)
+
+ def testUnpackZip(self, num_run=1):
+ """
+ Test zip file reading
+ """
+ yield "x 100 x 5KB "
+ import zipfile
+ zip_path = '%s/test.zip' % config.data_dir
+ test_data = b"Test" * 1024
+ file_name = b"\xc3\x81rv\xc3\xadzt\xc5\xb1r\xc5\x91".decode("utf8")
+
+ self.createZipFile(zip_path)
+ for i in range(num_run):
+ with zipfile.ZipFile(zip_path) as archive:
+ for f in archive.filelist:
+ assert f.filename.startswith(file_name), "Invalid filename: %s != %s" % (f.filename, file_name)
+                    data = archive.open(f.filename).read()
+                    assert data == test_data, "Invalid data: %s..." % data[0:30]
+ yield "."
+
+ os.unlink(zip_path)
+
+ def createArchiveFile(self, path, archive_type="gz"):
+ import tarfile
+ import gzip
+
+        # Monkey patch GzipFile._write_gzip_header to use a fixed mtime so the archive hash is independent of the current datetime
+ def nodate_write_gzip_header(self):
+ self._write_mtime = 0
+ original_write_gzip_header(self)
+
+ test_data_io = io.BytesIO(b"Test" * 1024)
+ file_name = b"\xc3\x81rv\xc3\xadzt\xc5\xb1r\xc5\x91%s.txt".decode("utf8")
+
+ original_write_gzip_header = gzip.GzipFile._write_gzip_header
+ gzip.GzipFile._write_gzip_header = nodate_write_gzip_header
+ with tarfile.open(path, 'w:%s' % archive_type) as archive:
+ for y in range(100):
+ test_data_io.seek(0)
+ tar_info = tarfile.TarInfo(file_name % y)
+ tar_info.size = 4 * 1024
+ archive.addfile(tar_info, test_data_io)
+
+ def testPackArchive(self, num_run=1, archive_type="gz"):
+ """
+ Test creating tar archive files
+ """
+ yield "x 100 x 5KB "
+ from Crypt import CryptHash
+
+ hash_valid_db = {
+ "gz": "92caec5121a31709cbbc8c11b0939758e670b055bbbe84f9beb3e781dfde710f",
+ "bz2": "b613f41e6ee947c8b9b589d3e8fa66f3e28f63be23f4faf015e2f01b5c0b032d",
+ "xz": "ae43892581d770959c8d993daffab25fd74490b7cf9fafc7aaee746f69895bcb",
+ }
+ archive_path = '%s/test.tar.%s' % (config.data_dir, archive_type)
+ for i in range(num_run):
+ self.createArchiveFile(archive_path, archive_type=archive_type)
+ yield "."
+
+ archive_size = os.path.getsize(archive_path) / 1024
+ yield "(Generated file size: %.2fkB)" % archive_size
+
+ hash = CryptHash.sha512sum(open("%s/test.tar.%s" % (config.data_dir, archive_type), "rb"))
+ valid = hash_valid_db[archive_type]
+        assert hash == valid, "Invalid hash: %s != %s\n" % (hash, valid)
+
+ if os.path.isfile(archive_path):
+ os.unlink(archive_path)
+
+ def testUnpackArchive(self, num_run=1, archive_type="gz"):
+ """
+ Test reading tar archive files
+ """
+ yield "x 100 x 5KB "
+ import tarfile
+
+ test_data = b"Test" * 1024
+ file_name = b"\xc3\x81rv\xc3\xadzt\xc5\xb1r\xc5\x91%s.txt".decode("utf8")
+ archive_path = '%s/test.tar.%s' % (config.data_dir, archive_type)
+ self.createArchiveFile(archive_path, archive_type=archive_type)
+ for i in range(num_run):
+ with tarfile.open(archive_path, 'r:%s' % archive_type) as archive:
+ for y in range(100):
+ assert archive.extractfile(file_name % y).read() == test_data
+ yield "."
+ if os.path.isfile(archive_path):
+ os.unlink(archive_path)
+
+ def testPackMsgpack(self, num_run=1):
+ """
+ Test msgpack encoding
+ """
+ yield "x 100 x 5KB "
+ binary = b'fqv\xf0\x1a"e\x10,\xbe\x9cT\x9e(\xa5]u\x072C\x8c\x15\xa2\xa8\x93Sw)\x19\x02\xdd\t\xfb\xf67\x88\xd9\xee\x86\xa1\xe4\xb6,\xc6\x14\xbb\xd7$z\x1d\xb2\xda\x85\xf5\xa0\x97^\x01*\xaf\xd3\xb0!\xb7\x9d\xea\x89\xbbh8\xa1"\xa7]e(@\xa2\xa5g\xb7[\xae\x8eE\xc2\x9fL\xb6s\x19\x19\r\xc8\x04S\xd0N\xe4]?/\x01\xea\xf6\xec\xd1\xb3\xc2\x91\x86\xd7\xf4K\xdf\xc2lV\xf4\xe8\x80\xfc\x8ep\xbb\x82\xb3\x86\x98F\x1c\xecS\xc8\x15\xcf\xdc\xf1\xed\xfc\xd8\x18r\xf9\x80\x0f\xfa\x8cO\x97(\x0b]\xf1\xdd\r\xe7\xbf\xed\x06\xbd\x1b?\xc5\xa0\xd7a\x82\xf3\xa8\xe6@\xf3\ri\xa1\xb10\xf6\xd4W\xbc\x86\x1a\xbb\xfd\x94!bS\xdb\xaeM\x92\x00#\x0b\xf7\xad\xe9\xc2\x8e\x86\xbfi![%\xd31]\xc6\xfc2\xc9\xda\xc6v\x82P\xcc\xa9\xea\xb9\xff\xf6\xc8\x17iD\xcf\xf3\xeeI\x04\xe9\xa1\x19\xbb\x01\x92\xf5nn4K\xf8\xbb\xc6\x17e>\xa7 \xbbv'
+ data = OrderedDict(
+ sorted({"int": 1024 * 1024 * 1024, "float": 12345.67890, "text": "hello" * 1024, "binary": binary}.items())
+ )
+ data_packed_valid = b'\x84\xa6binary\xc5\x01\x00fqv\xf0\x1a"e\x10,\xbe\x9cT\x9e(\xa5]u\x072C\x8c\x15\xa2\xa8\x93Sw)\x19\x02\xdd\t\xfb\xf67\x88\xd9\xee\x86\xa1\xe4\xb6,\xc6\x14\xbb\xd7$z\x1d\xb2\xda\x85\xf5\xa0\x97^\x01*\xaf\xd3\xb0!\xb7\x9d\xea\x89\xbbh8\xa1"\xa7]e(@\xa2\xa5g\xb7[\xae\x8eE\xc2\x9fL\xb6s\x19\x19\r\xc8\x04S\xd0N\xe4]?/\x01\xea\xf6\xec\xd1\xb3\xc2\x91\x86\xd7\xf4K\xdf\xc2lV\xf4\xe8\x80\xfc\x8ep\xbb\x82\xb3\x86\x98F\x1c\xecS\xc8\x15\xcf\xdc\xf1\xed\xfc\xd8\x18r\xf9\x80\x0f\xfa\x8cO\x97(\x0b]\xf1\xdd\r\xe7\xbf\xed\x06\xbd\x1b?\xc5\xa0\xd7a\x82\xf3\xa8\xe6@\xf3\ri\xa1\xb10\xf6\xd4W\xbc\x86\x1a\xbb\xfd\x94!bS\xdb\xaeM\x92\x00#\x0b\xf7\xad\xe9\xc2\x8e\x86\xbfi![%\xd31]\xc6\xfc2\xc9\xda\xc6v\x82P\xcc\xa9\xea\xb9\xff\xf6\xc8\x17iD\xcf\xf3\xeeI\x04\xe9\xa1\x19\xbb\x01\x92\xf5nn4K\xf8\xbb\xc6\x17e>\xa7 \xbbv\xa5float\xcb@\xc8\x1c\xd6\xe61\xf8\xa1\xa3int\xce@\x00\x00\x00\xa4text\xda\x14\x00'
+ data_packed_valid += b'hello' * 1024
+ for y in range(num_run):
+ for i in range(100):
+ data_packed = Msgpack.pack(data)
+ yield "."
+            assert data_packed == data_packed_valid, "%s\n!=\n%s" % (repr(data_packed), repr(data_packed_valid))
+
+ def testUnpackMsgpack(self, num_run=1):
+ """
+ Test msgpack decoding
+ """
+ yield "x 5KB "
+ binary = b'fqv\xf0\x1a"e\x10,\xbe\x9cT\x9e(\xa5]u\x072C\x8c\x15\xa2\xa8\x93Sw)\x19\x02\xdd\t\xfb\xf67\x88\xd9\xee\x86\xa1\xe4\xb6,\xc6\x14\xbb\xd7$z\x1d\xb2\xda\x85\xf5\xa0\x97^\x01*\xaf\xd3\xb0!\xb7\x9d\xea\x89\xbbh8\xa1"\xa7]e(@\xa2\xa5g\xb7[\xae\x8eE\xc2\x9fL\xb6s\x19\x19\r\xc8\x04S\xd0N\xe4]?/\x01\xea\xf6\xec\xd1\xb3\xc2\x91\x86\xd7\xf4K\xdf\xc2lV\xf4\xe8\x80\xfc\x8ep\xbb\x82\xb3\x86\x98F\x1c\xecS\xc8\x15\xcf\xdc\xf1\xed\xfc\xd8\x18r\xf9\x80\x0f\xfa\x8cO\x97(\x0b]\xf1\xdd\r\xe7\xbf\xed\x06\xbd\x1b?\xc5\xa0\xd7a\x82\xf3\xa8\xe6@\xf3\ri\xa1\xb10\xf6\xd4W\xbc\x86\x1a\xbb\xfd\x94!bS\xdb\xaeM\x92\x00#\x0b\xf7\xad\xe9\xc2\x8e\x86\xbfi![%\xd31]\xc6\xfc2\xc9\xda\xc6v\x82P\xcc\xa9\xea\xb9\xff\xf6\xc8\x17iD\xcf\xf3\xeeI\x04\xe9\xa1\x19\xbb\x01\x92\xf5nn4K\xf8\xbb\xc6\x17e>\xa7 \xbbv'
+ data = OrderedDict(
+ sorted({"int": 1024 * 1024 * 1024, "float": 12345.67890, "text": "hello" * 1024, "binary": binary}.items())
+ )
+ data_packed = b'\x84\xa6binary\xc5\x01\x00fqv\xf0\x1a"e\x10,\xbe\x9cT\x9e(\xa5]u\x072C\x8c\x15\xa2\xa8\x93Sw)\x19\x02\xdd\t\xfb\xf67\x88\xd9\xee\x86\xa1\xe4\xb6,\xc6\x14\xbb\xd7$z\x1d\xb2\xda\x85\xf5\xa0\x97^\x01*\xaf\xd3\xb0!\xb7\x9d\xea\x89\xbbh8\xa1"\xa7]e(@\xa2\xa5g\xb7[\xae\x8eE\xc2\x9fL\xb6s\x19\x19\r\xc8\x04S\xd0N\xe4]?/\x01\xea\xf6\xec\xd1\xb3\xc2\x91\x86\xd7\xf4K\xdf\xc2lV\xf4\xe8\x80\xfc\x8ep\xbb\x82\xb3\x86\x98F\x1c\xecS\xc8\x15\xcf\xdc\xf1\xed\xfc\xd8\x18r\xf9\x80\x0f\xfa\x8cO\x97(\x0b]\xf1\xdd\r\xe7\xbf\xed\x06\xbd\x1b?\xc5\xa0\xd7a\x82\xf3\xa8\xe6@\xf3\ri\xa1\xb10\xf6\xd4W\xbc\x86\x1a\xbb\xfd\x94!bS\xdb\xaeM\x92\x00#\x0b\xf7\xad\xe9\xc2\x8e\x86\xbfi![%\xd31]\xc6\xfc2\xc9\xda\xc6v\x82P\xcc\xa9\xea\xb9\xff\xf6\xc8\x17iD\xcf\xf3\xeeI\x04\xe9\xa1\x19\xbb\x01\x92\xf5nn4K\xf8\xbb\xc6\x17e>\xa7 \xbbv\xa5float\xcb@\xc8\x1c\xd6\xe61\xf8\xa1\xa3int\xce@\x00\x00\x00\xa4text\xda\x14\x00'
+ data_packed += b'hello' * 1024
+ for y in range(num_run):
+ data_unpacked = Msgpack.unpack(data_packed, decode=False)
+ yield "."
+            assert data_unpacked == data, "%s\n!=\n%s" % (data_unpacked, data)
+
+ def testUnpackMsgpackStreaming(self, num_run=1, fallback=False):
+ """
+ Test streaming msgpack decoding
+ """
+ yield "x 1000 x 5KB "
+ binary = b'fqv\xf0\x1a"e\x10,\xbe\x9cT\x9e(\xa5]u\x072C\x8c\x15\xa2\xa8\x93Sw)\x19\x02\xdd\t\xfb\xf67\x88\xd9\xee\x86\xa1\xe4\xb6,\xc6\x14\xbb\xd7$z\x1d\xb2\xda\x85\xf5\xa0\x97^\x01*\xaf\xd3\xb0!\xb7\x9d\xea\x89\xbbh8\xa1"\xa7]e(@\xa2\xa5g\xb7[\xae\x8eE\xc2\x9fL\xb6s\x19\x19\r\xc8\x04S\xd0N\xe4]?/\x01\xea\xf6\xec\xd1\xb3\xc2\x91\x86\xd7\xf4K\xdf\xc2lV\xf4\xe8\x80\xfc\x8ep\xbb\x82\xb3\x86\x98F\x1c\xecS\xc8\x15\xcf\xdc\xf1\xed\xfc\xd8\x18r\xf9\x80\x0f\xfa\x8cO\x97(\x0b]\xf1\xdd\r\xe7\xbf\xed\x06\xbd\x1b?\xc5\xa0\xd7a\x82\xf3\xa8\xe6@\xf3\ri\xa1\xb10\xf6\xd4W\xbc\x86\x1a\xbb\xfd\x94!bS\xdb\xaeM\x92\x00#\x0b\xf7\xad\xe9\xc2\x8e\x86\xbfi![%\xd31]\xc6\xfc2\xc9\xda\xc6v\x82P\xcc\xa9\xea\xb9\xff\xf6\xc8\x17iD\xcf\xf3\xeeI\x04\xe9\xa1\x19\xbb\x01\x92\xf5nn4K\xf8\xbb\xc6\x17e>\xa7 \xbbv'
+ data = OrderedDict(
+ sorted({"int": 1024 * 1024 * 1024, "float": 12345.67890, "text": "hello" * 1024, "binary": binary}.items())
+ )
+ data_packed = b'\x84\xa6binary\xc5\x01\x00fqv\xf0\x1a"e\x10,\xbe\x9cT\x9e(\xa5]u\x072C\x8c\x15\xa2\xa8\x93Sw)\x19\x02\xdd\t\xfb\xf67\x88\xd9\xee\x86\xa1\xe4\xb6,\xc6\x14\xbb\xd7$z\x1d\xb2\xda\x85\xf5\xa0\x97^\x01*\xaf\xd3\xb0!\xb7\x9d\xea\x89\xbbh8\xa1"\xa7]e(@\xa2\xa5g\xb7[\xae\x8eE\xc2\x9fL\xb6s\x19\x19\r\xc8\x04S\xd0N\xe4]?/\x01\xea\xf6\xec\xd1\xb3\xc2\x91\x86\xd7\xf4K\xdf\xc2lV\xf4\xe8\x80\xfc\x8ep\xbb\x82\xb3\x86\x98F\x1c\xecS\xc8\x15\xcf\xdc\xf1\xed\xfc\xd8\x18r\xf9\x80\x0f\xfa\x8cO\x97(\x0b]\xf1\xdd\r\xe7\xbf\xed\x06\xbd\x1b?\xc5\xa0\xd7a\x82\xf3\xa8\xe6@\xf3\ri\xa1\xb10\xf6\xd4W\xbc\x86\x1a\xbb\xfd\x94!bS\xdb\xaeM\x92\x00#\x0b\xf7\xad\xe9\xc2\x8e\x86\xbfi![%\xd31]\xc6\xfc2\xc9\xda\xc6v\x82P\xcc\xa9\xea\xb9\xff\xf6\xc8\x17iD\xcf\xf3\xeeI\x04\xe9\xa1\x19\xbb\x01\x92\xf5nn4K\xf8\xbb\xc6\x17e>\xa7 \xbbv\xa5float\xcb@\xc8\x1c\xd6\xe61\xf8\xa1\xa3int\xce@\x00\x00\x00\xa4text\xda\x14\x00'
+ data_packed += b'hello' * 1024
+ for i in range(num_run):
+ unpacker = Msgpack.getUnpacker(decode=False, fallback=fallback)
+ for y in range(1000):
+ unpacker.feed(data_packed)
+ for data_unpacked in unpacker:
+ pass
+ yield "."
+ assert data == data_unpacked, "%s != %s" % (data_unpacked, data)
diff --git a/plugins/Benchmark/BenchmarkPlugin.py b/plugins/Benchmark/BenchmarkPlugin.py
new file mode 100644
index 00000000..fd6cacf3
--- /dev/null
+++ b/plugins/Benchmark/BenchmarkPlugin.py
@@ -0,0 +1,428 @@
+import os
+import time
+import io
+import math
+import hashlib
+import re
+import sys
+
+from Config import config
+from Crypt import CryptHash
+from Plugin import PluginManager
+from Debug import Debug
+from util import helper
+
+plugin_dir = os.path.dirname(__file__)
+
+benchmark_key = None
+
+
+@PluginManager.registerTo("UiRequest")
+class UiRequestPlugin(object):
+ @helper.encodeResponse
+ def actionBenchmark(self):
+ global benchmark_key
+ script_nonce = self.getScriptNonce()
+ if not benchmark_key:
+ benchmark_key = CryptHash.random(encoding="base64")
+ self.sendHeader(script_nonce=script_nonce)
+
+ if "Multiuser" in PluginManager.plugin_manager.plugin_names and not config.multiuser_local:
+ yield "This function is disabled on this proxy"
+ return
+
+ data = self.render(
+ plugin_dir + "/media/benchmark.html",
+ script_nonce=script_nonce,
+ benchmark_key=benchmark_key,
+ filter=re.sub("[^A-Za-z0-9]", "", self.get.get("filter", ""))
+ )
+ yield data
+
+ @helper.encodeResponse
+ def actionBenchmarkResult(self):
+ global benchmark_key
+ if self.get.get("benchmark_key", "") != benchmark_key:
+ return self.error403("Invalid benchmark key")
+
+ self.sendHeader(content_type="text/plain", noscript=True)
+
+ if "Multiuser" in PluginManager.plugin_manager.plugin_names and not config.multiuser_local:
+ yield "This function is disabled on this proxy"
+ return
+
+ yield " " * 1024 # Head (required for streaming)
+
+ import main
+ s = time.time()
+
+ for part in main.actions.testBenchmark(filter=self.get.get("filter", "")):
+ yield part
+
+ yield "\n - Total time: %.3fs" % (time.time() - s)
+
+
+@PluginManager.registerTo("Actions")
+class ActionsPlugin:
+ def getMultiplerTitle(self, multipler):
+ if multipler < 0.3:
+ multipler_title = "Sloooow"
+ elif multipler < 0.6:
+ multipler_title = "Ehh"
+ elif multipler < 0.8:
+ multipler_title = "Goodish"
+ elif multipler < 1.2:
+ multipler_title = "OK"
+ elif multipler < 1.7:
+ multipler_title = "Fine"
+ elif multipler < 2.5:
+ multipler_title = "Fast"
+ elif multipler < 3.5:
+ multipler_title = "WOW"
+ else:
+ multipler_title = "Insane!!"
+ return multipler_title
+
+ def formatResult(self, taken, standard):
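+        # Example (illustrative): formatResult(0.25, 0.5) -> " Done in 0.250s = Fast (2.00x)"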
+ if not standard:
+ return " Done in %.3fs" % taken
+
+ if taken > 0:
+ multipler = standard / taken
+ else:
+ multipler = 99
+ multipler_title = self.getMultiplerTitle(multipler)
+
+ return " Done in %.3fs = %s (%.2fx)" % (taken, multipler_title, multipler)
+
+ def getBenchmarkTests(self, online=False):
+ if hasattr(super(), "getBenchmarkTests"):
+ tests = super().getBenchmarkTests(online)
+ else:
+ tests = []
+
+ tests.extend([
+ {"func": self.testHdPrivatekey, "num": 50, "time_standard": 0.57},
+ {"func": self.testSign, "num": 20, "time_standard": 0.46},
+ {"func": self.testVerify, "kwargs": {"lib_verify": "sslcrypto_fallback"}, "num": 20, "time_standard": 0.38},
+ {"func": self.testVerify, "kwargs": {"lib_verify": "sslcrypto"}, "num": 200, "time_standard": 0.30},
+ {"func": self.testVerify, "kwargs": {"lib_verify": "libsecp256k1"}, "num": 200, "time_standard": 0.10},
+
+ {"func": self.testPackMsgpack, "num": 100, "time_standard": 0.35},
+ {"func": self.testUnpackMsgpackStreaming, "kwargs": {"fallback": False}, "num": 100, "time_standard": 0.35},
+ {"func": self.testUnpackMsgpackStreaming, "kwargs": {"fallback": True}, "num": 10, "time_standard": 0.5},
+
+ {"func": self.testPackZip, "num": 5, "time_standard": 0.065},
+ {"func": self.testPackArchive, "kwargs": {"archive_type": "gz"}, "num": 5, "time_standard": 0.08},
+ {"func": self.testPackArchive, "kwargs": {"archive_type": "bz2"}, "num": 5, "time_standard": 0.68},
+ {"func": self.testPackArchive, "kwargs": {"archive_type": "xz"}, "num": 5, "time_standard": 0.47},
+ {"func": self.testUnpackZip, "num": 20, "time_standard": 0.25},
+ {"func": self.testUnpackArchive, "kwargs": {"archive_type": "gz"}, "num": 20, "time_standard": 0.28},
+ {"func": self.testUnpackArchive, "kwargs": {"archive_type": "bz2"}, "num": 20, "time_standard": 0.83},
+ {"func": self.testUnpackArchive, "kwargs": {"archive_type": "xz"}, "num": 20, "time_standard": 0.38},
+
+ {"func": self.testCryptHash, "kwargs": {"hash_type": "sha256"}, "num": 10, "time_standard": 0.50},
+ {"func": self.testCryptHash, "kwargs": {"hash_type": "sha512"}, "num": 10, "time_standard": 0.33},
+ {"func": self.testCryptHashlib, "kwargs": {"hash_type": "sha3_256"}, "num": 10, "time_standard": 0.33},
+ {"func": self.testCryptHashlib, "kwargs": {"hash_type": "sha3_512"}, "num": 10, "time_standard": 0.65},
+
+ {"func": self.testRandom, "num": 100, "time_standard": 0.08},
+ ])
+
+ if online:
+ tests += [
+ {"func": self.testHttps, "num": 1, "time_standard": 2.1}
+ ]
+ return tests
+
+ def testBenchmark(self, num_multipler=1, online=False, num_run=None, filter=None):
+ """
+ Run benchmark on client functions
+ """
+ tests = self.getBenchmarkTests(online=online)
+
+ if filter:
+ tests = [test for test in tests[:] if filter.lower() in test["func"].__name__.lower()]
+
+ yield "\n"
+ res = {}
+ res_time_taken = {}
+ multiplers = []
+ for test in tests:
+ s = time.time()
+ if num_run:
+ num_run_test = num_run
+ else:
+ num_run_test = math.ceil(test["num"] * num_multipler)
+ func = test["func"]
+ func_name = func.__name__
+ kwargs = test.get("kwargs", {})
+ key = "%s %s" % (func_name, kwargs)
+ if kwargs:
+ yield "* Running %s (%s) x %s " % (func_name, kwargs, num_run_test)
+ else:
+ yield "* Running %s x %s " % (func_name, num_run_test)
+ i = 0
+ try:
+ for progress in func(num_run_test, **kwargs):
+ i += 1
+ if num_run_test > 10:
+ should_print = i % (num_run_test / 10) == 0 or progress != "."
+ else:
+ should_print = True
+
+ if should_print:
+ if num_run_test == 1 and progress == ".":
+ progress = "..."
+ yield progress
+ time_taken = time.time() - s
+ if num_run:
+ time_standard = 0
+ else:
+ time_standard = test["time_standard"] * num_multipler
+ yield self.formatResult(time_taken, time_standard)
+ yield "\n"
+ res[key] = "ok"
+ res_time_taken[key] = time_taken
+ multiplers.append(time_standard / max(time_taken, 0.001))
+ except Exception as err:
+ res[key] = err
+ yield "Failed!\n! Error: %s\n\n" % Debug.formatException(err)
+
+ yield "\n== Result ==\n"
+
+ # Check verification speed
+ if "testVerify {'lib_verify': 'sslcrypto'}" in res_time_taken:
+ speed_order = ["sslcrypto_fallback", "sslcrypto", "libsecp256k1"]
+ time_taken = {}
+ for lib_verify in speed_order:
+ time_taken[lib_verify] = res_time_taken["testVerify {'lib_verify': '%s'}" % lib_verify]
+
+ time_taken["sslcrypto_fallback"] *= 10 # fallback benchmark only run 20 times instead of 200
+ speedup_sslcrypto = time_taken["sslcrypto_fallback"] / time_taken["sslcrypto"]
+ speedup_libsecp256k1 = time_taken["sslcrypto_fallback"] / time_taken["libsecp256k1"]
+
+ yield "\n* Verification speedup:\n"
+ yield " - OpenSSL: %.1fx (reference: 7.0x)\n" % speedup_sslcrypto
+ yield " - libsecp256k1: %.1fx (reference: 23.8x)\n" % speedup_libsecp256k1
+
+ if speedup_sslcrypto < 2:
+ res["Verification speed"] = "error: OpenSSL speedup low: %.1fx" % speedup_sslcrypto
+
+ if speedup_libsecp256k1 < speedup_sslcrypto:
+ res["Verification speed"] = "error: libsecp256k1 speedup low: %.1fx" % speedup_libsecp256k1
+
+ if not res:
+ yield "! No tests found"
+ if config.action == "test":
+ sys.exit(1)
+ else:
+ num_failed = len([res_key for res_key, res_val in res.items() if res_val != "ok"])
+ num_success = len([res_key for res_key, res_val in res.items() if res_val == "ok"])
+ yield "\n* Tests:\n"
+ yield " - Total: %s tests\n" % len(res)
+ yield " - Success: %s tests\n" % num_success
+ yield " - Failed: %s tests\n" % num_failed
+ if any(multiplers):
+ multipler_avg = sum(multiplers) / len(multiplers)
+ multipler_title = self.getMultiplerTitle(multipler_avg)
+ yield " - Average speed factor: %.2fx (%s)\n" % (multipler_avg, multipler_title)
+
+ # Display errors
+ for res_key, res_val in res.items():
+ if res_val != "ok":
+ yield " ! %s %s\n" % (res_key, res_val)
+
+ if num_failed != 0 and config.action == "test":
+ sys.exit(1)
+
+ def testHttps(self, num_run=1):
+ """
+ Test https connection with valid and invalid certs
+ """
+ import urllib.request
+ import urllib.error
+
+ body = urllib.request.urlopen("https://google.com").read()
+ assert len(body) > 100
+ yield "."
+
+ badssl_urls = [
+ "https://expired.badssl.com/",
+ "https://wrong.host.badssl.com/",
+ "https://self-signed.badssl.com/",
+ "https://untrusted-root.badssl.com/"
+ ]
+ for badssl_url in badssl_urls:
+ try:
+ body = urllib.request.urlopen(badssl_url).read()
+ https_err = None
+ except urllib.error.URLError as err:
+ https_err = err
+ assert https_err
+ yield "."
+
+ def testCryptHash(self, num_run=1, hash_type="sha256"):
+ """
+ Test hashing functions
+ """
+ yield "(5MB) "
+
+ from Crypt import CryptHash
+
+ hash_types = {
+ "sha256": {"func": CryptHash.sha256sum, "hash_valid": "8cd629d9d6aff6590da8b80782a5046d2673d5917b99d5603c3dcb4005c45ffa"},
+ "sha512": {"func": CryptHash.sha512sum, "hash_valid": "9ca7e855d430964d5b55b114e95c6bbb114a6d478f6485df93044d87b108904d"}
+ }
+ hash_func = hash_types[hash_type]["func"]
+ hash_valid = hash_types[hash_type]["hash_valid"]
+
+ data = io.BytesIO(b"Hello" * 1024 * 1024) # 5MB
+ for i in range(num_run):
+ data.seek(0)
+ hash = hash_func(data)
+ yield "."
+ assert hash == hash_valid, "%s != %s" % (hash, hash_valid)
+
+ def testCryptHashlib(self, num_run=1, hash_type="sha3_256"):
+ """
+ Test SHA3 hashing functions
+ """
+ yield "x 5MB "
+
+ hash_types = {
+ "sha3_256": {"func": hashlib.sha3_256, "hash_valid": "c8aeb3ef9fe5d6404871c0d2a4410a4d4e23268e06735648c9596f436c495f7e"},
+ "sha3_512": {"func": hashlib.sha3_512, "hash_valid": "b75dba9472d8af3cc945ce49073f3f8214d7ac12086c0453fb08944823dee1ae83b3ffbc87a53a57cc454521d6a26fe73ff0f3be38dddf3f7de5d7692ebc7f95"},
+ }
+
+ hash_func = hash_types[hash_type]["func"]
+ hash_valid = hash_types[hash_type]["hash_valid"]
+
+ data = io.BytesIO(b"Hello" * 1024 * 1024) # 5MB
+ for i in range(num_run):
+ data.seek(0)
+ h = hash_func()
+ while 1:
+ buff = data.read(1024 * 64)
+ if not buff:
+ break
+ h.update(buff)
+ hash = h.hexdigest()
+ yield "."
+ assert hash == hash_valid, "%s != %s" % (hash, hash_valid)
+
+ def testRandom(self, num_run=1):
+ """
+ Test generating random data
+ """
+ yield "x 1000 x 256 bytes "
+ for i in range(num_run):
+ data_last = None
+ for y in range(1000):
+ data = os.urandom(256)
+ assert data != data_last
+ assert len(data) == 256
+ data_last = data
+ yield "."
+
+ def testHdPrivatekey(self, num_run=2):
+ """
+ Test generating deterministic private keys from a master seed
+ """
+ from Crypt import CryptBitcoin
+ seed = "e180efa477c63b0f2757eac7b1cce781877177fe0966be62754ffd4c8592ce38"
+ privatekeys = []
+ for i in range(num_run):
+ privatekeys.append(CryptBitcoin.hdPrivatekey(seed, i * 10))
+ yield "."
+ valid = "5JSbeF5PevdrsYjunqpg7kAGbnCVYa1T4APSL3QRu8EoAmXRc7Y"
+ assert privatekeys[0] == valid, "%s != %s" % (privatekeys[0], valid)
+ if len(privatekeys) > 1:
+ assert privatekeys[0] != privatekeys[-1]
+
+ def testSign(self, num_run=1):
+ """
+ Test signing data using a private key
+ """
+ from Crypt import CryptBitcoin
+ data = "Hello" * 1024
+ privatekey = "5JsunC55XGVqFQj5kPGK4MWgTL26jKbnPhjnmchSNPo75XXCwtk"
+ for i in range(num_run):
+ yield "."
+ sign = CryptBitcoin.sign(data, privatekey)
+ valid = "G1GXaDauZ8vX/N9Jn+MRiGm9h+I94zUhDnNYFaqMGuOiBHB+kp4cRPZOL7l1yqK5BHa6J+W97bMjvTXtxzljp6w="
+ assert sign == valid, "%s != %s" % (sign, valid)
+
+ def testVerify(self, num_run=1, lib_verify="sslcrypto"):
+ """
+ Test verification of generated signatures
+ """
+ from Crypt import CryptBitcoin
+ CryptBitcoin.loadLib(lib_verify, silent=True)
+
+
+ data = "Hello" * 1024
+ privatekey = "5JsunC55XGVqFQj5kPGK4MWgTL26jKbnPhjnmchSNPo75XXCwtk"
+ address = CryptBitcoin.privatekeyToAddress(privatekey)
+ sign = "G1GXaDauZ8vX/N9Jn+MRiGm9h+I94zUhDnNYFaqMGuOiBHB+kp4cRPZOL7l1yqK5BHa6J+W97bMjvTXtxzljp6w="
+
+ for i in range(num_run):
+ ok = CryptBitcoin.verify(data, address, sign, lib_verify=lib_verify)
+ yield "."
+ assert ok, "does not verify from %s" % address
+
+ if lib_verify == "sslcrypto":
+ yield("(%s)" % CryptBitcoin.sslcrypto.ecc.get_backend())
+
+ def testPortCheckers(self):
+ """
+        Test all active open port checkers
+ """
+ from Peer import PeerPortchecker
+ for ip_type, func_names in PeerPortchecker.PeerPortchecker.checker_functions.items():
+ yield "\n- %s:" % ip_type
+ for func_name in func_names:
+ yield "\n - Tracker %s: " % func_name
+ try:
+ for res in self.testPortChecker(func_name):
+ yield res
+ except Exception as err:
+ yield Debug.formatException(err)
+
+ def testPortChecker(self, func_name):
+ """
+        Test a single open port checker
+ """
+ from Peer import PeerPortchecker
+ peer_portchecker = PeerPortchecker.PeerPortchecker(None)
+ announce_func = getattr(peer_portchecker, func_name)
+ res = announce_func(3894)
+ yield res
+
+ def testAll(self):
+ """
+ Run all tests to check system compatibility with ZeroNet functions
+ """
+ for progress in self.testBenchmark(online=not config.offline, num_run=1):
+ yield progress
+
+
+@PluginManager.registerTo("ConfigPlugin")
+class ConfigPlugin(object):
+ def createArguments(self):
+ back = super(ConfigPlugin, self).createArguments()
+ if self.getCmdlineValue("test") == "benchmark":
+ self.test_parser.add_argument(
+                '--num_multipler', help='Benchmark run time multiplier',
+ default=1.0, type=float, metavar='num'
+ )
+ self.test_parser.add_argument(
+ '--filter', help='Filter running benchmark',
+ default=None, metavar='test name'
+ )
+ elif self.getCmdlineValue("test") == "portChecker":
+ self.test_parser.add_argument(
+ '--func_name', help='Name of open port checker function',
+ default=None, metavar='func_name'
+ )
+ return back
diff --git a/plugins/Benchmark/__init__.py b/plugins/Benchmark/__init__.py
new file mode 100644
index 00000000..76a5ae9c
--- /dev/null
+++ b/plugins/Benchmark/__init__.py
@@ -0,0 +1,3 @@
+from . import BenchmarkPlugin
+from . import BenchmarkDb
+from . import BenchmarkPack
diff --git a/plugins/Benchmark/media/benchmark.html b/plugins/Benchmark/media/benchmark.html
new file mode 100644
index 00000000..73571367
--- /dev/null
+++ b/plugins/Benchmark/media/benchmark.html
@@ -0,0 +1,123 @@
+
+
+
+
+
+Benchmark
+
+
+
+
+
\ No newline at end of file
diff --git a/plugins/Benchmark/plugin_info.json b/plugins/Benchmark/plugin_info.json
new file mode 100644
index 00000000..f3f57417
--- /dev/null
+++ b/plugins/Benchmark/plugin_info.json
@@ -0,0 +1,5 @@
+{
+ "name": "Benchmark",
+ "description": "Test and benchmark database and cryptographic functions related to ZeroNet.",
+ "default": "enabled"
+}
\ No newline at end of file
diff --git a/plugins/Bigfile/BigfilePiecefield.py b/plugins/Bigfile/BigfilePiecefield.py
new file mode 100644
index 00000000..9a6f370b
--- /dev/null
+++ b/plugins/Bigfile/BigfilePiecefield.py
@@ -0,0 +1,170 @@
+import array
+
+
+def packPiecefield(data):
+ if not isinstance(data, bytes) and not isinstance(data, bytearray):
+ raise Exception("Invalid data type: %s" % type(data))
+
+ res = []
+ if not data:
+ return array.array("H", b"")
+
+ if data[0] == b"\x00":
+ res.append(0)
+ find = b"\x01"
+ else:
+ find = b"\x00"
+ last_pos = 0
+ pos = 0
+ while 1:
+ pos = data.find(find, pos)
+ if find == b"\x00":
+ find = b"\x01"
+ else:
+ find = b"\x00"
+ if pos == -1:
+ res.append(len(data) - last_pos)
+ break
+ res.append(pos - last_pos)
+ last_pos = pos
+ return array.array("H", res)
+
+
+def unpackPiecefield(data):
+ if not data:
+ return b""
+
+ res = []
+ char = b"\x01"
+ for times in data:
+ if times > 10000:
+ return b""
+ res.append(char * times)
+ if char == b"\x01":
+ char = b"\x00"
+ else:
+ char = b"\x01"
+ return b"".join(res)
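+
+# Example of the run-length encoding used above:
+# packPiecefield(b"\x01\x01\x00\x00\x00\x01") == array("H", [2, 3, 1])
+# (two \x01 bytes, three \x00 bytes, then one \x01 byte); unpackPiecefield()
+# reverses it, and a leading 0 run marks data that starts with \x00.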
+
+
+def spliceBit(data, idx, bit):
+ if bit != b"\x00" and bit != b"\x01":
+ raise Exception("Invalid bit: %s" % bit)
+
+ if len(data) < idx:
+ data = data.ljust(idx + 1, b"\x00")
+ return data[:idx] + bit + data[idx + 1:]
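+
+# Example: spliceBit(b"\x00\x00\x00", 1, b"\x01") == b"\x00\x01\x00"; if idx points past
+# the end of data, the field is first padded with \x00 bytes so the bit can be placed at idx.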
+
+class Piecefield(object):
+ def tostring(self):
+ return "".join(["1" if b else "0" for b in self.tobytes()])
+
+
+class BigfilePiecefield(Piecefield):
+ __slots__ = ["data"]
+
+ def __init__(self):
+ self.data = b""
+
+ def frombytes(self, s):
+ if not isinstance(s, bytes) and not isinstance(s, bytearray):
+ raise Exception("Invalid type: %s" % type(s))
+ self.data = s
+
+ def tobytes(self):
+ return self.data
+
+ def pack(self):
+ return packPiecefield(self.data).tobytes()
+
+ def unpack(self, s):
+ self.data = unpackPiecefield(array.array("H", s))
+
+ def __getitem__(self, key):
+ try:
+ return self.data[key]
+ except IndexError:
+ return False
+
+ def __setitem__(self, key, value):
+ self.data = spliceBit(self.data, key, value)
+
+class BigfilePiecefieldPacked(Piecefield):
+ __slots__ = ["data"]
+
+ def __init__(self):
+ self.data = b""
+
+ def frombytes(self, data):
+ if not isinstance(data, bytes) and not isinstance(data, bytearray):
+ raise Exception("Invalid type: %s" % type(data))
+ self.data = packPiecefield(data).tobytes()
+
+ def tobytes(self):
+ return unpackPiecefield(array.array("H", self.data))
+
+ def pack(self):
+ return array.array("H", self.data).tobytes()
+
+ def unpack(self, data):
+ self.data = data
+
+ def __getitem__(self, key):
+ try:
+ return self.tobytes()[key]
+ except IndexError:
+ return False
+
+ def __setitem__(self, key, value):
+ data = spliceBit(self.tobytes(), key, value)
+ self.frombytes(data)
+
+
+if __name__ == "__main__":
+ import os
+ import psutil
+ import time
+ testdata = b"\x01" * 100 + b"\x00" * 900 + b"\x01" * 4000 + b"\x00" * 4999 + b"\x01"
+ meminfo = psutil.Process(os.getpid()).memory_info
+
+ for storage in [BigfilePiecefieldPacked, BigfilePiecefield]:
+ print("-- Testing storage: %s --" % storage)
+ m = meminfo()[0]
+ s = time.time()
+ piecefields = {}
+ for i in range(10000):
+ piecefield = storage()
+ piecefield.frombytes(testdata[:i] + b"\x00" + testdata[i + 1:])
+ piecefields[i] = piecefield
+
+ print("Create x10000: +%sKB in %.3fs (len: %s)" % ((meminfo()[0] - m) / 1024, time.time() - s, len(piecefields[0].data)))
+
+ m = meminfo()[0]
+ s = time.time()
+ for piecefield in list(piecefields.values()):
+ val = piecefield[1000]
+
+ print("Query one x10000: +%sKB in %.3fs" % ((meminfo()[0] - m) / 1024, time.time() - s))
+
+ m = meminfo()[0]
+ s = time.time()
+ for piecefield in list(piecefields.values()):
+ piecefield[1000] = b"\x01"
+
+ print("Change one x10000: +%sKB in %.3fs" % ((meminfo()[0] - m) / 1024, time.time() - s))
+
+ m = meminfo()[0]
+ s = time.time()
+ for piecefield in list(piecefields.values()):
+ packed = piecefield.pack()
+
+ print("Pack x10000: +%sKB in %.3fs (len: %s)" % ((meminfo()[0] - m) / 1024, time.time() - s, len(packed)))
+
+ m = meminfo()[0]
+ s = time.time()
+ for piecefield in list(piecefields.values()):
+ piecefield.unpack(packed)
+
+ print("Unpack x10000: +%sKB in %.3fs (len: %s)" % ((meminfo()[0] - m) / 1024, time.time() - s, len(piecefields[0].data)))
+
+ piecefields = {}
diff --git a/plugins/Bigfile/BigfilePlugin.py b/plugins/Bigfile/BigfilePlugin.py
new file mode 100644
index 00000000..78a27b05
--- /dev/null
+++ b/plugins/Bigfile/BigfilePlugin.py
@@ -0,0 +1,843 @@
+import time
+import os
+import subprocess
+import shutil
+import collections
+import math
+import warnings
+import base64
+import binascii
+import json
+
+import gevent
+import gevent.lock
+
+from Plugin import PluginManager
+from Debug import Debug
+from Crypt import CryptHash
+with warnings.catch_warnings():
+ warnings.filterwarnings("ignore") # Ignore missing sha3 warning
+ import merkletools
+
+from util import helper
+from util import Msgpack
+from util.Flag import flag
+import util
+from .BigfilePiecefield import BigfilePiecefield, BigfilePiecefieldPacked
+
+
+# We can only import plugin host classes after the plugins are loaded
+@PluginManager.afterLoad
+def importPluginnedClasses():
+ global VerifyError, config
+ from Content.ContentManager import VerifyError
+ from Config import config
+
+
+if "upload_nonces" not in locals():
+ upload_nonces = {}
+
+
+@PluginManager.registerTo("UiRequest")
+class UiRequestPlugin(object):
+ def isCorsAllowed(self, path):
+ if path == "/ZeroNet-Internal/BigfileUpload":
+ return True
+ else:
+ return super(UiRequestPlugin, self).isCorsAllowed(path)
+
+ @helper.encodeResponse
+ def actionBigfileUpload(self):
+ nonce = self.get.get("upload_nonce")
+ if nonce not in upload_nonces:
+ return self.error403("Upload nonce error.")
+
+ upload_info = upload_nonces[nonce]
+ del upload_nonces[nonce]
+
+ self.sendHeader(200, "text/html", noscript=True, extra_headers={
+ "Access-Control-Allow-Origin": "null",
+ "Access-Control-Allow-Credentials": "true"
+ })
+
+ self.readMultipartHeaders(self.env['wsgi.input']) # Skip http headers
+ result = self.handleBigfileUpload(upload_info, self.env['wsgi.input'].read)
+ return json.dumps(result)
+
+ def actionBigfileUploadWebsocket(self):
+ ws = self.env.get("wsgi.websocket")
+
+ if not ws:
+ self.start_response("400 Bad Request", [])
+ return [b"Not a websocket request!"]
+
+ nonce = self.get.get("upload_nonce")
+ if nonce not in upload_nonces:
+ return self.error403("Upload nonce error.")
+
+ upload_info = upload_nonces[nonce]
+ del upload_nonces[nonce]
+
+ ws.send("poll")
+
+ buffer = b""
+ def read(size):
+ nonlocal buffer
+ while len(buffer) < size:
+ buffer += ws.receive()
+ ws.send("poll")
+ part, buffer = buffer[:size], buffer[size:]
+ return part
+
+ result = self.handleBigfileUpload(upload_info, read)
+ ws.send(json.dumps(result))
+
+ def handleBigfileUpload(self, upload_info, read):
+ site = upload_info["site"]
+ inner_path = upload_info["inner_path"]
+
+ with site.storage.open(inner_path, "wb", create_dirs=True) as out_file:
+ merkle_root, piece_size, piecemap_info = site.content_manager.hashBigfile(
+ read, upload_info["size"], upload_info["piece_size"], out_file
+ )
+
+ if len(piecemap_info["sha512_pieces"]) == 1: # Small file, don't split
+ hash = binascii.hexlify(piecemap_info["sha512_pieces"][0])
+ hash_id = site.content_manager.hashfield.getHashId(hash)
+ site.content_manager.optionalDownloaded(inner_path, hash_id, upload_info["size"], own=True)
+
+ else: # Big file
+ file_name = helper.getFilename(inner_path)
+ site.storage.open(upload_info["piecemap"], "wb").write(Msgpack.pack({file_name: piecemap_info}))
+
+ # Find the piecemap and file paths relative to content.json
+ file_info = site.content_manager.getFileInfo(inner_path, new_file=True)
+ content_inner_path_dir = helper.getDirname(file_info["content_inner_path"])
+ piecemap_relative_path = upload_info["piecemap"][len(content_inner_path_dir):]
+ file_relative_path = inner_path[len(content_inner_path_dir):]
+
+ # Add file to content.json
+ if site.storage.isFile(file_info["content_inner_path"]):
+ content = site.storage.loadJson(file_info["content_inner_path"])
+ else:
+ content = {}
+ if "files_optional" not in content:
+ content["files_optional"] = {}
+
+ content["files_optional"][file_relative_path] = {
+ "sha512": merkle_root,
+ "size": upload_info["size"],
+ "piecemap": piecemap_relative_path,
+ "piece_size": piece_size
+ }
+
+ merkle_root_hash_id = site.content_manager.hashfield.getHashId(merkle_root)
+ site.content_manager.optionalDownloaded(inner_path, merkle_root_hash_id, upload_info["size"], own=True)
+ site.storage.writeJson(file_info["content_inner_path"], content)
+
+ site.content_manager.contents.loadItem(file_info["content_inner_path"]) # reload cache
+
+ return {
+ "merkle_root": merkle_root,
+ "piece_num": len(piecemap_info["sha512_pieces"]),
+ "piece_size": piece_size,
+ "inner_path": inner_path
+ }
+
+ def readMultipartHeaders(self, wsgi_input):
+ found = False
+ for i in range(100):
+ line = wsgi_input.readline()
+ if line == b"\r\n":
+ found = True
+ break
+ if not found:
+ raise Exception("No multipart header found")
+ return i
+
+ def actionFile(self, file_path, *args, **kwargs):
+ if kwargs.get("file_size", 0) > 1024 * 1024 and kwargs.get("path_parts"): # Only check files larger than 1MB
+ path_parts = kwargs["path_parts"]
+ site = self.server.site_manager.get(path_parts["address"])
+ big_file = site.storage.openBigfile(path_parts["inner_path"], prebuffer=2 * 1024 * 1024)
+ if big_file:
+ kwargs["file_obj"] = big_file
+ kwargs["file_size"] = big_file.size
+
+ return super(UiRequestPlugin, self).actionFile(file_path, *args, **kwargs)
+
+
+@PluginManager.registerTo("UiWebsocket")
+class UiWebsocketPlugin(object):
+ def actionBigfileUploadInit(self, to, inner_path, size, protocol="xhr"):
+ valid_signers = self.site.content_manager.getValidSigners(inner_path)
+ auth_address = self.user.getAuthAddress(self.site.address)
+ if not self.site.settings["own"] and auth_address not in valid_signers:
+ self.log.error("FileWrite forbidden %s not in valid_signers %s" % (auth_address, valid_signers))
+ return self.response(to, {"error": "Forbidden, you can only modify your own files"})
+
+ nonce = CryptHash.random()
+ piece_size = 1024 * 1024
+ inner_path = self.site.content_manager.sanitizePath(inner_path)
+ file_info = self.site.content_manager.getFileInfo(inner_path, new_file=True)
+
+ content_inner_path_dir = helper.getDirname(file_info["content_inner_path"])
+ file_relative_path = inner_path[len(content_inner_path_dir):]
+
+ upload_nonces[nonce] = {
+ "added": time.time(),
+ "site": self.site,
+ "inner_path": inner_path,
+ "websocket_client": self,
+ "size": size,
+ "piece_size": piece_size,
+ "piecemap": inner_path + ".piecemap.msgpack"
+ }
+
+ if protocol == "xhr":
+ return {
+ "url": "/ZeroNet-Internal/BigfileUpload?upload_nonce=" + nonce,
+ "piece_size": piece_size,
+ "inner_path": inner_path,
+ "file_relative_path": file_relative_path
+ }
+ elif protocol == "websocket":
+ server_url = self.request.getWsServerUrl()
+ if server_url:
+ proto, host = server_url.split("://")
+ origin = proto.replace("http", "ws") + "://" + host
+ else:
+ origin = "{origin}"
+ return {
+ "url": origin + "/ZeroNet-Internal/BigfileUploadWebsocket?upload_nonce=" + nonce,
+ "piece_size": piece_size,
+ "inner_path": inner_path,
+ "file_relative_path": file_relative_path
+ }
+ else:
+ return {"error": "Unknown protocol"}
+
+ @flag.no_multiuser
+ def actionSiteSetAutodownloadBigfileLimit(self, to, limit):
+ permissions = self.getPermissions(to)
+ if "ADMIN" not in permissions:
+ return self.response(to, "You don't have permission to run this command")
+
+ self.site.settings["autodownload_bigfile_size_limit"] = int(limit)
+ self.response(to, "ok")
+
+ def actionFileDelete(self, to, inner_path):
+ piecemap_inner_path = inner_path + ".piecemap.msgpack"
+ if self.hasFilePermission(inner_path) and self.site.storage.isFile(piecemap_inner_path):
+ # Also delete the .piecemap.msgpack file if it exists
+ self.log.debug("Deleting piecemap: %s" % piecemap_inner_path)
+ file_info = self.site.content_manager.getFileInfo(piecemap_inner_path)
+ if file_info:
+ content_json = self.site.storage.loadJson(file_info["content_inner_path"])
+ relative_path = file_info["relative_path"]
+ if relative_path in content_json.get("files_optional", {}):
+ del content_json["files_optional"][relative_path]
+ self.site.storage.writeJson(file_info["content_inner_path"], content_json)
+ self.site.content_manager.loadContent(file_info["content_inner_path"], add_bad_files=False, force=True)
+ try:
+ self.site.storage.delete(piecemap_inner_path)
+ except Exception as err:
+ self.log.error("File %s delete error: %s" % (piecemap_inner_path, err))
+
+ return super(UiWebsocketPlugin, self).actionFileDelete(to, inner_path)
+
+
+@PluginManager.registerTo("ContentManager")
+class ContentManagerPlugin(object):
+ def getFileInfo(self, inner_path, *args, **kwargs):
+ if "|" not in inner_path:
+ return super(ContentManagerPlugin, self).getFileInfo(inner_path, *args, **kwargs)
+
+ inner_path, file_range = inner_path.split("|")
+ pos_from, pos_to = map(int, file_range.split("-"))
+ file_info = super(ContentManagerPlugin, self).getFileInfo(inner_path, *args, **kwargs)
+ return file_info
+
+ def readFile(self, read_func, size, buff_size=1024 * 64):
+ part_num = 0
+ recv_left = size
+
+ while 1:
+ part_num += 1
+ read_size = min(buff_size, recv_left)
+ part = read_func(read_size)
+
+ if not part:
+ break
+ yield part
+
+ if part_num % 100 == 0: # Avoid blocking ZeroNet execution during upload
+ time.sleep(0.001)
+
+ recv_left -= read_size
+ if recv_left <= 0:
+ break
+
+ def hashBigfile(self, read_func, size, piece_size=1024 * 1024, file_out=None):
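+ # Read the file in piece_size chunks, hash every piece with sha512t and build a
+ # merkle tree over the piece hashes. Returns (merkle_root, piece_size,
+ # {"sha512_pieces": [one digest per piece]}); if file_out is given, the data read
+ # is also written to it.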
+ self.site.settings["has_bigfile"] = True
+
+ recv = 0
+ try:
+ piece_hash = CryptHash.sha512t()
+ piece_hashes = []
+ piece_recv = 0
+
+ mt = merkletools.MerkleTools()
+ mt.hash_function = CryptHash.sha512t
+
+ part = ""
+ for part in self.readFile(read_func, size):
+ if file_out:
+ file_out.write(part)
+
+ recv += len(part)
+ piece_recv += len(part)
+ piece_hash.update(part)
+ if piece_recv >= piece_size:
+ piece_digest = piece_hash.digest()
+ piece_hashes.append(piece_digest)
+ mt.leaves.append(piece_digest)
+ piece_hash = CryptHash.sha512t()
+ piece_recv = 0
+
+ if len(piece_hashes) % 100 == 0 or recv == size:
+ self.log.info("- [HASHING:%.0f%%] Pieces: %s, %.1fMB/%.1fMB" % (
+ float(recv) / size * 100, len(piece_hashes), recv / 1024 / 1024, size / 1024 / 1024
+ ))
+ part = ""
+ if len(part) > 0:
+ piece_digest = piece_hash.digest()
+ piece_hashes.append(piece_digest)
+ mt.leaves.append(piece_digest)
+ except Exception as err:
+ raise err
+ finally:
+ if file_out:
+ file_out.close()
+
+ mt.make_tree()
+ merkle_root = mt.get_merkle_root()
+ if type(merkle_root) is bytes: # Python <3.5
+ merkle_root = merkle_root.decode()
+ return merkle_root, piece_size, {
+ "sha512_pieces": piece_hashes
+ }
+
+ def hashFile(self, dir_inner_path, file_relative_path, optional=False):
+ inner_path = dir_inner_path + file_relative_path
+
+ file_size = self.site.storage.getSize(inner_path)
+ # Only care about optional files >1MB
+ if not optional or file_size < 1 * 1024 * 1024:
+ return super(ContentManagerPlugin, self).hashFile(dir_inner_path, file_relative_path, optional)
+
+ back = {}
+ content = self.contents.get(dir_inner_path + "content.json")
+
+ hash = None
+ piecemap_relative_path = None
+ piece_size = None
+
+ # Don't re-hash if it's already in content.json
+ if content and file_relative_path in content.get("files_optional", {}):
+ file_node = content["files_optional"][file_relative_path]
+ if file_node["size"] == file_size:
+ self.log.info("- [SAME SIZE] %s" % file_relative_path)
+ hash = file_node.get("sha512")
+ piecemap_relative_path = file_node.get("piecemap")
+ piece_size = file_node.get("piece_size")
+
+ if not hash or not piecemap_relative_path: # Not in content.json yet
+ if file_size < 5 * 1024 * 1024: # Don't create piecemap automatically for files smaller than 5MB
+ return super(ContentManagerPlugin, self).hashFile(dir_inner_path, file_relative_path, optional)
+
+ self.log.info("- [HASHING] %s" % file_relative_path)
+ merkle_root, piece_size, piecemap_info = self.hashBigfile(self.site.storage.open(inner_path, "rb").read, file_size)
+ if not hash:
+ hash = merkle_root
+
+ if not piecemap_relative_path:
+ file_name = helper.getFilename(file_relative_path)
+ piecemap_relative_path = file_relative_path + ".piecemap.msgpack"
+ piecemap_inner_path = inner_path + ".piecemap.msgpack"
+
+ self.site.storage.open(piecemap_inner_path, "wb").write(Msgpack.pack({file_name: piecemap_info}))
+
+ back.update(super(ContentManagerPlugin, self).hashFile(dir_inner_path, piecemap_relative_path, optional=True))
+
+ piece_num = int(math.ceil(float(file_size) / piece_size))
+
+ # Add the merkle root to hashfield
+ hash_id = self.site.content_manager.hashfield.getHashId(hash)
+ self.optionalDownloaded(inner_path, hash_id, file_size, own=True)
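+ # The whole file is present locally, so mark every piece as downloaded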
+ self.site.storage.piecefields[hash].frombytes(b"\x01" * piece_num)
+
+ back[file_relative_path] = {"sha512": hash, "size": file_size, "piecemap": piecemap_relative_path, "piece_size": piece_size}
+ return back
+
+ def getPiecemap(self, inner_path):
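+ # Download the piecemap if needed and return it as a dict, e.g.
+ # {"sha512_pieces": [<one 32-byte digest per piece>, ...], "piece_size": <bytes>}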
+ file_info = self.site.content_manager.getFileInfo(inner_path)
+ piecemap_inner_path = helper.getDirname(file_info["content_inner_path"]) + file_info["piecemap"]
+ self.site.needFile(piecemap_inner_path, priority=20)
+ piecemap = Msgpack.unpack(self.site.storage.open(piecemap_inner_path, "rb").read())[helper.getFilename(inner_path)]
+ piecemap["piece_size"] = file_info["piece_size"]
+ return piecemap
+
+ def verifyPiece(self, inner_path, pos, piece):
+ try:
+ piecemap = self.getPiecemap(inner_path)
+ except Exception as err:
+ raise VerifyError("Unable to download piecemap: %s" % Debug.formatException(err))
+
+ piece_i = int(pos / piecemap["piece_size"])
+ if CryptHash.sha512sum(piece, format="digest") != piecemap["sha512_pieces"][piece_i]:
+ raise VerifyError("Invalid hash")
+ return True
+
+ def verifyFile(self, inner_path, file, ignore_same=True):
+ if "|" not in inner_path:
+ return super(ContentManagerPlugin, self).verifyFile(inner_path, file, ignore_same)
+
+ inner_path, file_range = inner_path.split("|")
+ pos_from, pos_to = map(int, file_range.split("-"))
+
+ return self.verifyPiece(inner_path, pos_from, file)
+
+ def optionalDownloaded(self, inner_path, hash_id, size=None, own=False):
+ if "|" in inner_path:
+ inner_path, file_range = inner_path.split("|")
+ pos_from, pos_to = map(int, file_range.split("-"))
+ file_info = self.getFileInfo(inner_path)
+
+ # Mark piece downloaded
+ piece_i = int(pos_from / file_info["piece_size"])
+ self.site.storage.piecefields[file_info["sha512"]][piece_i] = b"\x01"
+
+ # Only add to site size on first request
+ if hash_id in self.hashfield:
+ size = 0
+ elif size > 1024 * 1024:
+ file_info = self.getFileInfo(inner_path)
+ if file_info and "sha512" in file_info: # We already have the file, but not in piecefield
+ sha512 = file_info["sha512"]
+ if sha512 not in self.site.storage.piecefields:
+ self.site.storage.checkBigfile(inner_path)
+
+ return super(ContentManagerPlugin, self).optionalDownloaded(inner_path, hash_id, size, own)
+
+ def optionalRemoved(self, inner_path, hash_id, size=None):
+ if size and size > 1024 * 1024:
+ file_info = self.getFileInfo(inner_path)
+ sha512 = file_info["sha512"]
+ if sha512 in self.site.storage.piecefields:
+ del self.site.storage.piecefields[sha512]
+
+ # Also remove other pieces of the file from download queue
+ for key in list(self.site.bad_files.keys()):
+ if key.startswith(inner_path + "|"):
+ del self.site.bad_files[key]
+ self.site.worker_manager.removeSolvedFileTasks()
+ return super(ContentManagerPlugin, self).optionalRemoved(inner_path, hash_id, size)
+
+
+@PluginManager.registerTo("SiteStorage")
+class SiteStoragePlugin(object):
+ def __init__(self, *args, **kwargs):
+ super(SiteStoragePlugin, self).__init__(*args, **kwargs)
+ self.piecefields = collections.defaultdict(BigfilePiecefield)
+ if "piecefields" in self.site.settings.get("cache", {}):
+ for sha512, piecefield_packed in self.site.settings["cache"].get("piecefields").items():
+ if piecefield_packed:
+ self.piecefields[sha512].unpack(base64.b64decode(piecefield_packed))
+ self.site.settings["cache"]["piecefields"] = {}
+
+ def createSparseFile(self, inner_path, size, sha512=None):
+ file_path = self.getPath(inner_path)
+
+ self.ensureDir(os.path.dirname(inner_path))
+
+ f = open(file_path, 'wb')
+ f.truncate(min(1024 * 1024 * 5, size)) # Only pre-allocate up to 5MB
+ f.close()
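+ # On Windows the sparse flag must be set explicitly, so writing a piece at a large
+ # offset later does not allocate the skipped ranges on disk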
+ if os.name == "nt":
+ startupinfo = subprocess.STARTUPINFO()
+ startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+ subprocess.call(["fsutil", "sparse", "setflag", file_path], close_fds=True, startupinfo=startupinfo)
+
+ if sha512 and sha512 in self.piecefields:
+ self.log.debug("%s: File does not exist, but has piecefield. Deleting piecefield." % inner_path)
+ del self.piecefields[sha512]
+
+ def write(self, inner_path, content):
+ if "|" not in inner_path:
+ return super(SiteStoragePlugin, self).write(inner_path, content)
+
+ # Write to a specific range by appending |{pos_from}-{pos_to} to the filename
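+ # e.g. storage.write("data/file.iso|1048576-2097152", piece_data) writes the second 1MB piece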
+ inner_path, file_range = inner_path.split("|")
+ pos_from, pos_to = map(int, file_range.split("-"))
+ file_path = self.getPath(inner_path)
+
+ # Create the directory if it does not exist
+ self.ensureDir(os.path.dirname(inner_path))
+
+ if not os.path.isfile(file_path):
+ file_info = self.site.content_manager.getFileInfo(inner_path)
+ self.createSparseFile(inner_path, file_info["size"])
+
+ # Write file
+ with open(file_path, "rb+") as file:
+ file.seek(pos_from)
+ if hasattr(content, 'read'): # File-like object
+ shutil.copyfileobj(content, file) # Write buff to disk
+ else: # Simple string
+ file.write(content)
+ del content
+ self.onUpdated(inner_path)
+
+ def checkBigfile(self, inner_path):
+ file_info = self.site.content_manager.getFileInfo(inner_path)
+ if not file_info or (file_info and "piecemap" not in file_info): # It's not a big file
+ return False
+
+ self.site.settings["has_bigfile"] = True
+ file_path = self.getPath(inner_path)
+ sha512 = file_info["sha512"]
+ piece_num = int(math.ceil(float(file_info["size"]) / file_info["piece_size"]))
+ if os.path.isfile(file_path):
+ if sha512 not in self.piecefields:
+ if open(file_path, "rb").read(128) == b"\0" * 128:
+ piece_data = b"\x00"
+ else:
+ piece_data = b"\x01"
+ self.log.debug("%s: File exists, but not in piecefield. Filling piecefield with %s * %s." % (inner_path, piece_num, piece_data))
+ self.piecefields[sha512].frombytes(piece_data * piece_num)
+ else:
+ self.log.debug("Creating bigfile: %s" % inner_path)
+ self.createSparseFile(inner_path, file_info["size"], sha512)
+ self.piecefields[sha512].frombytes(b"\x00" * piece_num)
+ self.log.debug("Created bigfile: %s" % inner_path)
+ return True
+
+ def openBigfile(self, inner_path, prebuffer=0):
+ if not self.checkBigfile(inner_path):
+ return False
+ self.site.needFile(inner_path, blocking=False) # Download piecemap
+ return BigFile(self.site, inner_path, prebuffer=prebuffer)
+
+
+class BigFile(object):
+ def __init__(self, site, inner_path, prebuffer=0):
+ self.site = site
+ self.inner_path = inner_path
+ file_path = site.storage.getPath(inner_path)
+ file_info = self.site.content_manager.getFileInfo(inner_path)
+ self.piece_size = file_info["piece_size"]
+ self.sha512 = file_info["sha512"]
+ self.size = file_info["size"]
+ self.prebuffer = prebuffer
+ self.read_bytes = 0
+
+ self.piecefield = self.site.storage.piecefields[self.sha512]
+ self.f = open(file_path, "rb+")
+ self.read_lock = gevent.lock.Semaphore()
+
+ def read(self, buff=64 * 1024):
+ with self.read_lock:
+ pos = self.f.tell()
+ read_until = min(self.size, pos + buff)
+ requests = []
+ # Request all required blocks
+ while 1:
+ piece_i = int(pos / self.piece_size)
+ if piece_i * self.piece_size >= read_until:
+ break
+ pos_from = piece_i * self.piece_size
+ pos_to = pos_from + self.piece_size
+ if not self.piecefield[piece_i]:
+ requests.append(self.site.needFile("%s|%s-%s" % (self.inner_path, pos_from, pos_to), blocking=False, update=True, priority=10))
+ pos += self.piece_size
+
+ if not all(requests):
+ return None
+
+ # Request prebuffer
+ if self.prebuffer:
+ prebuffer_until = min(self.size, read_until + self.prebuffer)
+ priority = 3
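+ # Prebuffer with decreasing priority, so pieces closest to the read position arrive first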
+ while 1:
+ piece_i = int(pos / self.piece_size)
+ if piece_i * self.piece_size >= prebuffer_until:
+ break
+ pos_from = piece_i * self.piece_size
+ pos_to = pos_from + self.piece_size
+ if not self.piecefield[piece_i]:
+ self.site.needFile("%s|%s-%s" % (self.inner_path, pos_from, pos_to), blocking=False, update=True, priority=max(0, priority))
+ priority -= 1
+ pos += self.piece_size
+
+ gevent.joinall(requests)
+ self.read_bytes += buff
+
+ # Increase buffer for long reads
+ if self.read_bytes > 7 * 1024 * 1024 and self.prebuffer < 5 * 1024 * 1024:
+ self.site.log.debug("%s: Increasing bigfile buffer size to 5MB..." % self.inner_path)
+ self.prebuffer = 5 * 1024 * 1024
+
+ return self.f.read(buff)
+
+ def seek(self, pos, whence=0):
+ with self.read_lock:
+ if whence == 2: # Relative from file end
+ pos = self.size + pos # Use the real size instead of size on the disk
+ whence = 0
+ return self.f.seek(pos, whence)
+
+ def seekable(self):
+ return self.f.seekable()
+
+ def tell(self):
+ return self.f.tell()
+
+ def close(self):
+ self.f.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+
+
+@PluginManager.registerTo("WorkerManager")
+class WorkerManagerPlugin(object):
+ def addTask(self, inner_path, *args, **kwargs):
+ file_info = kwargs.get("file_info")
+ if file_info and "piecemap" in file_info: # Bigfile
+ self.site.settings["has_bigfile"] = True
+
+ piecemap_inner_path = helper.getDirname(file_info["content_inner_path"]) + file_info["piecemap"]
+ piecemap_task = None
+ if not self.site.storage.isFile(piecemap_inner_path):
+ # Start downloading the piecemap
+ piecemap_task = super(WorkerManagerPlugin, self).addTask(piecemap_inner_path, priority=30)
+ autodownload_bigfile_size_limit = self.site.settings.get("autodownload_bigfile_size_limit", config.autodownload_bigfile_size_limit)
+ if "|" not in inner_path and self.site.isDownloadable(inner_path) and file_info["size"] / 1024 / 1024 <= autodownload_bigfile_size_limit:
+ gevent.spawn_later(0.1, self.site.needFile, inner_path + "|all") # Download all pieces
+
+ if "|" in inner_path:
+ # Start downloading the piece
+ task = super(WorkerManagerPlugin, self).addTask(inner_path, *args, **kwargs)
+
+ inner_path, file_range = inner_path.split("|")
+ pos_from, pos_to = map(int, file_range.split("-"))
+ task["piece_i"] = int(pos_from / file_info["piece_size"])
+ task["sha512"] = file_info["sha512"]
+ else:
+ if inner_path in self.site.bad_files:
+ del self.site.bad_files[inner_path]
+ if piecemap_task:
+ task = piecemap_task
+ else:
+ fake_evt = gevent.event.AsyncResult() # Don't download anything if no range specified
+ fake_evt.set(True)
+ task = {"evt": fake_evt}
+
+ if not self.site.storage.isFile(inner_path):
+ self.site.storage.createSparseFile(inner_path, file_info["size"], file_info["sha512"])
+ piece_num = int(math.ceil(float(file_info["size"]) / file_info["piece_size"]))
+ self.site.storage.piecefields[file_info["sha512"]].frombytes(b"\x00" * piece_num)
+ else:
+ task = super(WorkerManagerPlugin, self).addTask(inner_path, *args, **kwargs)
+ return task
+
+ def taskAddPeer(self, task, peer):
+ if "piece_i" in task:
+ if not peer.piecefields[task["sha512"]][task["piece_i"]]:
+ if task["sha512"] not in peer.piecefields:
+ gevent.spawn(peer.updatePiecefields, force=True)
+ elif not task["peers"]:
+ gevent.spawn(peer.updatePiecefields)
+
+ return False # Don't add the peer to the task if it doesn't have the piece in its piecefield
+ return super(WorkerManagerPlugin, self).taskAddPeer(task, peer)
+
+
+@PluginManager.registerTo("FileRequest")
+class FileRequestPlugin(object):
+ def isReadable(self, site, inner_path, file, pos):
+ # Peek into file
+ if file.read(10) == b"\0" * 10:
+ # Looks empty, but make sure we don't actually have that piece
+ file_info = site.content_manager.getFileInfo(inner_path)
+ if "piece_size" in file_info:
+ piece_i = int(pos / file_info["piece_size"])
+ if not site.storage.piecefields[file_info["sha512"]][piece_i]:
+ return False
+ # Seek back to the position we want to read
+ file.seek(pos)
+ return super(FileRequestPlugin, self).isReadable(site, inner_path, file, pos)
+
+ def actionGetPiecefields(self, params):
+ site = self.sites.get(params["site"])
+ if not site or not site.isServing(): # Site unknown or not serving
+ self.response({"error": "Unknown site"})
+ return False
+
+ # Add peer to site if not added before
+ peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True)
+ if not peer.connection: # Just added
+ peer.connect(self.connection) # Assign current connection to peer
+
+ piecefields_packed = {sha512: piecefield.pack() for sha512, piecefield in site.storage.piecefields.items()}
+ self.response({"piecefields_packed": piecefields_packed})
+
+ def actionSetPiecefields(self, params):
+ site = self.sites.get(params["site"])
+ if not site or not site.isServing(): # Site unknown or not serving
+ self.response({"error": "Unknown site"})
+ self.connection.badAction(5)
+ return False
+
+ # Add or get peer
+ peer = site.addPeer(self.connection.ip, self.connection.port, return_peer=True, connection=self.connection)
+ if not peer.connection:
+ peer.connect(self.connection)
+
+ peer.piecefields = collections.defaultdict(BigfilePiecefieldPacked)
+ for sha512, piecefield_packed in params["piecefields_packed"].items():
+ peer.piecefields[sha512].unpack(piecefield_packed)
+ site.settings["has_bigfile"] = True
+
+ self.response({"ok": "Updated"})
+
+
+@PluginManager.registerTo("Peer")
+class PeerPlugin(object):
+ def __getattr__(self, key):
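+ # Piecefield related attributes are created lazily on first access instead of in
+ # __init__, keeping Peer objects small until they are actually needed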
+ if key == "piecefields":
+ self.piecefields = collections.defaultdict(BigfilePiecefieldPacked)
+ return self.piecefields
+ elif key == "time_piecefields_updated":
+ self.time_piecefields_updated = None
+ return self.time_piecefields_updated
+ else:
+ return super(PeerPlugin, self).__getattr__(key)
+
+ @util.Noparallel(ignore_args=True)
+ def updatePiecefields(self, force=False):
+ if self.connection and self.connection.handshake.get("rev", 0) < 2190:
+ return False # Not supported
+
+ # Don't update the piecefield again within 1 minute
+ if self.time_piecefields_updated and time.time() - self.time_piecefields_updated < 60 and not force:
+ return False
+
+ self.time_piecefields_updated = time.time()
+ res = self.request("getPiecefields", {"site": self.site.address})
+ if not res or "error" in res:
+ return False
+
+ self.piecefields = collections.defaultdict(BigfilePiecefieldPacked)
+ try:
+ for sha512, piecefield_packed in res["piecefields_packed"].items():
+ self.piecefields[sha512].unpack(piecefield_packed)
+ except Exception as err:
+ self.log("Invalid updatePiecefields response: %s" % Debug.formatException(err))
+
+ return self.piecefields
+
+ def sendMyHashfield(self, *args, **kwargs):
+ return super(PeerPlugin, self).sendMyHashfield(*args, **kwargs)
+
+ def updateHashfield(self, *args, **kwargs):
+ if self.site.settings.get("has_bigfile"):
+ thread = gevent.spawn(self.updatePiecefields, *args, **kwargs)
+ back = super(PeerPlugin, self).updateHashfield(*args, **kwargs)
+ thread.join()
+ return back
+ else:
+ return super(PeerPlugin, self).updateHashfield(*args, **kwargs)
+
+ def getFile(self, site, inner_path, *args, **kwargs):
+ if "|" in inner_path:
+ inner_path, file_range = inner_path.split("|")
+ pos_from, pos_to = map(int, file_range.split("-"))
+ kwargs["pos_from"] = pos_from
+ kwargs["pos_to"] = pos_to
+ return super(PeerPlugin, self).getFile(site, inner_path, *args, **kwargs)
+
+
+@PluginManager.registerTo("Site")
+class SitePlugin(object):
+ def isFileDownloadAllowed(self, inner_path, file_info):
+ if "piecemap" in file_info:
+ file_size_mb = file_info["size"] / 1024 / 1024
+ if config.bigfile_size_limit and file_size_mb > config.bigfile_size_limit:
+ self.log.debug(
+ "Bigfile size %s too large: %sMB > %sMB, skipping..." %
+ (inner_path, file_size_mb, config.bigfile_size_limit)
+ )
+ return False
+
+ file_info = file_info.copy()
+ file_info["size"] = file_info["piece_size"]
+ return super(SitePlugin, self).isFileDownloadAllowed(inner_path, file_info)
+
+ def getSettingsCache(self):
+ back = super(SitePlugin, self).getSettingsCache()
+ if self.storage.piecefields:
+ back["piecefields"] = {sha512: base64.b64encode(piecefield.pack()).decode("utf8") for sha512, piecefield in self.storage.piecefields.items()}
+ return back
+
+ def needFile(self, inner_path, *args, **kwargs):
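+ # "<inner_path>|all" is a virtual path: it expands into one needFile call per missing
+ # piece, executed through a pool of 20 greenlets (pooledNeedBigfile below)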
+ if inner_path.endswith("|all"):
+ @util.Pooled(20)
+ def pooledNeedBigfile(inner_path, *args, **kwargs):
+ if inner_path not in self.bad_files:
+ self.log.debug("Cancelled piece, skipping %s" % inner_path)
+ return False
+ return self.needFile(inner_path, *args, **kwargs)
+
+ inner_path = inner_path.replace("|all", "")
+ file_info = self.needFileInfo(inner_path)
+
+ # Use default function to download non-optional file
+ if "piece_size" not in file_info:
+ return super(SitePlugin, self).needFile(inner_path, *args, **kwargs)
+
+ file_size = file_info["size"]
+ piece_size = file_info["piece_size"]
+
+ piece_num = int(math.ceil(float(file_size) / piece_size))
+
+ file_threads = []
+
+ piecefield = self.storage.piecefields.get(file_info["sha512"])
+
+ for piece_i in range(piece_num):
+ piece_from = piece_i * piece_size
+ piece_to = min(file_size, piece_from + piece_size)
+ if not piecefield or not piecefield[piece_i]:
+ inner_path_piece = "%s|%s-%s" % (inner_path, piece_from, piece_to)
+ self.bad_files[inner_path_piece] = self.bad_files.get(inner_path_piece, 1)
+ res = pooledNeedBigfile(inner_path_piece, blocking=False)
+ if res is not True and res is not False:
+ file_threads.append(res)
+ gevent.joinall(file_threads)
+ else:
+ return super(SitePlugin, self).needFile(inner_path, *args, **kwargs)
+
+
+@PluginManager.registerTo("ConfigPlugin")
+class ConfigPlugin(object):
+ def createArguments(self):
+ group = self.parser.add_argument_group("Bigfile plugin")
+ group.add_argument('--autodownload_bigfile_size_limit', help='Also download bigfiles smaller than this limit if the "help distribute" option is enabled', default=10, metavar="MB", type=int)
+ group.add_argument('--bigfile_size_limit', help='Maximum size of downloaded big files', default=False, metavar="MB", type=int)
+
+ return super(ConfigPlugin, self).createArguments()
diff --git a/plugins/Bigfile/Test/TestBigfile.py b/plugins/Bigfile/Test/TestBigfile.py
new file mode 100644
index 00000000..402646a6
--- /dev/null
+++ b/plugins/Bigfile/Test/TestBigfile.py
@@ -0,0 +1,574 @@
+import time
+import io
+import binascii
+
+import pytest
+import mock
+
+from Connection import ConnectionServer
+from Content.ContentManager import VerifyError
+from File import FileServer
+from File import FileRequest
+from Worker import WorkerManager
+from Peer import Peer
+from Bigfile import BigfilePiecefield, BigfilePiecefieldPacked
+from Test import Spy
+from util import Msgpack
+
+
+@pytest.mark.usefixtures("resetSettings")
+@pytest.mark.usefixtures("resetTempSettings")
+class TestBigfile:
+ privatekey = "5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv"
+ piece_size = 1024 * 1024
+
+ def createBigfile(self, site, inner_path="data/optional.any.iso", pieces=10):
+ f = site.storage.open(inner_path, "w")
+ for i in range(pieces * 100):
+ f.write(("Test%s" % i).ljust(10, "-") * 1000)
+ f.close()
+ assert site.content_manager.sign("content.json", self.privatekey)
+ return inner_path
+
+ def testPiecemapCreate(self, site):
+ inner_path = self.createBigfile(site)
+ content = site.storage.loadJson("content.json")
+ assert "data/optional.any.iso" in content["files_optional"]
+ file_node = content["files_optional"][inner_path]
+ assert file_node["size"] == 10 * 1000 * 1000
+ assert file_node["sha512"] == "47a72cde3be80b4a829e7674f72b7c6878cf6a70b0c58c6aa6c17d7e9948daf6"
+ assert file_node["piecemap"] == inner_path + ".piecemap.msgpack"
+
+ piecemap = Msgpack.unpack(site.storage.open(file_node["piecemap"], "rb").read())["optional.any.iso"]
+ assert len(piecemap["sha512_pieces"]) == 10
+ assert piecemap["sha512_pieces"][0] != piecemap["sha512_pieces"][1]
+ assert binascii.hexlify(piecemap["sha512_pieces"][0]) == b"a73abad9992b3d0b672d0c2a292046695d31bebdcb1e150c8410bbe7c972eff3"
+
+ def testVerifyPiece(self, site):
+ inner_path = self.createBigfile(site)
+
+ # Verify all 10 pieces
+ f = site.storage.open(inner_path, "rb")
+ for i in range(10):
+ piece = io.BytesIO(f.read(1024 * 1024))
+ piece.seek(0)
+ site.content_manager.verifyPiece(inner_path, i * 1024 * 1024, piece)
+ f.close()
+
+ # Try to verify piece 0 with piece 1 hash
+ with pytest.raises(VerifyError) as err:
+ i = 1
+ f = site.storage.open(inner_path, "rb")
+ piece = io.BytesIO(f.read(1024 * 1024))
+ f.close()
+ site.content_manager.verifyPiece(inner_path, i * 1024 * 1024, piece)
+ assert "Invalid hash" in str(err.value)
+
+ def testSparseFile(self, site):
+ inner_path = "sparsefile"
+
+ # Create a 100MB sparse file
+ site.storage.createSparseFile(inner_path, 100 * 1024 * 1024)
+
+ # Write to file beginning
+ s = time.time()
+ f = site.storage.write("%s|%s-%s" % (inner_path, 0, 1024 * 1024), b"hellostart" * 1024)
+ time_write_start = time.time() - s
+
+ # Write to file end
+ s = time.time()
+ f = site.storage.write("%s|%s-%s" % (inner_path, 99 * 1024 * 1024, 99 * 1024 * 1024 + 1024 * 1024), b"helloend" * 1024)
+ time_write_end = time.time() - s
+
+ # Verify writes
+ f = site.storage.open(inner_path)
+ assert f.read(10) == b"hellostart"
+ f.seek(99 * 1024 * 1024)
+ assert f.read(8) == b"helloend"
+ f.close()
+
+ site.storage.delete(inner_path)
+
+ # Writing to the end should not take much longer than writing to the start
+ assert time_write_end <= max(0.1, time_write_start * 1.1)
+
+ def testRangedFileRequest(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ file_server.sites[site.address] = site
+ client = FileServer(file_server.ip, 1545)
+ client.sites[site_temp.address] = site_temp
+ site_temp.connection_server = client
+ connection = client.getConnection(file_server.ip, 1544)
+
+ # Add file_server as peer to client
+ peer_file_server = site_temp.addPeer(file_server.ip, 1544)
+
+ buff = peer_file_server.getFile(site_temp.address, "%s|%s-%s" % (inner_path, 5 * 1024 * 1024, 6 * 1024 * 1024))
+
+ assert len(buff.getvalue()) == 1 * 1024 * 1024 # Correct block size
+ assert buff.getvalue().startswith(b"Test524") # Correct data
+ buff.seek(0)
+ assert site.content_manager.verifyPiece(inner_path, 5 * 1024 * 1024, buff) # Correct hash
+
+ connection.close()
+ client.stop()
+
+ def testRangedFileDownload(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ # Init source server
+ site.connection_server = file_server
+ file_server.sites[site.address] = site
+
+ # Make sure the file and the piecemap are in the optional hashfield
+ file_info = site.content_manager.getFileInfo(inner_path)
+ assert site.content_manager.hashfield.hasHash(file_info["sha512"])
+
+ piecemap_hash = site.content_manager.getFileInfo(file_info["piecemap"])["sha512"]
+ assert site.content_manager.hashfield.hasHash(piecemap_hash)
+
+ # Init client server
+ client = ConnectionServer(file_server.ip, 1545)
+ site_temp.connection_server = client
+ peer_client = site_temp.addPeer(file_server.ip, 1544)
+
+ # Download site
+ site_temp.download(blind_includes=True, retry_bad_files=False).join(timeout=10)
+
+ bad_files = site_temp.storage.verifyFiles(quick_check=True)["bad_files"]
+ assert not bad_files
+
+ # client_piecefield = peer_client.piecefields[file_info["sha512"]].tostring()
+ # assert client_piecefield == "1" * 10
+
+ # Download the blocks at 5MB and 9MB
+
+ site_temp.needFile("%s|%s-%s" % (inner_path, 5 * 1024 * 1024, 6 * 1024 * 1024))
+ site_temp.needFile("%s|%s-%s" % (inner_path, 9 * 1024 * 1024, 10 * 1024 * 1024))
+
+ # Verify the block at offset 0 was not downloaded
+ f = site_temp.storage.open(inner_path)
+ assert f.read(10) == b"\0" * 10
+ # Verify the blocks at 5MB and 9MB were downloaded
+ f.seek(5 * 1024 * 1024)
+ assert f.read(7) == b"Test524"
+ f.seek(9 * 1024 * 1024)
+ assert f.read(7) == b"943---T"
+
+ # Verify hashfield
+ assert set(site_temp.content_manager.hashfield) == set([18343, 43727]) # 18343: data/optional.any.iso, 43727: data/optional.any.iso.piecemap.msgpack
+
+ def testOpenBigfile(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ # Init source server
+ site.connection_server = file_server
+ file_server.sites[site.address] = site
+
+ # Init client server
+ client = ConnectionServer(file_server.ip, 1545)
+ site_temp.connection_server = client
+ site_temp.addPeer(file_server.ip, 1544)
+
+ # Download site
+ site_temp.download(blind_includes=True, retry_bad_files=False).join(timeout=10)
+
+ # Open virtual file
+ assert not site_temp.storage.isFile(inner_path)
+
+ with site_temp.storage.openBigfile(inner_path) as f:
+ with Spy.Spy(FileRequest, "route") as requests:
+ f.seek(5 * 1024 * 1024)
+ assert f.read(7) == b"Test524"
+
+ f.seek(9 * 1024 * 1024)
+ assert f.read(7) == b"943---T"
+
+ assert len(requests) == 4 # 1x piecemap + 1x getpiecefield + 2x for pieces
+
+ assert set(site_temp.content_manager.hashfield) == set([18343, 43727])
+
+ assert site_temp.storage.piecefields[f.sha512].tostring() == "0000010001"
+ assert f.sha512 in site_temp.getSettingsCache()["piecefields"]
+
+ # Test requesting already downloaded
+ with Spy.Spy(FileRequest, "route") as requests:
+ f.seek(5 * 1024 * 1024)
+ assert f.read(7) == b"Test524"
+
+ assert len(requests) == 0
+
+ # Test requesting multi-block overflow reads
+ with Spy.Spy(FileRequest, "route") as requests:
+ f.seek(5 * 1024 * 1024) # We already have this block
+ data = f.read(1024 * 1024 * 3) # The read overflows into the blocks at 6MB and 7MB
+ assert data.startswith(b"Test524")
+ assert data.endswith(b"Test838-")
+ assert b"\0" not in data # No null bytes allowed
+
+ assert len(requests) == 2 # Two block download
+
+ # Test out of range request
+ f.seek(5 * 1024 * 1024)
+ data = f.read(1024 * 1024 * 30)
+ assert len(data) == 10 * 1000 * 1000 - (5 * 1024 * 1024)
+
+ f.seek(30 * 1024 * 1024)
+ data = f.read(1024 * 1024 * 30)
+ assert len(data) == 0
+
+ @pytest.mark.parametrize("piecefield_obj", [BigfilePiecefield, BigfilePiecefieldPacked])
+ def testPiecefield(self, piecefield_obj, site):
+ testdatas = [
+ b"\x01" * 100 + b"\x00" * 900 + b"\x01" * 4000 + b"\x00" * 4999 + b"\x01",
+ b"\x00\x01\x00\x01\x00\x01" * 10 + b"\x00\x01" * 90 + b"\x01\x00" * 400 + b"\x00" * 4999,
+ b"\x01" * 10000,
+ b"\x00" * 10000
+ ]
+ for testdata in testdatas:
+ piecefield = piecefield_obj()
+
+ piecefield.frombytes(testdata)
+ assert piecefield.tobytes() == testdata
+ assert piecefield[0] == testdata[0]
+ assert piecefield[100] == testdata[100]
+ assert piecefield[1000] == testdata[1000]
+ assert piecefield[len(testdata) - 1] == testdata[len(testdata) - 1]
+
+ packed = piecefield.pack()
+ piecefield_new = piecefield_obj()
+ piecefield_new.unpack(packed)
+ assert piecefield.tobytes() == piecefield_new.tobytes()
+ assert piecefield_new.tobytes() == testdata
+
+ def testFileGet(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ # Init source server
+ site.connection_server = file_server
+ file_server.sites[site.address] = site
+
+ # Init client server
+ site_temp.connection_server = FileServer(file_server.ip, 1545)
+ site_temp.connection_server.sites[site_temp.address] = site_temp
+ site_temp.addPeer(file_server.ip, 1544)
+
+ # Download site
+ site_temp.download(blind_includes=True, retry_bad_files=False).join(timeout=10)
+
+ # Download second block
+ with site_temp.storage.openBigfile(inner_path) as f:
+ f.seek(1024 * 1024)
+ assert f.read(1024)[0:1] != b"\0"
+
+ # Make sure the first block has not been downloaded
+ with site_temp.storage.open(inner_path) as f:
+ assert f.read(1024)[0:1] == b"\0"
+
+ peer2 = site.addPeer(file_server.ip, 1545, return_peer=True)
+
+ # The first block request should fail (site_temp has not downloaded that block)
+ assert not peer2.getFile(site.address, "%s|0-%s" % (inner_path, 1024 * 1024 * 1))
+
+ # The second block request should succeed
+ assert peer2.getFile(site.address, "%s|%s-%s" % (inner_path, 1024 * 1024 * 1, 1024 * 1024 * 2))
+
+ def benchmarkPeerMemory(self, site, file_server):
+ # Init source server
+ site.connection_server = file_server
+ file_server.sites[site.address] = site
+
+ import psutil, os
+ meminfo = psutil.Process(os.getpid()).memory_info
+
+ mem_s = meminfo()[0]
+ s = time.time()
+ for i in range(25000):
+ site.addPeer(file_server.ip, i)
+ print("%.3fs MEM: + %sKB" % (time.time() - s, (meminfo()[0] - mem_s) / 1024)) # 0.082s MEM: + 6800KB
+ print(list(site.peers.values())[0].piecefields)
+
+ def testUpdatePiecefield(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ server1 = file_server
+ server1.sites[site.address] = site
+ server2 = FileServer(file_server.ip, 1545)
+ server2.sites[site_temp.address] = site_temp
+ site_temp.connection_server = server2
+
+ # Add file_server as peer to client
+ server2_peer1 = site_temp.addPeer(file_server.ip, 1544)
+
+ # Testing piecefield sync
+ assert len(server2_peer1.piecefields) == 0
+ assert server2_peer1.updatePiecefields() # Query piecefields from peer
+ assert len(server2_peer1.piecefields) > 0
+
+ def testWorkerManagerPiecefieldDeny(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ server1 = file_server
+ server1.sites[site.address] = site
+ server2 = FileServer(file_server.ip, 1545)
+ server2.sites[site_temp.address] = site_temp
+ site_temp.connection_server = server2
+
+ # Add file_server as peer to client
+ server2_peer1 = site_temp.addPeer(file_server.ip, 1544) # Working
+
+ site_temp.downloadContent("content.json", download_files=False)
+ site_temp.needFile("data/optional.any.iso.piecemap.msgpack")
+
+ # Add fake peers with optional files downloaded
+ for i in range(5):
+ fake_peer = site_temp.addPeer("127.0.1.%s" % i, 1544)
+ fake_peer.hashfield = site.content_manager.hashfield
+ fake_peer.has_hashfield = True
+
+ with Spy.Spy(WorkerManager, "addWorker") as requests:
+ site_temp.needFile("%s|%s-%s" % (inner_path, 5 * 1024 * 1024, 6 * 1024 * 1024))
+ site_temp.needFile("%s|%s-%s" % (inner_path, 6 * 1024 * 1024, 7 * 1024 * 1024))
+
+ # It should only request parts from peer1, as the other peers do not have the requested parts in their piecefields
+ assert len([request[1] for request in requests if request[1] != server2_peer1]) == 0
+
+ def testWorkerManagerPiecefieldDownload(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ server1 = file_server
+ server1.sites[site.address] = site
+ server2 = FileServer(file_server.ip, 1545)
+ server2.sites[site_temp.address] = site_temp
+ site_temp.connection_server = server2
+ sha512 = site.content_manager.getFileInfo(inner_path)["sha512"]
+
+ # Create 10 fake peers, one owning each piece
+ for i in range(10):
+ peer = Peer(file_server.ip, 1544, site_temp, server2)
+ peer.piecefields[sha512][i] = b"\x01"
+ peer.updateHashfield = mock.MagicMock(return_value=False)
+ peer.updatePiecefields = mock.MagicMock(return_value=False)
+ peer.findHashIds = mock.MagicMock(return_value={"nope": []})
+ peer.hashfield = site.content_manager.hashfield
+ peer.has_hashfield = True
+ peer.key = "Peer:%s" % i
+ site_temp.peers["Peer:%s" % i] = peer
+
+ site_temp.downloadContent("content.json", download_files=False)
+ site_temp.needFile("data/optional.any.iso.piecemap.msgpack")
+
+ with Spy.Spy(Peer, "getFile") as requests:
+ for i in range(10):
+ site_temp.needFile("%s|%s-%s" % (inner_path, i * 1024 * 1024, (i + 1) * 1024 * 1024))
+
+ assert len(requests) == 10
+ for i in range(10):
+ assert requests[i][0] == site_temp.peers["Peer:%s" % i] # Every part should be requested from the peer that owns that piece
+
+ def testDownloadStats(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ # Init source server
+ site.connection_server = file_server
+ file_server.sites[site.address] = site
+
+ # Init client server
+ client = ConnectionServer(file_server.ip, 1545)
+ site_temp.connection_server = client
+ site_temp.addPeer(file_server.ip, 1544)
+
+ # Download site
+ site_temp.download(blind_includes=True, retry_bad_files=False).join(timeout=10)
+
+ # Open virtual file
+ assert not site_temp.storage.isFile(inner_path)
+
+ # Check size before downloads
+ assert site_temp.settings["size"] < 10 * 1024 * 1024
+ assert site_temp.settings["optional_downloaded"] == 0
+ size_piecemap = site_temp.content_manager.getFileInfo(inner_path + ".piecemap.msgpack")["size"]
+ size_bigfile = site_temp.content_manager.getFileInfo(inner_path)["size"]
+
+ with site_temp.storage.openBigfile(inner_path) as f:
+ assert b"\0" not in f.read(1024)
+ assert site_temp.settings["optional_downloaded"] == size_piecemap + size_bigfile
+
+ with site_temp.storage.openBigfile(inner_path) as f:
+ # Don't count twice
+ assert b"\0" not in f.read(1024)
+ assert site_temp.settings["optional_downloaded"] == size_piecemap + size_bigfile
+
+ # Add second block
+ assert b"\0" not in f.read(1024 * 1024)
+ assert site_temp.settings["optional_downloaded"] == size_piecemap + size_bigfile
+
+ def testPrebuffer(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ # Init source server
+ site.connection_server = file_server
+ file_server.sites[site.address] = site
+
+ # Init client server
+ client = ConnectionServer(file_server.ip, 1545)
+ site_temp.connection_server = client
+ site_temp.addPeer(file_server.ip, 1544)
+
+ # Download site
+ site_temp.download(blind_includes=True, retry_bad_files=False).join(timeout=10)
+
+ # Open virtual file
+ assert not site_temp.storage.isFile(inner_path)
+
+ with site_temp.storage.openBigfile(inner_path, prebuffer=1024 * 1024 * 2) as f:
+ with Spy.Spy(FileRequest, "route") as requests:
+ f.seek(5 * 1024 * 1024)
+ assert f.read(7) == b"Test524"
+ # assert len(requests) == 3 # 1x piecemap + 1x getpiecefield + 1x for pieces
+ assert len([task for task in site_temp.worker_manager.tasks if task["inner_path"].startswith(inner_path)]) == 2
+
+ time.sleep(0.5) # Wait for the prebuffer download
+
+ sha512 = site.content_manager.getFileInfo(inner_path)["sha512"]
+ assert site_temp.storage.piecefields[sha512].tostring() == "0000011100"
+
+ # No prebuffer beyond the end of the file
+ f.seek(9 * 1024 * 1024)
+ assert b"\0" not in f.read(7)
+
+ assert len([task for task in site_temp.worker_manager.tasks if task["inner_path"].startswith(inner_path)]) == 0
+
+ def testDownloadAllPieces(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ # Init source server
+ site.connection_server = file_server
+ file_server.sites[site.address] = site
+
+ # Init client server
+ client = ConnectionServer(file_server.ip, 1545)
+ site_temp.connection_server = client
+ site_temp.addPeer(file_server.ip, 1544)
+
+ # Download site
+ site_temp.download(blind_includes=True, retry_bad_files=False).join(timeout=10)
+
+ # Open virtual file
+ assert not site_temp.storage.isFile(inner_path)
+
+ with Spy.Spy(FileRequest, "route") as requests:
+ site_temp.needFile("%s|all" % inner_path)
+
+ assert len(requests) == 12 # piecemap.msgpack, getPiecefields, 10 x piece
+
+ # Don't re-download pieces we already have
+ with Spy.Spy(FileRequest, "route") as requests:
+ site_temp.needFile("%s|all" % inner_path)
+
+ assert len(requests) == 0
+
+ def testFileSize(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ # Init source server
+ site.connection_server = file_server
+ file_server.sites[site.address] = site
+
+ # Init client server
+ client = ConnectionServer(file_server.ip, 1545)
+ site_temp.connection_server = client
+ site_temp.addPeer(file_server.ip, 1544)
+
+ # Download site
+ site_temp.download(blind_includes=True, retry_bad_files=False).join(timeout=10)
+
+ # Open virtual file
+ assert not site_temp.storage.isFile(inner_path)
+
+ # Download first block
+ site_temp.needFile("%s|%s-%s" % (inner_path, 0 * 1024 * 1024, 1 * 1024 * 1024))
+ assert site_temp.storage.getSize(inner_path) < 1000 * 1000 * 10 # Size on the disk should be smaller than the real size
+
+ site_temp.needFile("%s|%s-%s" % (inner_path, 9 * 1024 * 1024, 10 * 1024 * 1024))
+ assert site_temp.storage.getSize(inner_path) == site.storage.getSize(inner_path)
+
+ def testFileRename(self, file_server, site, site_temp):
+ inner_path = self.createBigfile(site)
+
+ # Init source server
+ site.connection_server = file_server
+ file_server.sites[site.address] = site
+
+ # Init client server
+ site_temp.connection_server = FileServer(file_server.ip, 1545)
+ site_temp.connection_server.sites[site_temp.address] = site_temp
+ site_temp.addPeer(file_server.ip, 1544)
+
+ # Download site
+ site_temp.download(blind_includes=True, retry_bad_files=False).join(timeout=10)
+
+ with Spy.Spy(FileRequest, "route") as requests:
+ site_temp.needFile("%s|%s-%s" % (inner_path, 0, 1 * self.piece_size))
+
+ assert len([req for req in requests if req[1] == "streamFile"]) == 2 # 1 piece + piecemap
+
+ # Rename the file
+ inner_path_new = inner_path.replace(".iso", "-new.iso")
+ site.storage.rename(inner_path, inner_path_new)
+ site.storage.delete("data/optional.any.iso.piecemap.msgpack")
+ assert site.content_manager.sign("content.json", self.privatekey, remove_missing_optional=True)
+
+ files_optional = site.content_manager.contents["content.json"]["files_optional"].keys()
+
+ assert "data/optional.any-new.iso.piecemap.msgpack" in files_optional
+ assert "data/optional.any.iso.piecemap.msgpack" not in files_optional
+ assert "data/optional.any.iso" not in files_optional
+
+ with Spy.Spy(FileRequest, "route") as requests:
+ site.publish()
+ time.sleep(0.1)
+ site_temp.download(blind_includes=True, retry_bad_files=False).join(timeout=10) # Wait for download
+
+ assert len([req[1] for req in requests if req[1] == "streamFile"]) == 0
+
+ with site_temp.storage.openBigfile(inner_path_new, prebuffer=0) as f:
+ f.read(1024)
+
+ # First piece already downloaded
+ assert [req for req in requests if req[1] == "streamFile"] == []
+
+ # The second piece and the changed piecemap need to be downloaded
+ f.seek(self.piece_size)
+ f.read(1024)
+ assert [req[3]["inner_path"] for req in requests if req[1] == "streamFile"] == [inner_path_new + ".piecemap.msgpack", inner_path_new]
+
+ @pytest.mark.parametrize("size", [1024 * 3, 1024 * 1024 * 3, 1024 * 1024 * 30])
+ def testNullFileRead(self, file_server, site, site_temp, size):
+ inner_path = "data/optional.iso"
+
+ f = site.storage.open(inner_path, "w")
+ f.write("\0" * size)
+ f.close()
+ assert site.content_manager.sign("content.json", self.privatekey)
+
+ # Init source server
+ site.connection_server = file_server
+ file_server.sites[site.address] = site
+
+ # Init client server
+ site_temp.connection_server = FileServer(file_server.ip, 1545)
+ site_temp.connection_server.sites[site_temp.address] = site_temp
+ site_temp.addPeer(file_server.ip, 1544)
+
+ # Download site
+ site_temp.download(blind_includes=True, retry_bad_files=False).join(timeout=10)
+
+ if "piecemap" in site.content_manager.getFileInfo(inner_path): # Bigfile
+ site_temp.needFile(inner_path + "|all")
+ else:
+ site_temp.needFile(inner_path)
+
+
+ assert site_temp.storage.getSize(inner_path) == size
diff --git a/plugins/Bigfile/Test/conftest.py b/plugins/Bigfile/Test/conftest.py
new file mode 100644
index 00000000..634e66e2
--- /dev/null
+++ b/plugins/Bigfile/Test/conftest.py
@@ -0,0 +1 @@
+from src.Test.conftest import *
diff --git a/plugins/Bigfile/Test/pytest.ini b/plugins/Bigfile/Test/pytest.ini
new file mode 100644
index 00000000..d09210d1
--- /dev/null
+++ b/plugins/Bigfile/Test/pytest.ini
@@ -0,0 +1,5 @@
+[pytest]
+python_files = Test*.py
+addopts = -rsxX -v --durations=6
+markers =
+ webtest: mark a test as a webtest.
\ No newline at end of file
diff --git a/plugins/Bigfile/__init__.py b/plugins/Bigfile/__init__.py
new file mode 100644
index 00000000..cf2dcb49
--- /dev/null
+++ b/plugins/Bigfile/__init__.py
@@ -0,0 +1,2 @@
+from . import BigfilePlugin
+from .BigfilePiecefield import BigfilePiecefield, BigfilePiecefieldPacked
\ No newline at end of file
diff --git a/plugins/Chart/ChartCollector.py b/plugins/Chart/ChartCollector.py
new file mode 100644
index 00000000..ceb16350
--- /dev/null
+++ b/plugins/Chart/ChartCollector.py
@@ -0,0 +1,181 @@
+import time
+import sys
+import collections
+import itertools
+import logging
+
+import gevent
+from util import helper
+from Config import config
+
+
+class ChartCollector(object):
+ def __init__(self, db):
+ self.db = db
+ if config.action == "main":
+ gevent.spawn_later(60 * 3, self.collector)
+ self.log = logging.getLogger("ChartCollector")
+ self.last_values = collections.defaultdict(dict)
+
+ def setInitialLastValues(self, sites):
+ # Recover last values of site bytes recv/sent
+ for site in sites:
+ self.last_values["site:" + site.address]["site_bytes_recv"] = site.settings.get("bytes_recv", 0)
+ self.last_values["site:" + site.address]["site_bytes_sent"] = site.settings.get("bytes_sent", 0)
+
+ def getCollectors(self):
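+ # Build a metric name -> callable map; keys ending in "|change" are later stored as deltas between runs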
+ collectors = {}
+ import main
+ file_server = main.file_server
+ sites = file_server.getSites()
+ if not sites:
+ return collectors
+ content_db = list(sites.values())[0].content_manager.contents.db
+
+ # Connection stats
+ collectors["connection"] = lambda: len(file_server.connections)
+ collectors["connection_in"] = (
+ lambda: len([1 for connection in file_server.connections if connection.type == "in"])
+ )
+ collectors["connection_onion"] = (
+ lambda: len([1 for connection in file_server.connections if connection.ip.endswith(".onion")])
+ )
+ collectors["connection_ping_avg"] = (
+ lambda: round(1000 * helper.avg(
+ [connection.last_ping_delay for connection in file_server.connections if connection.last_ping_delay]
+ ))
+ )
+ collectors["connection_ping_min"] = (
+ lambda: round(1000 * min(
+ [connection.last_ping_delay for connection in file_server.connections if connection.last_ping_delay]
+ ))
+ )
+ collectors["connection_rev_avg"] = (
+ lambda: helper.avg(
+ [connection.handshake["rev"] for connection in file_server.connections if connection.handshake]
+ )
+ )
+
+ # Request stats
+ collectors["file_bytes_recv|change"] = lambda: file_server.bytes_recv
+ collectors["file_bytes_sent|change"] = lambda: file_server.bytes_sent
+ collectors["request_num_recv|change"] = lambda: file_server.num_recv
+ collectors["request_num_sent|change"] = lambda: file_server.num_sent
+
+ # Limit
+ collectors["optional_limit"] = lambda: content_db.getOptionalLimitBytes()
+ collectors["optional_used"] = lambda: content_db.getOptionalUsedBytes()
+ collectors["optional_downloaded"] = lambda: sum([site.settings.get("optional_downloaded", 0) for site in sites.values()])
+
+ # Peers
+ collectors["peer"] = lambda peers: len(peers)
+ collectors["peer_onion"] = lambda peers: len([True for peer in peers if ".onion" in peer])
+
+ # Size
+ collectors["size"] = lambda: sum([site.settings.get("size", 0) for site in sites.values()])
+ collectors["size_optional"] = lambda: sum([site.settings.get("size_optional", 0) for site in sites.values()])
+ collectors["content"] = lambda: sum([len(site.content_manager.contents) for site in sites.values()])
+
+ return collectors
+
+ def getSiteCollectors(self):
+ site_collectors = {}
+
+ # Size
+ site_collectors["site_size"] = lambda site: site.settings.get("size", 0)
+ site_collectors["site_size_optional"] = lambda site: site.settings.get("size_optional", 0)
+ site_collectors["site_optional_downloaded"] = lambda site: site.settings.get("optional_downloaded", 0)
+ site_collectors["site_content"] = lambda site: len(site.content_manager.contents)
+
+ # Data transfer
+ site_collectors["site_bytes_recv|change"] = lambda site: site.settings.get("bytes_recv", 0)
+ site_collectors["site_bytes_sent|change"] = lambda site: site.settings.get("bytes_sent", 0)
+
+ # Peers
+ site_collectors["site_peer"] = lambda site: len(site.peers)
+ site_collectors["site_peer_onion"] = lambda site: len(
+ [True for peer in site.peers.values() if peer.ip.endswith(".onion")]
+ )
+ site_collectors["site_peer_connected"] = lambda site: len([True for peer in site.peers.values() if peer.connection])
+
+ return site_collectors
+
+ def getUniquePeers(self):
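+ # Merge the peer keys of every site into one deduplicated set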
+ import main
+ sites = main.file_server.getSites()
+ return set(itertools.chain.from_iterable(
+ [site.peers.keys() for site in sites.values()]
+ ))
+
+ def collectDatas(self, collectors, last_values, site=None):
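+ # Run every collector once: site collectors receive the site, peer collectors the unique peer set, the rest no arguments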
+ if site is None:
+ peers = self.getUniquePeers()
+ datas = {}
+ for key, collector in collectors.items():
+ try:
+ if site:
+ value = collector(site)
+ elif key.startswith("peer"):
+ value = collector(peers)
+ else:
+ value = collector()
+ except ValueError:
+ value = None
+ except Exception as err:
+ self.log.info("Collector %s error: %s" % (key, err))
+ value = None
+
+ if "|change" in key: # Store changes relative to last value
+ key = key.replace("|change", "")
+ last_value = last_values.get(key, 0)
+ last_values[key] = value
+ value = value - last_value
+
+ if value is None:
+ datas[key] = None
+ else:
+ datas[key] = round(value, 3)
+ return datas
+
+ def collectGlobal(self, collectors, last_values):
+ now = int(time.time())
+ s = time.time()
+ datas = self.collectDatas(collectors, last_values["global"])
+ values = []
+ for key, value in datas.items():
+ values.append((self.db.getTypeId(key), value, now))
+ self.log.debug("Global collectors done in %.3fs" % (time.time() - s))
+
+ s = time.time()
+ cur = self.db.getCursor()
+ cur.executemany("INSERT INTO data (type_id, value, date_added) VALUES (?, ?, ?)", values)
+ self.log.debug("Global collectors inserted in %.3fs" % (time.time() - s))
+
+ def collectSites(self, sites, collectors, last_values):
+ now = int(time.time())
+ s = time.time()
+ values = []
+ for address, site in list(sites.items()):
+ site_datas = self.collectDatas(collectors, last_values["site:%s" % address], site)
+ for key, value in site_datas.items():
+ values.append((self.db.getTypeId(key), self.db.getSiteId(address), value, now))
+ time.sleep(0.001)
+ self.log.debug("Site collections done in %.3fs" % (time.time() - s))
+
+ s = time.time()
+ cur = self.db.getCursor()
+ cur.executemany("INSERT INTO data (type_id, site_id, value, date_added) VALUES (?, ?, ?, ?)", values)
+ self.log.debug("Site collectors inserted in %.3fs" % (time.time() - s))
+
+ def collector(self):
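+ # Background loop: global stats every 5 minutes, per-site stats every 12th cycle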
+ collectors = self.getCollectors()
+ site_collectors = self.getSiteCollectors()
+ import main
+ sites = main.file_server.getSites()
+ i = 0
+ while 1:
+ self.collectGlobal(collectors, self.last_values)
+ if i % 12 == 0: # Only collect per-site data every hour (every 12th 5-minute cycle)
+ self.collectSites(sites, site_collectors, self.last_values)
+ time.sleep(60 * 5)
+ i += 1
diff --git a/plugins/Chart/ChartDb.py b/plugins/Chart/ChartDb.py
new file mode 100644
index 00000000..66a22082
--- /dev/null
+++ b/plugins/Chart/ChartDb.py
@@ -0,0 +1,133 @@
+from Config import config
+from Db.Db import Db
+import time
+
+
+class ChartDb(Db):
+ def __init__(self):
+ self.version = 2
+ super(ChartDb, self).__init__(self.getSchema(), "%s/chart.db" % config.data_dir)
+ self.foreign_keys = True
+ self.checkTables()
+ self.sites = self.loadSites()
+ self.types = self.loadTypes()
+
+ def getSchema(self):
+ schema = {}
+ schema["db_name"] = "Chart"
+ schema["tables"] = {}
+ schema["tables"]["data"] = {
+ "cols": [
+ ["data_id", "INTEGER PRIMARY KEY ASC AUTOINCREMENT NOT NULL UNIQUE"],
+ ["type_id", "INTEGER NOT NULL"],
+ ["site_id", "INTEGER"],
+ ["value", "INTEGER"],
+ ["date_added", "DATETIME DEFAULT (CURRENT_TIMESTAMP)"]
+ ],
+ "indexes": [
+ "CREATE INDEX site_id ON data (site_id)",
+ "CREATE INDEX date_added ON data (date_added)"
+ ],
+ "schema_changed": 2
+ }
+ schema["tables"]["type"] = {
+ "cols": [
+ ["type_id", "INTEGER PRIMARY KEY NOT NULL UNIQUE"],
+ ["name", "TEXT"]
+ ],
+ "schema_changed": 1
+ }
+ schema["tables"]["site"] = {
+ "cols": [
+ ["site_id", "INTEGER PRIMARY KEY NOT NULL UNIQUE"],
+ ["address", "TEXT"]
+ ],
+ "schema_changed": 1
+ }
+ return schema
+
+ def getTypeId(self, name):
+ if name not in self.types:
+ res = self.execute("INSERT INTO type ?", {"name": name})
+ self.types[name] = res.lastrowid
+
+ return self.types[name]
+
+ def getSiteId(self, address):
+ if address not in self.sites:
+ res = self.execute("INSERT INTO site ?", {"address": address})
+ self.sites[address] = res.lastrowid
+
+ return self.sites[address]
+
+ def loadSites(self):
+ sites = {}
+ for row in self.execute("SELECT * FROM site"):
+ sites[row["address"]] = row["site_id"]
+ return sites
+
+ def loadTypes(self):
+ types = {}
+ for row in self.execute("SELECT * FROM type"):
+ types[row["name"]] = row["type_id"]
+ return types
+
+ def deleteSite(self, address):
+ if address in self.sites:
+ site_id = self.sites[address]
+ del self.sites[address]
+ self.execute("DELETE FROM site WHERE ?", {"site_id": site_id})
+ self.execute("DELETE FROM data WHERE ?", {"site_id": site_id})
+
+ def archive(self):
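+ # Compact global rows older than a week into hourly aggregates, week by week, then prune old global/site rows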
+ week_back = 1
+ while 1:
+ s = time.time()
+ date_added_from = time.time() - 60 * 60 * 24 * 7 * (week_back + 1)
+ date_added_to = date_added_from + 60 * 60 * 24 * 7
+ res = self.execute("""
+ SELECT
+ MAX(date_added) AS date_added,
+ SUM(value) AS value,
+ GROUP_CONCAT(data_id) AS data_ids,
+ type_id,
+ site_id,
+ COUNT(*) AS num
+ FROM data
+ WHERE
+ site_id IS NULL AND
+ date_added > :date_added_from AND
+ date_added < :date_added_to
+ GROUP BY strftime('%Y-%m-%d %H', date_added, 'unixepoch', 'localtime'), type_id
+ """, {"date_added_from": date_added_from, "date_added_to": date_added_to})
+
+ num_archived = 0
+ cur = self.getCursor()
+ for row in res:
+ if row["num"] == 1:
+ continue
+ cur.execute("INSERT INTO data ?", {
+ "type_id": row["type_id"],
+ "site_id": row["site_id"],
+ "value": row["value"],
+ "date_added": row["date_added"]
+ })
+ cur.execute("DELETE FROM data WHERE data_id IN (%s)" % row["data_ids"])
+ num_archived += row["num"]
+ self.log.debug("Archived %s data from %s weeks ago in %.3fs" % (num_archived, week_back, time.time() - s))
+ week_back += 1
+ time.sleep(0.1)
+ if num_archived == 0:
+ break
+ # Only keep 6 month of global stats
+ self.execute(
+ "DELETE FROM data WHERE site_id IS NULL AND date_added < :date_added_limit",
+ {"date_added_limit": time.time() - 60 * 60 * 24 * 30 * 6 }
+ )
+ # Only keep 1 month of site stats
+ self.execute(
+ "DELETE FROM data WHERE site_id IS NOT NULL AND date_added < :date_added_limit",
+ {"date_added_limit": time.time() - 60 * 60 * 24 * 30 }
+ )
+ if week_back > 1:
+ self.execute("VACUUM")
diff --git a/plugins/Chart/ChartPlugin.py b/plugins/Chart/ChartPlugin.py
new file mode 100644
index 00000000..80a4d976
--- /dev/null
+++ b/plugins/Chart/ChartPlugin.py
@@ -0,0 +1,57 @@
+import time
+import itertools
+
+import gevent
+
+from Config import config
+from util import helper
+from util.Flag import flag
+from Plugin import PluginManager
+from .ChartDb import ChartDb
+from .ChartCollector import ChartCollector
+
+if "db" not in locals().keys(): # Share on reloads
+ db = ChartDb()
+ gevent.spawn_later(10 * 60, db.archive)
+ helper.timer(60 * 60 * 6, db.archive)
+ collector = ChartCollector(db)
+
+@PluginManager.registerTo("SiteManager")
+class SiteManagerPlugin(object):
+ def load(self, *args, **kwargs):
+ back = super(SiteManagerPlugin, self).load(*args, **kwargs)
+ collector.setInitialLastValues(self.sites.values())
+ return back
+
+ def delete(self, address, *args, **kwargs):
+ db.deleteSite(address)
+ return super(SiteManagerPlugin, self).delete(address, *args, **kwargs)
+
+@PluginManager.registerTo("UiWebsocket")
+class UiWebsocketPlugin(object):
+ @flag.admin
+ def actionChartDbQuery(self, to, query, params=None):
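+ # Admin-only: run a read-only SELECT against chart.db and return the rows as dicts
+ # Example (illustrative): query="SELECT * FROM data WHERE type_id = :type_id LIMIT 10", params={"type_id": 1}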
+ if config.debug or config.verbose:
+ s = time.time()
+ rows = []
+ try:
+ if not query.strip().upper().startswith("SELECT"):
+ raise Exception("Only SELECT query supported")
+ res = db.execute(query, params)
+ except Exception as err: # Return the error to the client
+ self.log.error("ChartDbQuery error: %s" % err)
+ return {"error": str(err)}
+ # Convert result to dict
+ for row in res:
+ rows.append(dict(row))
+ if config.verbose and time.time() - s > 0.1: # Log slow query
+ self.log.debug("Slow query: %s (%.3fs)" % (query, time.time() - s))
+ return rows
+
+ @flag.admin
+ def actionChartGetPeerLocations(self, to):
+ peers = {}
+ for site in self.server.sites.values():
+ peers.update(site.peers)
+ peer_locations = self.getPeerLocations(peers)
+ return peer_locations
diff --git a/plugins/Chart/__init__.py b/plugins/Chart/__init__.py
new file mode 100644
index 00000000..2c284609
--- /dev/null
+++ b/plugins/Chart/__init__.py
@@ -0,0 +1 @@
+from . import ChartPlugin
\ No newline at end of file
diff --git a/plugins/Chart/plugin_info.json b/plugins/Chart/plugin_info.json
new file mode 100644
index 00000000..3bdaea8a
--- /dev/null
+++ b/plugins/Chart/plugin_info.json
@@ -0,0 +1,5 @@
+{
+ "name": "Chart",
+ "description": "Collect and provide stats of client information.",
+ "default": "enabled"
+}
\ No newline at end of file
diff --git a/plugins/ContentFilter/ContentFilterPlugin.py b/plugins/ContentFilter/ContentFilterPlugin.py
new file mode 100644
index 00000000..6bd8c7f7
--- /dev/null
+++ b/plugins/ContentFilter/ContentFilterPlugin.py
@@ -0,0 +1,270 @@
+import time
+import re
+import html
+import os
+
+from Plugin import PluginManager
+from Translate import Translate
+from Config import config
+from util.Flag import flag
+
+from .ContentFilterStorage import ContentFilterStorage
+
+
+plugin_dir = os.path.dirname(__file__)
+
+if "_" not in locals():
+ _ = Translate(plugin_dir + "/languages/")
+
+
+@PluginManager.registerTo("SiteManager")
+class SiteManagerPlugin(object):
+ def load(self, *args, **kwargs):
+ global filter_storage
+ super(SiteManagerPlugin, self).load(*args, **kwargs)
+ filter_storage = ContentFilterStorage(site_manager=self)
+
+ def isAddressBlocked(self, address):
+ # FIXME: code duplication of isSiteblocked(address) or isSiteblocked(address_hashed)
+ # in several places here and below
+ address_hashed = filter_storage.getSiteAddressHashed(address)
+ if filter_storage.isSiteblocked(address) or filter_storage.isSiteblocked(address_hashed):
+ return True
+ return super(SiteManagerPlugin, self).isAddressBlocked(address)
+
+ def add(self, address, *args, **kwargs):
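+ # Refuse to add a site whose address (or its hashed form) is block listed, unless ignore_block or preset settings are given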
+ should_ignore_block = kwargs.get("ignore_block") or kwargs.get("settings")
+ if should_ignore_block:
+ block_details = None
+ elif filter_storage.isSiteblocked(address):
+ block_details = filter_storage.getSiteblockDetails(address)
+ else:
+ address_hashed = filter_storage.getSiteAddressHashed(address)
+ if filter_storage.isSiteblocked(address_hashed):
+ block_details = filter_storage.getSiteblockDetails(address_hashed)
+ else:
+ block_details = None
+
+ if block_details:
+ raise Exception("Site blocked: %s" % html.escape(block_details.get("reason", "unknown reason")))
+ else:
+ return super(SiteManagerPlugin, self).add(address, *args, **kwargs)
+
+
+@PluginManager.registerTo("UiWebsocket")
+class UiWebsocketPlugin(object):
+ # Mute
+ def cbMuteAdd(self, to, auth_address, cert_user_id, reason):
+ filter_storage.file_content["mutes"][auth_address] = {
+ "cert_user_id": cert_user_id, "reason": reason, "source": self.site.address, "date_added": time.time()
+ }
+ filter_storage.save()
+ filter_storage.changeDbs(auth_address, "remove")
+ self.response(to, "ok")
+
+ @flag.no_multiuser
+ def actionMuteAdd(self, to, auth_address, cert_user_id, reason):
+ if "ADMIN" in self.getPermissions(to):
+ self.cbMuteAdd(to, auth_address, cert_user_id, reason)
+ else:
+ self.cmd(
+ "confirm",
+ [_["Hide all content from %s?"] % html.escape(cert_user_id), _["Mute"]],
+ lambda res: self.cbMuteAdd(to, auth_address, cert_user_id, reason)
+ )
+
+ @flag.no_multiuser
+ def cbMuteRemove(self, to, auth_address):
+ del filter_storage.file_content["mutes"][auth_address]
+ filter_storage.save()
+ filter_storage.changeDbs(auth_address, "load")
+ self.response(to, "ok")
+
+ @flag.no_multiuser
+ def actionMuteRemove(self, to, auth_address):
+ if "ADMIN" in self.getPermissions(to):
+ self.cbMuteRemove(to, auth_address)
+ else:
+ cert_user_id = html.escape(filter_storage.file_content["mutes"][auth_address]["cert_user_id"])
+ self.cmd(
+ "confirm",
+ [_["Unmute %s?"] % cert_user_id, _["Unmute"]],
+ lambda res: self.cbMuteRemove(to, auth_address)
+ )
+
+ @flag.admin
+ def actionMuteList(self, to):
+ self.response(to, filter_storage.file_content["mutes"])
+
+ # Siteblock
+ @flag.no_multiuser
+ @flag.admin
+ def actionSiteblockIgnoreAddSite(self, to, site_address):
+ if site_address in filter_storage.site_manager.sites:
+ return {"error": "Site already added"}
+ else:
+ if filter_storage.site_manager.need(site_address, ignore_block=True):
+ return "ok"
+ else:
+ return {"error": "Invalid address"}
+
+ @flag.no_multiuser
+ @flag.admin
+ def actionSiteblockAdd(self, to, site_address, reason=None):
+ filter_storage.file_content["siteblocks"][site_address] = {"date_added": time.time(), "reason": reason}
+ filter_storage.save()
+ self.response(to, "ok")
+
+ @flag.no_multiuser
+ @flag.admin
+ def actionSiteblockRemove(self, to, site_address):
+ del filter_storage.file_content["siteblocks"][site_address]
+ filter_storage.save()
+ self.response(to, "ok")
+
+ @flag.admin
+ def actionSiteblockList(self, to):
+ self.response(to, filter_storage.file_content["siteblocks"])
+
+ @flag.admin
+ def actionSiteblockGet(self, to, site_address):
+ if filter_storage.isSiteblocked(site_address):
+ res = filter_storage.getSiteblockDetails(site_address)
+ else:
+ site_address_hashed = filter_storage.getSiteAddressHashed(site_address)
+ if filter_storage.isSiteblocked(site_address_hashed):
+ res = filter_storage.getSiteblockDetails(site_address_hashed)
+ else:
+ res = {"error": "Site block not found"}
+ self.response(to, res)
+
+ # Include
+ @flag.no_multiuser
+ def actionFilterIncludeAdd(self, to, inner_path, description=None, address=None):
+ if address:
+ if "ADMIN" not in self.getPermissions(to):
+ return self.response(to, {"error": "Forbidden: Only ADMIN sites can manage different site include"})
+ site = self.server.sites[address]
+ else:
+ address = self.site.address
+ site = self.site
+
+ if "ADMIN" in self.getPermissions(to):
+ self.cbFilterIncludeAdd(to, True, address, inner_path, description)
+ else:
+ content = site.storage.loadJson(inner_path)
+ title = _["New shared global content filter: %s (%s sites, %s users)"] % (
+ html.escape(inner_path), len(content.get("siteblocks", {})), len(content.get("mutes", {}))
+ )
+
+ self.cmd(
+ "confirm",
+ [title, "Add"],
+ lambda res: self.cbFilterIncludeAdd(to, res, address, inner_path, description)
+ )
+
+ def cbFilterIncludeAdd(self, to, res, address, inner_path, description):
+ if not res:
+ self.response(to, res)
+ return False
+
+ filter_storage.includeAdd(address, inner_path, description)
+ self.response(to, "ok")
+
+ @flag.no_multiuser
+ def actionFilterIncludeRemove(self, to, inner_path, address=None):
+ if address:
+ if "ADMIN" not in self.getPermissions(to):
+ return self.response(to, {"error": "Forbidden: Only ADMIN sites can manage different site include"})
+ else:
+ address = self.site.address
+
+ key = "%s/%s" % (address, inner_path)
+ if key not in filter_storage.file_content["includes"]:
+ self.response(to, {"error": "Include not found"})
+ filter_storage.includeRemove(address, inner_path)
+ self.response(to, "ok")
+
+ def actionFilterIncludeList(self, to, all_sites=False, filters=False):
+ if all_sites and "ADMIN" not in self.getPermissions(to):
+ return self.response(to, {"error": "Forbidden: Only ADMIN sites can list all sites includes"})
+
+ back = []
+ includes = filter_storage.file_content.get("includes", {}).values()
+ for include in includes:
+ if not all_sites and include["address"] != self.site.address:
+ continue
+ if filters:
+ include = dict(include) # Don't modify original file_content
+ include_site = filter_storage.site_manager.get(include["address"])
+ if not include_site:
+ continue
+ content = include_site.storage.loadJson(include["inner_path"])
+ include["mutes"] = content.get("mutes", {})
+ include["siteblocks"] = content.get("siteblocks", {})
+ back.append(include)
+ self.response(to, back)
+
+
+@PluginManager.registerTo("SiteStorage")
+class SiteStoragePlugin(object):
+ def updateDbFile(self, inner_path, file=None, cur=None):
+ if file is not False: # File deletion always allowed
+ # Look for bitcoin auth addresses in the file path
+ matches = re.findall("/(1[A-Za-z0-9]{26,35})/", inner_path)
+ # Check if any of the addresses are in the mute list
+ for auth_address in matches:
+ if filter_storage.isMuted(auth_address):
+ self.log.debug("Mute match: %s, ignoring %s" % (auth_address, inner_path))
+ return False
+
+ return super(SiteStoragePlugin, self).updateDbFile(inner_path, file=file, cur=cur)
+
+ def onUpdated(self, inner_path, file=None):
+ file_path = "%s/%s" % (self.site.address, inner_path)
+ if file_path in filter_storage.file_content["includes"]:
+ self.log.debug("Filter file updated: %s" % inner_path)
+ filter_storage.includeUpdateAll()
+ return super(SiteStoragePlugin, self).onUpdated(inner_path, file=file)
+
+
+@PluginManager.registerTo("UiRequest")
+class UiRequestPlugin(object):
+ def actionWrapper(self, path, extra_headers=None):
+ match = re.match(r"/(?P[A-Za-z0-9\._-]+)(?P/.*|$)", path)
+ if not match:
+ return False
+ address = match.group("address")
+
+ if self.server.site_manager.get(address): # Site already exists
+ return super(UiRequestPlugin, self).actionWrapper(path, extra_headers)
+
+ if self.isDomain(address):
+ address = self.resolveDomain(address)
+
+ if address:
+ address_hashed = filter_storage.getSiteAddressHashed(address)
+ else:
+ address_hashed = None
+
+ if filter_storage.isSiteblocked(address) or filter_storage.isSiteblocked(address_hashed):
+ site = self.server.site_manager.get(config.homepage)
+ if not extra_headers:
+ extra_headers = {}
+
+ script_nonce = self.getScriptNonce()
+
+ self.sendHeader(extra_headers=extra_headers, script_nonce=script_nonce)
+ return iter([super(UiRequestPlugin, self).renderWrapper(
+ site, path, "uimedia/plugins/contentfilter/blocklisted.html?address=" + address,
+ "Blacklisted site", extra_headers, show_loadingscreen=False, script_nonce=script_nonce
+ )])
+ else:
+ return super(UiRequestPlugin, self).actionWrapper(path, extra_headers)
+
+ def actionUiMedia(self, path, *args, **kwargs):
+ if path.startswith("/uimedia/plugins/contentfilter/"):
+ file_path = path.replace("/uimedia/plugins/contentfilter/", plugin_dir + "/media/")
+ return self.actionFile(file_path)
+ else:
+ return super(UiRequestPlugin, self).actionUiMedia(path)
diff --git a/plugins/ContentFilter/ContentFilterStorage.py b/plugins/ContentFilter/ContentFilterStorage.py
new file mode 100644
index 00000000..289ec2a9
--- /dev/null
+++ b/plugins/ContentFilter/ContentFilterStorage.py
@@ -0,0 +1,164 @@
+import os
+import json
+import logging
+import collections
+import time
+import hashlib
+
+from Debug import Debug
+from Plugin import PluginManager
+from Config import config
+from util import helper
+
+
+class ContentFilterStorage(object):
+ def __init__(self, site_manager):
+ self.log = logging.getLogger("ContentFilterStorage")
+ self.file_path = "%s/filters.json" % config.data_dir
+ self.site_manager = site_manager
+ self.file_content = self.load()
+
+ # Set default values for filters.json
+ if not self.file_content:
+ self.file_content = {}
+
+ # Site blacklist renamed to site blocks
+ if "site_blacklist" in self.file_content:
+ self.file_content["siteblocks"] = self.file_content["site_blacklist"]
+ del self.file_content["site_blacklist"]
+
+ for key in ["mutes", "siteblocks", "includes"]:
+ if key not in self.file_content:
+ self.file_content[key] = {}
+
+ self.include_filters = collections.defaultdict(set) # Merged sets of mutes and site blocks from all includes
+ self.includeUpdateAll(update_site_dbs=False)
+
+ def load(self):
+ # Rename previously used mutes.json -> filters.json
+ if os.path.isfile("%s/mutes.json" % config.data_dir):
+ self.log.info("Renaming mutes.json to filters.json...")
+ os.rename("%s/mutes.json" % config.data_dir, self.file_path)
+ if os.path.isfile(self.file_path):
+ try:
+ return json.load(open(self.file_path))
+ except Exception as err:
+ self.log.error("Error loading filters.json: %s" % err)
+ return None
+ else:
+ return None
+
+ def includeUpdateAll(self, update_site_dbs=True):
+ s = time.time()
+ new_include_filters = collections.defaultdict(set)
+
+ # Load all include files data into a merged set
+ for include_path in self.file_content["includes"]:
+ address, inner_path = include_path.split("/", 1)
+ try:
+ content = self.site_manager.get(address).storage.loadJson(inner_path)
+ except Exception as err:
+ self.log.warning(
+ "Error loading include %s: %s" %
+ (include_path, Debug.formatException(err))
+ )
+ continue
+
+ for key, val in content.items():
+ if type(val) is not dict:
+ continue
+
+ new_include_filters[key].update(val.keys())
+
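+ # Diff against the previous filter state so affected users' site dbs can be updated below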
+ mutes_added = new_include_filters["mutes"].difference(self.include_filters["mutes"])
+ mutes_removed = self.include_filters["mutes"].difference(new_include_filters["mutes"])
+
+ self.include_filters = new_include_filters
+
+ if update_site_dbs:
+ for auth_address in mutes_added:
+ self.changeDbs(auth_address, "remove")
+
+ for auth_address in mutes_removed:
+ if not self.isMuted(auth_address):
+ self.changeDbs(auth_address, "load")
+
+ num_mutes = len(self.include_filters["mutes"])
+ num_siteblocks = len(self.include_filters["siteblocks"])
+ self.log.debug(
+ "Loaded %s mutes, %s blocked sites from %s includes in %.3fs" %
+ (num_mutes, num_siteblocks, len(self.file_content["includes"]), time.time() - s)
+ )
+
+ def includeAdd(self, address, inner_path, description=None):
+ self.file_content["includes"]["%s/%s" % (address, inner_path)] = {
+ "date_added": time.time(),
+ "address": address,
+ "description": description,
+ "inner_path": inner_path
+ }
+ self.includeUpdateAll()
+ self.save()
+
+ def includeRemove(self, address, inner_path):
+ del self.file_content["includes"]["%s/%s" % (address, inner_path)]
+ self.includeUpdateAll()
+ self.save()
+
+ def save(self):
+ s = time.time()
+ helper.atomicWrite(self.file_path, json.dumps(self.file_content, indent=2, sort_keys=True).encode("utf8"))
+ self.log.debug("Saved in %.3fs" % (time.time() - s))
+
+ def isMuted(self, auth_address):
+ if auth_address in self.file_content["mutes"] or auth_address in self.include_filters["mutes"]:
+ return True
+ else:
+ return False
+
+ def getSiteAddressHashed(self, address):
+ return "0x" + hashlib.sha256(address.encode("ascii")).hexdigest()
+
+ def isSiteblocked(self, address):
+ if address in self.file_content["siteblocks"] or address in self.include_filters["siteblocks"]:
+ return True
+ return False
+
+ def getSiteblockDetails(self, address):
+ details = self.file_content["siteblocks"].get(address)
+ if not details:
+ address_sha256 = self.getSiteAddressHashed(address)
+ details = self.file_content["siteblocks"].get(address_sha256)
+
+ if not details:
+ includes = self.file_content.get("includes", {}).values()
+ for include in includes:
+ include_site = self.site_manager.get(include["address"])
+ if not include_site:
+ continue
+ content = include_site.storage.loadJson(include["inner_path"])
+ details = content.get("siteblocks", {}).get(address)
+ if details:
+ details["include"] = include
+ break
+
+ return details
+
+ # Search and remove or re-add files of a user
+ def changeDbs(self, auth_address, action):
+ self.log.debug("Mute action %s on user %s" % (action, auth_address))
+ res = list(self.site_manager.list().values())[0].content_manager.contents.db.execute(
+ "SELECT * FROM content LEFT JOIN site USING (site_id) WHERE inner_path LIKE :inner_path",
+ {"inner_path": "%%/%s/%%" % auth_address}
+ )
+ for row in res:
+ site = self.site_manager.sites.get(row["address"])
+ if not site:
+ continue
+ dir_inner_path = helper.getDirname(row["inner_path"])
+ for file_name in site.storage.walk(dir_inner_path):
+ if action == "remove":
+ site.storage.onUpdated(dir_inner_path + file_name, False)
+ else:
+ site.storage.onUpdated(dir_inner_path + file_name)
+ site.onFileDone(dir_inner_path + file_name)
diff --git a/plugins/ContentFilter/Test/TestContentFilter.py b/plugins/ContentFilter/Test/TestContentFilter.py
new file mode 100644
index 00000000..e1b37b16
--- /dev/null
+++ b/plugins/ContentFilter/Test/TestContentFilter.py
@@ -0,0 +1,82 @@
+import pytest
+from ContentFilter import ContentFilterPlugin
+from Site import SiteManager
+
+
+@pytest.fixture
+def filter_storage():
+ ContentFilterPlugin.filter_storage = ContentFilterPlugin.ContentFilterStorage(SiteManager.site_manager)
+ return ContentFilterPlugin.filter_storage
+
+
+@pytest.mark.usefixtures("resetSettings")
+@pytest.mark.usefixtures("resetTempSettings")
+class TestContentFilter:
+ def createInclude(self, site):
+ site.storage.writeJson("filters.json", {
+ "mutes": {"1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C": {}},
+ "siteblocks": {site.address: {}}
+ })
+
+ def testIncludeLoad(self, site, filter_storage):
+ self.createInclude(site)
+ filter_storage.file_content["includes"]["%s/%s" % (site.address, "filters.json")] = {
+ "date_added": 1528295893,
+ }
+
+ assert not filter_storage.include_filters["mutes"]
+ assert not filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
+ assert not filter_storage.isSiteblocked(site.address)
+ filter_storage.includeUpdateAll(update_site_dbs=False)
+ assert len(filter_storage.include_filters["mutes"]) == 1
+ assert filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
+ assert filter_storage.isSiteblocked(site.address)
+
+ def testIncludeAdd(self, site, filter_storage):
+ self.createInclude(site)
+ query_num_json = "SELECT COUNT(*) AS num FROM json WHERE directory = 'users/1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C'"
+ assert not filter_storage.isSiteblocked(site.address)
+ assert not filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
+ assert site.storage.query(query_num_json).fetchone()["num"] == 2
+
+ # Add include
+ filter_storage.includeAdd(site.address, "filters.json")
+
+ assert filter_storage.isSiteblocked(site.address)
+ assert filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
+ assert site.storage.query(query_num_json).fetchone()["num"] == 0
+
+ # Remove include
+ filter_storage.includeRemove(site.address, "filters.json")
+
+ assert not filter_storage.isSiteblocked(site.address)
+ assert not filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
+ assert site.storage.query(query_num_json).fetchone()["num"] == 2
+
+ def testIncludeChange(self, site, filter_storage):
+ self.createInclude(site)
+ filter_storage.includeAdd(site.address, "filters.json")
+ assert filter_storage.isSiteblocked(site.address)
+ assert filter_storage.isMuted("1J6UrZMkarjVg5ax9W4qThir3BFUikbW6C")
+
+ # Add new blocked site
+ assert not filter_storage.isSiteblocked("1Hello")
+
+ filter_content = site.storage.loadJson("filters.json")
+ filter_content["siteblocks"]["1Hello"] = {}
+ site.storage.writeJson("filters.json", filter_content)
+
+ assert filter_storage.isSiteblocked("1Hello")
+
+ # Add new muted user
+ query_num_json = "SELECT COUNT(*) AS num FROM json WHERE directory = 'users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q'"
+ assert not filter_storage.isMuted("1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q")
+ assert site.storage.query(query_num_json).fetchone()["num"] == 2
+
+ filter_content["mutes"]["1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q"] = {}
+ site.storage.writeJson("filters.json", filter_content)
+
+ assert filter_storage.isMuted("1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q")
+ assert site.storage.query(query_num_json).fetchone()["num"] == 0
+
+
diff --git a/plugins/ContentFilter/Test/conftest.py b/plugins/ContentFilter/Test/conftest.py
new file mode 100644
index 00000000..634e66e2
--- /dev/null
+++ b/plugins/ContentFilter/Test/conftest.py
@@ -0,0 +1 @@
+from src.Test.conftest import *
diff --git a/plugins/ContentFilter/Test/pytest.ini b/plugins/ContentFilter/Test/pytest.ini
new file mode 100644
index 00000000..d09210d1
--- /dev/null
+++ b/plugins/ContentFilter/Test/pytest.ini
@@ -0,0 +1,5 @@
+[pytest]
+python_files = Test*.py
+addopts = -rsxX -v --durations=6
+markers =
+ webtest: mark a test as a webtest.
\ No newline at end of file
diff --git a/plugins/ContentFilter/__init__.py b/plugins/ContentFilter/__init__.py
new file mode 100644
index 00000000..2cbca8ee
--- /dev/null
+++ b/plugins/ContentFilter/__init__.py
@@ -0,0 +1 @@
+from . import ContentFilterPlugin
diff --git a/plugins/ContentFilter/languages/hu.json b/plugins/ContentFilter/languages/hu.json
new file mode 100644
index 00000000..9b57e697
--- /dev/null
+++ b/plugins/ContentFilter/languages/hu.json
@@ -0,0 +1,6 @@
+{
+ "Hide all content from %s?": "%s tartalmaniak elrejtése?",
+ "Mute": "Elnémítás",
+ "Unmute %s?": "%s tartalmaniak megjelenítése?",
+ "Unmute": "Némítás visszavonása"
+}
diff --git a/plugins/ContentFilter/languages/it.json b/plugins/ContentFilter/languages/it.json
new file mode 100644
index 00000000..9a2c6761
--- /dev/null
+++ b/plugins/ContentFilter/languages/it.json
@@ -0,0 +1,6 @@
+{
+ "Hide all content from %s?": "%s Vuoi nascondere i contenuti di questo utente ?",
+ "Mute": "Attiva Silenzia",
+ "Unmute %s?": "%s Vuoi mostrare i contenuti di questo utente ?",
+ "Unmute": "Disattiva Silenzia"
+}
diff --git a/plugins/ContentFilter/languages/jp.json b/plugins/ContentFilter/languages/jp.json
new file mode 100644
index 00000000..ef586a1a
--- /dev/null
+++ b/plugins/ContentFilter/languages/jp.json
@@ -0,0 +1,6 @@
+{
+ "Hide all content from %s?": "%s のコンテンツをすべて隠しますか?",
+ "Mute": "ミュート",
+ "Unmute %s?": "%s のミュートを解除しますか?",
+ "Unmute": "ミュート解除"
+}
diff --git a/plugins/ContentFilter/languages/pt-br.json b/plugins/ContentFilter/languages/pt-br.json
new file mode 100644
index 00000000..3c6bfbdc
--- /dev/null
+++ b/plugins/ContentFilter/languages/pt-br.json
@@ -0,0 +1,6 @@
+{
+ "Hide all content from %s?": "%s Ocultar todo o conteúdo de ?",
+ "Mute": "Ativar o Silêncio",
+ "Unmute %s?": "%s Você quer mostrar o conteúdo deste usuário ?",
+ "Unmute": "Desligar o silêncio"
+}
diff --git a/plugins/ContentFilter/languages/zh-tw.json b/plugins/ContentFilter/languages/zh-tw.json
new file mode 100644
index 00000000..0995f3a0
--- /dev/null
+++ b/plugins/ContentFilter/languages/zh-tw.json
@@ -0,0 +1,6 @@
+{
+ "Hide all content from %s?": "屏蔽 %s 的所有內容?",
+ "Mute": "屏蔽",
+ "Unmute %s?": "對 %s 解除屏蔽?",
+ "Unmute": "解除屏蔽"
+}
diff --git a/plugins/ContentFilter/languages/zh.json b/plugins/ContentFilter/languages/zh.json
new file mode 100644
index 00000000..bf63f107
--- /dev/null
+++ b/plugins/ContentFilter/languages/zh.json
@@ -0,0 +1,6 @@
+{
+ "Hide all content from %s?": "屏蔽 %s 的所有内容?",
+ "Mute": "屏蔽",
+ "Unmute %s?": "对 %s 解除屏蔽?",
+ "Unmute": "解除屏蔽"
+}
diff --git a/plugins/ContentFilter/media/blocklisted.html b/plugins/ContentFilter/media/blocklisted.html
new file mode 100644
index 00000000..c9d201a9
--- /dev/null
+++ b/plugins/ContentFilter/media/blocklisted.html
@@ -0,0 +1,89 @@
+Site blocked
+This site is on your blocklist:
+Too much image
+on 2015-01-25 12:32:11
diff --git a/plugins/ContentFilter/media/js/ZeroFrame.js b/plugins/ContentFilter/media/js/ZeroFrame.js
new file mode 100644
index 00000000..d6facdbf
--- /dev/null
+++ b/plugins/ContentFilter/media/js/ZeroFrame.js
@@ -0,0 +1,119 @@
+// Version 1.0.0 - Initial release
+// Version 1.1.0 (2017-08-02) - Added cmdp function that returns promise instead of using callback
+// Version 1.2.0 (2017-08-02) - Added Ajax monkey patch to emulate XMLHttpRequest over ZeroFrame API
+
+const CMD_INNER_READY = 'innerReady'
+const CMD_RESPONSE = 'response'
+const CMD_WRAPPER_READY = 'wrapperReady'
+const CMD_PING = 'ping'
+const CMD_PONG = 'pong'
+const CMD_WRAPPER_OPENED_WEBSOCKET = 'wrapperOpenedWebsocket'
+const CMD_WRAPPER_CLOSE_WEBSOCKET = 'wrapperClosedWebsocket'
+
+class ZeroFrame {
+ constructor(url) {
+ this.url = url
+ this.waiting_cb = {}
+ this.wrapper_nonce = document.location.href.replace(/.*wrapper_nonce=([A-Za-z0-9]+).*/, "$1")
+ this.connect()
+ this.next_message_id = 1
+ this.init()
+ }
+
+ init() {
+ return this
+ }
+
+ connect() {
+ this.target = window.parent
+ window.addEventListener('message', e => this.onMessage(e), false)
+ this.cmd(CMD_INNER_READY)
+ }
+
+ onMessage(e) {
+ let message = e.data
+ let cmd = message.cmd
+ if (cmd === CMD_RESPONSE) {
+ if (this.waiting_cb[message.to] !== undefined) {
+ this.waiting_cb[message.to](message.result)
+ }
+ else {
+ this.log("Websocket callback not found:", message)
+ }
+ } else if (cmd === CMD_WRAPPER_READY) {
+ this.cmd(CMD_INNER_READY)
+ } else if (cmd === CMD_PING) {
+ this.response(message.id, CMD_PONG)
+ } else if (cmd === CMD_WRAPPER_OPENED_WEBSOCKET) {
+ this.onOpenWebsocket()
+ } else if (cmd === CMD_WRAPPER_CLOSE_WEBSOCKET) {
+ this.onCloseWebsocket()
+ } else {
+ this.onRequest(cmd, message)
+ }
+ }
+
+ onRequest(cmd, message) {
+ this.log("Unknown request", message)
+ }
+
+ response(to, result) {
+ this.send({
+ cmd: CMD_RESPONSE,
+ to: to,
+ result: result
+ })
+ }
+
+ cmd(cmd, params={}, cb=null) {
+ this.send({
+ cmd: cmd,
+ params: params
+ }, cb)
+ }
+
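+  // Promise-based variant of cmd(); e.g. (illustrative): this.cmdp("siteInfo").then(info => this.log(info))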
+ cmdp(cmd, params={}) {
+ return new Promise((resolve, reject) => {
+ this.cmd(cmd, params, (res) => {
+ if (res && res.error) {
+ reject(res.error)
+ } else {
+ resolve(res)
+ }
+ })
+ })
+ }
+
+ send(message, cb=null) {
+ message.wrapper_nonce = this.wrapper_nonce
+ message.id = this.next_message_id
+ this.next_message_id++
+ this.target.postMessage(message, '*')
+ if (cb) {
+ this.waiting_cb[message.id] = cb
+ }
+ }
+
+ log(...args) {
+ console.log.apply(console, ['[ZeroFrame]'].concat(args))
+ }
+
+ onOpenWebsocket() {
+ this.log('Websocket open')
+ }
+
+ onCloseWebsocket() {
+ this.log('Websocket close')
+ }
+
+ monkeyPatchAjax() {
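+ // Patch XMLHttpRequest.open to append the wrapper-provided ajax_key to every request URL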
+ var page = this
+ XMLHttpRequest.prototype.realOpen = XMLHttpRequest.prototype.open
+ this.cmd("wrapperGetAjaxKey", [], (res) => { this.ajax_key = res })
+ var newOpen = function (method, url, async) {
+ url += "?ajax_key=" + page.ajax_key
+ return this.realOpen(method, url, async)
+ }
+ XMLHttpRequest.prototype.open = newOpen
+ }
+}
diff --git a/plugins/ContentFilter/plugin_info.json b/plugins/ContentFilter/plugin_info.json
new file mode 100644
index 00000000..f63bc984
--- /dev/null
+++ b/plugins/ContentFilter/plugin_info.json
@@ -0,0 +1,5 @@
+{
+ "name": "ContentFilter",
+ "description": "Manage site and user block list.",
+ "default": "enabled"
+}
\ No newline at end of file
diff --git a/plugins/Cors/CorsPlugin.py b/plugins/Cors/CorsPlugin.py
new file mode 100644
index 00000000..c9437538
--- /dev/null
+++ b/plugins/Cors/CorsPlugin.py
@@ -0,0 +1,139 @@
+import re
+import html
+import copy
+import os
+import gevent
+
+from Plugin import PluginManager
+from Translate import Translate
+
+
+plugin_dir = os.path.dirname(__file__)
+
+if "_" not in locals():
+ _ = Translate(plugin_dir + "/languages/")
+
+
+def getCorsPath(site, inner_path):
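+ # Resolve a "cors-<address>/<inner_path>" virtual path to (address, inner_path); requires the Cors:<address> permission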
+ match = re.match("^cors-([A-Za-z0-9]{26,35})/(.*)", inner_path)
+ if not match:
+ raise Exception("Invalid cors path: %s" % inner_path)
+ cors_address = match.group(1)
+ cors_inner_path = match.group(2)
+
+ if not "Cors:%s" % cors_address in site.settings["permissions"]:
+ raise Exception("This site has no permission to access site %s" % cors_address)
+
+ return cors_address, cors_inner_path
+
+
+@PluginManager.registerTo("UiWebsocket")
+class UiWebsocketPlugin(object):
+ def hasSitePermission(self, address, cmd=None):
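+ # Besides normal permissions, allow a limited set of read-only commands for sites holding the Cors:<address> permission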
+ if super(UiWebsocketPlugin, self).hasSitePermission(address, cmd=cmd):
+ return True
+
+ allowed_commands = [
+ "fileGet", "fileList", "dirList", "fileRules", "optionalFileInfo",
+ "fileQuery", "dbQuery", "userGetSettings", "siteInfo"
+ ]
+ if not "Cors:%s" % address in self.site.settings["permissions"] or cmd not in allowed_commands:
+ return False
+ else:
+ return True
+
+ # Add cors support for file commands
+ def corsFuncWrapper(self, func_name, to, inner_path, *args, **kwargs):
+ if inner_path.startswith("cors-"):
+ cors_address, cors_inner_path = getCorsPath(self.site, inner_path)
+
+ req_self = copy.copy(self)
+ req_self.site = self.server.sites.get(cors_address) # Change the site to the merged one
+ if not req_self.site:
+ return {"error": "No site found"}
+
+ func = getattr(super(UiWebsocketPlugin, req_self), func_name)
+ back = func(to, cors_inner_path, *args, **kwargs)
+ return back
+ else:
+ func = getattr(super(UiWebsocketPlugin, self), func_name)
+ return func(to, inner_path, *args, **kwargs)
+
+ def actionFileGet(self, to, inner_path, *args, **kwargs):
+ return self.corsFuncWrapper("actionFileGet", to, inner_path, *args, **kwargs)
+
+ def actionFileList(self, to, inner_path, *args, **kwargs):
+ return self.corsFuncWrapper("actionFileList", to, inner_path, *args, **kwargs)
+
+ def actionDirList(self, to, inner_path, *args, **kwargs):
+ return self.corsFuncWrapper("actionDirList", to, inner_path, *args, **kwargs)
+
+ def actionFileRules(self, to, inner_path, *args, **kwargs):
+ return self.corsFuncWrapper("actionFileRules", to, inner_path, *args, **kwargs)
+
+ def actionOptionalFileInfo(self, to, inner_path, *args, **kwargs):
+ return self.corsFuncWrapper("actionOptionalFileInfo", to, inner_path, *args, **kwargs)
+
+ def actionCorsPermission(self, to, address):
+ if isinstance(address, list):
+ addresses = address
+ else:
+ addresses = [address]
+
+ button_title = _["Grant"]
+ site_names = []
+ site_addresses = []
+ for address in addresses:
+ site = self.server.sites.get(address)
+ if site:
+ site_name = site.content_manager.contents.get("content.json", {}).get("title", address)
+ else:
+ site_name = address
+ # If at least one site is not downloaded yet, show "Grant & Add" instead
+ button_title = _["Grant & Add"]
+
+ if not (site and "Cors:" + address in self.permissions):
+ # No site or no permission
+ site_names.append(site_name)
+ site_addresses.append(address)
+
+ if len(site_names) == 0:
+ return "ignored"
+
+ self.cmd(
+ "confirm",
+ [_["This site requests read permission to: %s"] % ", ".join(map(html.escape, site_names)), button_title],
+ lambda res: self.cbCorsPermission(to, site_addresses)
+ )
+
+ def cbCorsPermission(self, to, addresses):
+ # Add permissions
+ for address in addresses:
+ permission = "Cors:" + address
+ if permission not in self.site.settings["permissions"]:
+ self.site.settings["permissions"].append(permission)
+
+ self.site.saveSettings()
+ self.site.updateWebsocket(permission_added=permission)
+
+ self.response(to, "ok")
+
+ for address in addresses:
+ site = self.server.sites.get(address)
+ if not site:
+ gevent.spawn(self.server.site_manager.need, address)
+
+
+@PluginManager.registerTo("UiRequest")
+class UiRequestPlugin(object):
+ # Allow loading cross-origin files using /cors-<address>/file.jpg paths
+ def parsePath(self, path):
+ path_parts = super(UiRequestPlugin, self).parsePath(path)
+ if "cors-" not in path: # Optimization
+ return path_parts
+ site = self.server.sites[path_parts["address"]]
+ try:
+ path_parts["address"], path_parts["inner_path"] = getCorsPath(site, path_parts["inner_path"])
+ except Exception:
+ return None
+ return path_parts
diff --git a/plugins/Cors/__init__.py b/plugins/Cors/__init__.py
new file mode 100644
index 00000000..bcaa502b
--- /dev/null
+++ b/plugins/Cors/__init__.py
@@ -0,0 +1 @@
+from . import CorsPlugin
\ No newline at end of file
diff --git a/plugins/Cors/plugin_info.json b/plugins/Cors/plugin_info.json
new file mode 100644
index 00000000..f8af18fa
--- /dev/null
+++ b/plugins/Cors/plugin_info.json
@@ -0,0 +1,5 @@
+{
+ "name": "Cors",
+ "description": "Cross site resource read.",
+ "default": "enabled"
+}
\ No newline at end of file
diff --git a/plugins/CryptMessage/CryptMessage.py b/plugins/CryptMessage/CryptMessage.py
new file mode 100644
index 00000000..8349809c
--- /dev/null
+++ b/plugins/CryptMessage/CryptMessage.py
@@ -0,0 +1,58 @@
+import hashlib
+import base64
+import struct
+from lib import sslcrypto
+from Crypt import Crypt
+
+
+curve = sslcrypto.ecc.get_curve("secp256k1")
+
+
+def eciesEncrypt(data, pubkey, ciphername="aes-256-cbc"):
+ ciphertext, key_e = curve.encrypt(
+ data,
+ base64.b64decode(pubkey),
+ algo=ciphername,
+ derivation="sha512",
+ return_aes_key=True
+ )
+ return key_e, ciphertext
+
+
+@Crypt.thread_pool_crypt.wrap
+def eciesDecryptMulti(encrypted_datas, privatekey):
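+ # Decrypt a batch of texts on the crypt thread pool; items that fail to decrypt become None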
+ texts = [] # Decoded texts
+ for encrypted_data in encrypted_datas:
+ try:
+ text = eciesDecrypt(encrypted_data, privatekey).decode("utf8")
+ texts.append(text)
+ except Exception:
+ texts.append(None)
+ return texts
+
+
+def eciesDecrypt(ciphertext, privatekey):
+ return curve.decrypt(base64.b64decode(ciphertext), curve.wif_to_private(privatekey.encode()), derivation="sha512")
+
+
+def decodePubkey(pubkey):
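+ # Parse a serialized pubkey: 2-byte curve id, then length-prefixed X and Y coordinates; also return the bytes consumed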
+ i = 0
+ curve = struct.unpack('!H', pubkey[i:i + 2])[0]
+ i += 2
+ tmplen = struct.unpack('!H', pubkey[i:i + 2])[0]
+ i += 2
+ pubkey_x = pubkey[i:i + tmplen]
+ i += tmplen
+ tmplen = struct.unpack('!H', pubkey[i:i + 2])[0]
+ i += 2
+ pubkey_y = pubkey[i:i + tmplen]
+ i += tmplen
+ return curve, pubkey_x, pubkey_y, i
+
+
+def split(encrypted):
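+ # Split a raw ECIES blob into (iv, AES ciphertext), skipping the embedded pubkey and the trailing 32-byte MAC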
+ iv = encrypted[0:16]
+ curve, pubkey_x, pubkey_y, i = decodePubkey(encrypted[16:])
+ ciphertext = encrypted[16 + i:-32]
+
+ return iv, ciphertext
diff --git a/plugins/CryptMessage/CryptMessagePlugin.py b/plugins/CryptMessage/CryptMessagePlugin.py
new file mode 100644
index 00000000..7c24f730
--- /dev/null
+++ b/plugins/CryptMessage/CryptMessagePlugin.py
@@ -0,0 +1,225 @@
+import base64
+import os
+
+import gevent
+
+from Plugin import PluginManager
+from Crypt import CryptBitcoin, CryptHash
+from Config import config
+import sslcrypto
+
+from . import CryptMessage
+
+curve = sslcrypto.ecc.get_curve("secp256k1")
+
+
+@PluginManager.registerTo("UiWebsocket")
+class UiWebsocketPlugin(object):
+ # - Actions -
+
+ # Returns user's public key unique to site
+ # Return: Public key
+ def actionUserPublickey(self, to, index=0):
+ self.response(to, self.user.getEncryptPublickey(self.site.address, index))
+
+ # Encrypt a text using the given publickey or the user's site-specific publickey
+ # Return: Encrypted text in base64 encoding
+ def actionEciesEncrypt(self, to, text, publickey=0, return_aes_key=False):
+ if type(publickey) is int: # Encrypt using user's publickey
+ publickey = self.user.getEncryptPublickey(self.site.address, publickey)
+ aes_key, encrypted = CryptMessage.eciesEncrypt(text.encode("utf8"), publickey)
+ if return_aes_key:
+ self.response(to, [base64.b64encode(encrypted).decode("utf8"), base64.b64encode(aes_key).decode("utf8")])
+ else:
+ self.response(to, base64.b64encode(encrypted).decode("utf8"))
+
+ # Decrypt a text using the given privatekey or the user's site-specific privatekey
+ # Return: Decrypted text or list of decrypted texts
+ def actionEciesDecrypt(self, to, param, privatekey=0):
+ if type(privatekey) is int: # Decrypt using user's privatekey
+ privatekey = self.user.getEncryptPrivatekey(self.site.address, privatekey)
+
+ if type(param) == list:
+ encrypted_texts = param
+ else:
+ encrypted_texts = [param]
+
+ texts = CryptMessage.eciesDecryptMulti(encrypted_texts, privatekey)
+
+ if type(param) == list:
+ self.response(to, texts)
+ else:
+ self.response(to, texts[0])
+
+ # Encrypt a text using AES
+ # Return: Iv, AES key, Encrypted text
+ def actionAesEncrypt(self, to, text, key=None):
+ if key:
+ key = base64.b64decode(key)
+ else:
+ key = sslcrypto.aes.new_key()
+
+ if text:
+ encrypted, iv = sslcrypto.aes.encrypt(text.encode("utf8"), key)
+ else:
+ encrypted, iv = b"", b""
+
+ res = [base64.b64encode(item).decode("utf8") for item in [key, iv, encrypted]]
+ self.response(to, res)
+
+ # Decrypt a text using AES
+ # Return: Decrypted text
+ def actionAesDecrypt(self, to, *args):
+ if len(args) == 3: # Single decrypt
+ encrypted_texts = [(args[0], args[1])]
+ keys = [args[2]]
+ else: # Batch decrypt
+ encrypted_texts, keys = args
+
+ texts = [] # Decoded texts
+ for iv, encrypted_text in encrypted_texts:
+ encrypted_text = base64.b64decode(encrypted_text)
+ iv = base64.b64decode(iv)
+ text = None
+ for key in keys:
+ try:
+ decrypted = sslcrypto.aes.decrypt(encrypted_text, iv, base64.b64decode(key))
+ if decrypted and decrypted.decode("utf8"): # Valid text decoded
+ text = decrypted.decode("utf8")
+ except Exception as err:
+ pass
+ texts.append(text)
+
+ if len(args) == 3:
+ self.response(to, texts[0])
+ else:
+ self.response(to, texts)
+
+ # Sign data using ECDSA
+ # Return: Signature
+ def actionEcdsaSign(self, to, data, privatekey=None):
+ if privatekey is None: # Sign using user's privatekey
+ privatekey = self.user.getAuthPrivatekey(self.site.address)
+
+ self.response(to, CryptBitcoin.sign(data, privatekey))
+
+ # Verify data using ECDSA (address is either an address or an array of addresses)
+ # Return: bool
+ def actionEcdsaVerify(self, to, data, address, signature):
+ self.response(to, CryptBitcoin.verify(data, address, signature))
+
+ # Gets the publickey of a given privatekey
+ def actionEccPrivToPub(self, to, privatekey):
+ self.response(to, curve.private_to_public(curve.wif_to_private(privatekey.encode())))
+
+ # Gets the address of a given publickey
+ def actionEccPubToAddr(self, to, publickey):
+ self.response(to, curve.public_to_address(bytes.fromhex(publickey)))
+
+
+@PluginManager.registerTo("User")
+class UserPlugin(object):
+ def getEncryptPrivatekey(self, address, param_index=0):
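+ # Derive (and cache in site_data) a per-site encryption privatekey from the master seed at index address_index + 1000 + index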
+ if param_index < 0 or param_index > 1000:
+ raise Exception("Param_index out of range")
+
+ site_data = self.getSiteData(address)
+
+ if site_data.get("cert"): # Different privatekey for different cert provider
+ index = param_index + self.getAddressAuthIndex(site_data["cert"])
+ else:
+ index = param_index
+
+ if "encrypt_privatekey_%s" % index not in site_data:
+ address_index = self.getAddressAuthIndex(address)
+ crypt_index = address_index + 1000 + index
+ site_data["encrypt_privatekey_%s" % index] = CryptBitcoin.hdPrivatekey(self.master_seed, crypt_index)
+ self.log.debug("New encrypt privatekey generated for %s:%s" % (address, index))
+ return site_data["encrypt_privatekey_%s" % index]
+
+ def getEncryptPublickey(self, address, param_index=0):
+ if param_index < 0 or param_index > 1000:
+ raise Exception("Param_index out of range")
+
+ site_data = self.getSiteData(address)
+
+ if site_data.get("cert"): # Different privatekey for different cert provider
+ index = param_index + self.getAddressAuthIndex(site_data["cert"])
+ else:
+ index = param_index
+
+ if "encrypt_publickey_%s" % index not in site_data:
+ privatekey = self.getEncryptPrivatekey(address, param_index).encode()
+ publickey = curve.private_to_public(curve.wif_to_private(privatekey) + b"\x01")
+ site_data["encrypt_publickey_%s" % index] = base64.b64encode(publickey).decode("utf8")
+ return site_data["encrypt_publickey_%s" % index]
+
+
+@PluginManager.registerTo("Actions")
+class ActionsPlugin:
+ publickey = "A3HatibU4S6eZfIQhVs2u7GLN5G9wXa9WwlkyYIfwYaj"
+ privatekey = "5JBiKFYBm94EUdbxtnuLi6cvNcPzcKymCUHBDf2B6aq19vvG3rL"
+ utf8_text = '\xc1rv\xedzt\xfbr\xf5t\xfck\xf6rf\xfar\xf3g\xe9p'
+
+ def getBenchmarkTests(self, online=False):
+ if hasattr(super(), "getBenchmarkTests"):
+ tests = super().getBenchmarkTests(online)
+ else:
+ tests = []
+
+ aes_key, encrypted = CryptMessage.eciesEncrypt(self.utf8_text.encode("utf8"), self.publickey) # Warm-up
+ tests.extend([
+ {"func": self.testCryptEciesEncrypt, "kwargs": {}, "num": 100, "time_standard": 1.2},
+ {"func": self.testCryptEciesDecrypt, "kwargs": {}, "num": 500, "time_standard": 1.3},
+ {"func": self.testCryptEciesDecryptMulti, "kwargs": {}, "num": 5, "time_standard": 0.68},
+ {"func": self.testCryptAesEncrypt, "kwargs": {}, "num": 10000, "time_standard": 0.27},
+ {"func": self.testCryptAesDecrypt, "kwargs": {}, "num": 10000, "time_standard": 0.25}
+ ])
+ return tests
+
+ def testCryptEciesEncrypt(self, num_run=1):
+ for i in range(num_run):
+ aes_key, encrypted = CryptMessage.eciesEncrypt(self.utf8_text.encode("utf8"), self.publickey)
+ assert len(aes_key) == 32
+ yield "."
+
+ def testCryptEciesDecrypt(self, num_run=1):
+ aes_key, encrypted = CryptMessage.eciesEncrypt(self.utf8_text.encode("utf8"), self.publickey)
+ for i in range(num_run):
+ assert len(aes_key) == 32
+ decrypted = CryptMessage.eciesDecrypt(base64.b64encode(encrypted), self.privatekey)
+ assert decrypted == self.utf8_text.encode("utf8"), "%s != %s" % (decrypted, self.utf8_text.encode("utf8"))
+ yield "."
+
+ def testCryptEciesDecryptMulti(self, num_run=1):
+ yield "x 100 (%s threads) " % config.threads_crypt
+ aes_key, encrypted = CryptMessage.eciesEncrypt(self.utf8_text.encode("utf8"), self.publickey)
+
+ threads = []
+ for i in range(num_run):
+ assert len(aes_key) == 32
+ threads.append(gevent.spawn(
+ CryptMessage.eciesDecryptMulti, [base64.b64encode(encrypted)] * 100, self.privatekey
+ ))
+
+ for thread in threads:
+ res = thread.get()
+ assert res[0] == self.utf8_text, "%s != %s" % (res[0], self.utf8_text)
+ assert res[0] == res[-1], "%s != %s" % (res[0], res[-1])
+ yield "."
+ gevent.joinall(threads)
+
+ def testCryptAesEncrypt(self, num_run=1):
+ for i in range(num_run):
+ key = os.urandom(32)
+ encrypted = sslcrypto.aes.encrypt(self.utf8_text.encode("utf8"), key)
+ yield "."
+
+ def testCryptAesDecrypt(self, num_run=1):
+ key = os.urandom(32)
+ encrypted_text, iv = sslcrypto.aes.encrypt(self.utf8_text.encode("utf8"), key)
+
+ for i in range(num_run):
+ decrypted = sslcrypto.aes.decrypt(encrypted_text, iv, key).decode("utf8")
+ assert decrypted == self.utf8_text
+ yield "."
diff --git a/plugins/CryptMessage/Test/TestCrypt.py b/plugins/CryptMessage/Test/TestCrypt.py
new file mode 100644
index 00000000..25a077d8
--- /dev/null
+++ b/plugins/CryptMessage/Test/TestCrypt.py
@@ -0,0 +1,136 @@
+import pytest
+import base64
+from CryptMessage import CryptMessage
+
+
+@pytest.mark.usefixtures("resetSettings")
+class TestCrypt:
+ publickey = "A3HatibU4S6eZfIQhVs2u7GLN5G9wXa9WwlkyYIfwYaj"
+ privatekey = "5JBiKFYBm94EUdbxtnuLi6cvNcPzcKymCUHBDf2B6aq19vvG3rL"
+ utf8_text = '\xc1rv\xedzt\xfbr\xf5t\xfck\xf6rf\xfar\xf3g\xe9'
+ ecies_encrypted_text = "R5J1RFIDOzE5bnWopvccmALKACCk/CRcd/KSE9OgExJKASyMbZ57JVSUenL2TpABMmcT+wAgr2UrOqClxpOWvIUwvwwupXnMbRTzthhIJJrTRW3sCJVaYlGEMn9DAcvbflgEkQX/MVVdLV3tWKySs1Vk8sJC/y+4pGYCrZz7vwDNEEERaqU="
+
+ @pytest.mark.parametrize("text", [b"hello", '\xc1rv\xedzt\xfbr\xf5t\xfck\xf6rf\xfar\xf3g\xe9'.encode("utf8")])
+ @pytest.mark.parametrize("text_repeat", [1, 10, 128, 1024])
+ def testEncryptEcies(self, text, text_repeat):
+ text_repeated = text * text_repeat
+ aes_key, encrypted = CryptMessage.eciesEncrypt(text_repeated, self.publickey)
+ assert len(aes_key) == 32
+ # assert len(encrypted) == 134 + int(len(text) / 16) * 16 # Not always true
+
+ assert CryptMessage.eciesDecrypt(base64.b64encode(encrypted), self.privatekey) == text_repeated
+
+ def testDecryptEcies(self, user):
+ assert CryptMessage.eciesDecrypt(self.ecies_encrypted_text, self.privatekey) == b"hello"
+
+ def testPublickey(self, ui_websocket):
+ pub = ui_websocket.testAction("UserPublickey", 0)
+ assert len(pub) == 44 # Compressed, b64 encoded publickey
+
+ # Different pubkey for the specified index
+ assert ui_websocket.testAction("UserPublickey", 1) != ui_websocket.testAction("UserPublickey", 0)
+
+ # Same publickey for same index
+ assert ui_websocket.testAction("UserPublickey", 2) == ui_websocket.testAction("UserPublickey", 2)
+
+ # Different publickey for different cert
+ site_data = ui_websocket.user.getSiteData(ui_websocket.site.address)
+ site_data["cert"] = None
+ pub1 = ui_websocket.testAction("UserPublickey", 0)
+
+ site_data = ui_websocket.user.getSiteData(ui_websocket.site.address)
+ site_data["cert"] = "zeroid.bit"
+ pub2 = ui_websocket.testAction("UserPublickey", 0)
+ assert pub1 != pub2
+
+ def testEcies(self, ui_websocket):
+ pub = ui_websocket.testAction("UserPublickey")
+
+ encrypted = ui_websocket.testAction("EciesEncrypt", "hello", pub)
+ assert len(encrypted) == 180
+
+ # Don't allow decrypt using other privatekey index
+ decrypted = ui_websocket.testAction("EciesDecrypt", encrypted, 123)
+ assert decrypted != "hello"
+
+ # Decrypt using correct privatekey
+ decrypted = ui_websocket.testAction("EciesDecrypt", encrypted)
+ assert decrypted == "hello"
+
+ # Decrypt incorrect text
+ decrypted = ui_websocket.testAction("EciesDecrypt", "baad")
+ assert decrypted is None
+
+ # Decrypt batch
+ decrypted = ui_websocket.testAction("EciesDecrypt", [encrypted, "baad", encrypted])
+ assert decrypted == ["hello", None, "hello"]
+
+ def testEciesUtf8(self, ui_websocket):
+ # Utf8 test
+ ui_websocket.actionEciesEncrypt(0, self.utf8_text)
+ encrypted = ui_websocket.ws.getResult()
+
+ ui_websocket.actionEciesDecrypt(0, encrypted)
+ assert ui_websocket.ws.getResult() == self.utf8_text
+
+ def testEciesAes(self, ui_websocket):
+ ui_websocket.actionEciesEncrypt(0, "hello", return_aes_key=True)
+ ecies_encrypted, aes_key = ui_websocket.ws.getResult()
+
+ # Decrypt using Ecies
+ ui_websocket.actionEciesDecrypt(0, ecies_encrypted)
+ assert ui_websocket.ws.getResult() == "hello"
+
+ # Decrypt using AES
+ aes_iv, aes_encrypted = CryptMessage.split(base64.b64decode(ecies_encrypted))
+
+ ui_websocket.actionAesDecrypt(0, base64.b64encode(aes_iv), base64.b64encode(aes_encrypted), aes_key)
+ assert ui_websocket.ws.getResult() == "hello"
+
+ def testEciesAesLongpubkey(self, ui_websocket):
+ privatekey = "5HwVS1bTFnveNk9EeGaRenWS1QFzLFb5kuncNbiY3RiHZrVR6ok"
+
+ ecies_encrypted, aes_key = ["lWiXfEikIjw1ac3J/RaY/gLKACALRUfksc9rXYRFyKDSaxhwcSFBYCgAdIyYlY294g/6VgAf/68PYBVMD3xKH1n7Zbo+ge8b4i/XTKmCZRJvy0eutMKWckYCMVcxgIYNa/ZL1BY1kvvH7omgzg1wBraoLfdbNmVtQgdAZ9XS8PwRy6OB2Q==", "Rvlf7zsMuBFHZIGHcbT1rb4If+YTmsWDv6kGwcvSeMM="]
+
+ # Decrypt using Ecies
+ ui_websocket.actionEciesDecrypt(0, ecies_encrypted, privatekey)
+ assert ui_websocket.ws.getResult() == "hello"
+
+ # Decrypt using AES
+ aes_iv, aes_encrypted = CryptMessage.split(base64.b64decode(ecies_encrypted))
+
+ ui_websocket.actionAesDecrypt(0, base64.b64encode(aes_iv), base64.b64encode(aes_encrypted), aes_key)
+ assert ui_websocket.ws.getResult() == "hello"
+
+ def testAes(self, ui_websocket):
+ ui_websocket.actionAesEncrypt(0, "hello")
+ key, iv, encrypted = ui_websocket.ws.getResult()
+
+ assert len(key) == 44
+ assert len(iv) == 24
+ assert len(encrypted) == 24
+
+ # Single decrypt
+ ui_websocket.actionAesDecrypt(0, iv, encrypted, key)
+ assert ui_websocket.ws.getResult() == "hello"
+
+ # Batch decrypt
+ ui_websocket.actionAesEncrypt(0, "hello")
+ key2, iv2, encrypted2 = ui_websocket.ws.getResult()
+
+ assert [key, iv, encrypted] != [key2, iv2, encrypted2]
+
+ # Batch decrypt with a single key (encrypted2 uses a different key, so it fails)
+ ui_websocket.actionAesDecrypt(0, [[iv, encrypted], [iv, encrypted], [iv, "baad"], [iv2, encrypted2]], [key])
+ assert ui_websocket.ws.getResult() == ["hello", "hello", None, None]
+
+ # Batch decrypt with both keys
+ ui_websocket.actionAesDecrypt(0, [[iv, encrypted], [iv, encrypted], [iv, "baad"], [iv2, encrypted2]], [key, key2])
+ assert ui_websocket.ws.getResult() == ["hello", "hello", None, "hello"]
+
+ def testAesUtf8(self, ui_websocket):
+ ui_websocket.actionAesEncrypt(0, self.utf8_text)
+ key, iv, encrypted = ui_websocket.ws.getResult()
+
+ ui_websocket.actionAesDecrypt(0, iv, encrypted, key)
+ assert ui_websocket.ws.getResult() == self.utf8_text
diff --git a/plugins/CryptMessage/Test/conftest.py b/plugins/CryptMessage/Test/conftest.py
new file mode 100644
index 00000000..8c1df5b2
--- /dev/null
+++ b/plugins/CryptMessage/Test/conftest.py
@@ -0,0 +1 @@
+from src.Test.conftest import *
\ No newline at end of file
diff --git a/plugins/CryptMessage/Test/pytest.ini b/plugins/CryptMessage/Test/pytest.ini
new file mode 100644
index 00000000..d09210d1
--- /dev/null
+++ b/plugins/CryptMessage/Test/pytest.ini
@@ -0,0 +1,5 @@
+[pytest]
+python_files = Test*.py
+addopts = -rsxX -v --durations=6
+markers =
+ webtest: mark a test as a webtest.
\ No newline at end of file
diff --git a/plugins/CryptMessage/__init__.py b/plugins/CryptMessage/__init__.py
new file mode 100644
index 00000000..6aeb4e52
--- /dev/null
+++ b/plugins/CryptMessage/__init__.py
@@ -0,0 +1 @@
+from . import CryptMessagePlugin
\ No newline at end of file
diff --git a/plugins/CryptMessage/plugin_info.json b/plugins/CryptMessage/plugin_info.json
new file mode 100644
index 00000000..96dfdd89
--- /dev/null
+++ b/plugins/CryptMessage/plugin_info.json
@@ -0,0 +1,5 @@
+{
+ "name": "CryptMessage",
+ "description": "Cryptographic functions of ECIES and AES data encryption/decryption.",
+ "default": "enabled"
+}
\ No newline at end of file
diff --git a/plugins/disabled-Bootstrapper/BootstrapperDb.py b/plugins/disabled-Bootstrapper/BootstrapperDb.py
new file mode 100644
index 00000000..0866dc3e
--- /dev/null
+++ b/plugins/disabled-Bootstrapper/BootstrapperDb.py
@@ -0,0 +1,156 @@
+import time
+import re
+
+import gevent
+
+from Config import config
+from Db import Db
+from util import helper
+
+
+class BootstrapperDb(Db.Db):
+ def __init__(self):
+ self.version = 7
+ self.hash_ids = {} # hash -> id cache
+ super(BootstrapperDb, self).__init__({"db_name": "Bootstrapper"}, "%s/bootstrapper.db" % config.data_dir)
+ self.foreign_keys = True
+ self.checkTables()
+ self.updateHashCache()
+ gevent.spawn(self.cleanup)
+
+ def cleanup(self):
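+ # Every 4 minutes delete peers that have not announced in the last 40 minutes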
+ while 1:
+ time.sleep(4 * 60)
+ timeout = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() - 60 * 40))
+ self.execute("DELETE FROM peer WHERE date_announced < ?", [timeout])
+
+ def updateHashCache(self):
+ res = self.execute("SELECT * FROM hash")
+ self.hash_ids = {row["hash"]: row["hash_id"] for row in res}
+ self.log.debug("Loaded %s hash_ids" % len(self.hash_ids))
+
+ def checkTables(self):
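+ # Recreate all tables if the stored schema version is older than self.version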
+ version = int(self.execute("PRAGMA user_version").fetchone()[0])
+ self.log.debug("Db version: %s, needed: %s" % (version, self.version))
+ if version < self.version:
+ self.createTables()
+ else:
+ self.execute("VACUUM")
+
+ def createTables(self):
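+ # Drop the whole schema and recreate the peer, peer_to_hash and hash tables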
+ # Delete all tables
+ self.execute("PRAGMA writable_schema = 1")
+ self.execute("DELETE FROM sqlite_master WHERE type IN ('table', 'index', 'trigger')")
+ self.execute("PRAGMA writable_schema = 0")
+ self.execute("VACUUM")
+ self.execute("PRAGMA INTEGRITY_CHECK")
+ # Create new tables
+ self.execute("""
+ CREATE TABLE peer (
+ peer_id INTEGER PRIMARY KEY ASC AUTOINCREMENT NOT NULL UNIQUE,
+ type TEXT,
+ address TEXT,
+ port INTEGER NOT NULL,
+ date_added DATETIME DEFAULT (CURRENT_TIMESTAMP),
+ date_announced DATETIME DEFAULT (CURRENT_TIMESTAMP)
+ );
+ """)
+ self.execute("CREATE UNIQUE INDEX peer_key ON peer (address, port);")
+
+ self.execute("""
+ CREATE TABLE peer_to_hash (
+ peer_to_hash_id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL,
+ peer_id INTEGER REFERENCES peer (peer_id) ON DELETE CASCADE,
+ hash_id INTEGER REFERENCES hash (hash_id)
+ );
+ """)
+ self.execute("CREATE INDEX peer_id ON peer_to_hash (peer_id);")
+ self.execute("CREATE INDEX hash_id ON peer_to_hash (hash_id);")
+
+ self.execute("""
+ CREATE TABLE hash (
+ hash_id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL,
+ hash BLOB UNIQUE NOT NULL,
+ date_added DATETIME DEFAULT (CURRENT_TIMESTAMP)
+ );
+ """)
+ self.execute("PRAGMA user_version = %s" % self.version)
+
+ def getHashId(self, hash):
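+ # Return the hash_id for a hash; insert a new row and cache it on first sight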
+ if hash not in self.hash_ids:
+ self.log.debug("New hash: %s" % repr(hash))
+ res = self.execute("INSERT OR IGNORE INTO hash ?", {"hash": hash})
+ self.hash_ids[hash] = res.lastrowid
+ return self.hash_ids[hash]
+
+ def peerAnnounce(self, ip_type, address, port=None, hashes=[], onion_signed=False, delete_missing_hashes=False):
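+ # Store a peer announce; return the number of changed hash associations (unsigned onion announces return len(hashes) without storing)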
+ hashes_ids_announced = []
+ for hash in hashes:
+ hashes_ids_announced.append(self.getHashId(hash))
+
+ # Check if the peer already exists
+ res = self.execute("SELECT peer_id FROM peer WHERE ? LIMIT 1", {"address": address, "port": port})
+
+ user_row = res.fetchone()
+ now = time.strftime("%Y-%m-%d %H:%M:%S")
+ if user_row:
+ peer_id = user_row["peer_id"]
+ self.execute("UPDATE peer SET date_announced = ? WHERE peer_id = ?", (now, peer_id))
+ else:
+ self.log.debug("New peer: %s signed: %s" % (address, onion_signed))
+ if ip_type == "onion" and not onion_signed:
+ return len(hashes)
+ res = self.execute("INSERT INTO peer ?", {"type": ip_type, "address": address, "port": port, "date_announced": now})
+ peer_id = res.lastrowid
+
+ # Check the peer's existing hashes
+ res = self.execute("SELECT * FROM peer_to_hash WHERE ?", {"peer_id": peer_id})
+ hash_ids_db = [row["hash_id"] for row in res]
+ if hash_ids_db != hashes_ids_announced:
+ hash_ids_added = set(hashes_ids_announced) - set(hash_ids_db)
+ hash_ids_removed = set(hash_ids_db) - set(hashes_ids_announced)
+ if ip_type != "onion" or onion_signed:
+ for hash_id in hash_ids_added:
+ self.execute("INSERT INTO peer_to_hash ?", {"peer_id": peer_id, "hash_id": hash_id})
+ if hash_ids_removed and delete_missing_hashes:
+ self.execute("DELETE FROM peer_to_hash WHERE ?", {"peer_id": peer_id, "hash_id": list(hash_ids_removed)})
+
+ return len(hash_ids_added) + len(hash_ids_removed)
+ else:
+ return 0
+
+ def peerList(self, hash, address=None, onions=[], port=None, limit=30, need_types=["ipv4", "onion"], order=True):
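+ # Return up to "limit" packed peers for a hash grouped by address type, excluding the requester's own addresses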
+ back = {"ipv4": [], "ipv6": [], "onion": []}
+ if limit == 0:
+ return back
+ hashid = self.getHashId(hash)
+
+ if order:
+ order_sql = "ORDER BY date_announced DESC"
+ else:
+ order_sql = ""
+ where_sql = "hash_id = :hashid"
+ if onions:
+ onions_escaped = ["'%s'" % re.sub("[^a-z0-9,]", "", onion) for onion in onions if type(onion) is str]
+ where_sql += " AND address NOT IN (%s)" % ",".join(onions_escaped)
+ elif address:
+ where_sql += " AND NOT (address = :address AND port = :port)"
+
+ query = """
+ SELECT type, address, port
+ FROM peer_to_hash
+ LEFT JOIN peer USING (peer_id)
+ WHERE %s
+ %s
+ LIMIT :limit
+ """ % (where_sql, order_sql)
+ res = self.execute(query, {"hashid": hashid, "address": address, "port": port, "limit": limit})
+
+ for row in res:
+ if row["type"] in need_types:
+ if row["type"] == "onion":
+ packed = helper.packOnionAddress(row["address"], row["port"])
+ else:
+ packed = helper.packAddress(str(row["address"]), row["port"])
+ back[row["type"]].append(packed)
+ return back
diff --git a/plugins/disabled-Bootstrapper/BootstrapperPlugin.py b/plugins/disabled-Bootstrapper/BootstrapperPlugin.py
new file mode 100644
index 00000000..5ddc36b6
--- /dev/null
+++ b/plugins/disabled-Bootstrapper/BootstrapperPlugin.py
@@ -0,0 +1,156 @@
+import time
+
+from util import helper
+
+from Plugin import PluginManager
+from .BootstrapperDb import BootstrapperDb
+from Crypt import CryptRsa
+from Config import config
+
+if "db" not in locals().keys(): # Share during reloads
+ db = BootstrapperDb()
+
+
+@PluginManager.registerTo("FileRequest")
+class FileRequestPlugin(object):
+ def checkOnionSigns(self, onions, onion_signs, onion_sign_this):
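+ # Verify that every announced onion address signed our nonce within 3 minutes; return True only if all signatures match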
+ if not onion_signs or len(onion_signs) != len(set(onions)):
+ return False
+
+ if time.time() - float(onion_sign_this) > 3 * 60:
+ return False # Signature is older than the allowed 3 minutes
+
+ onions_signed = []
+ # Check onion signs
+ for onion_publickey, onion_sign in onion_signs.items():
+ if CryptRsa.verify(onion_sign_this.encode(), onion_publickey, onion_sign):
+ onions_signed.append(CryptRsa.publickeyToOnion(onion_publickey))
+ else:
+ break
+
+ # Check if the signed onion addresses are the same as the announced ones
+ if sorted(onions_signed) == sorted(set(onions)):
+ return True
+ else:
+ return False
+
+ def actionAnnounce(self, params):
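+ # Handle an announce request: store the peer's hashes, then return known peers for each announced hash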
+ time_started = time.time()
+ s = time.time()
+ # Backward compatibility
+ if "ip4" in params["add"]:
+ params["add"].append("ipv4")
+ if "ip4" in params["need_types"]:
+ params["need_types"].append("ipv4")
+
+ hashes = params["hashes"]
+
+ all_onions_signed = self.checkOnionSigns(params.get("onions", []), params.get("onion_signs"), params.get("onion_sign_this"))
+
+ time_onion_check = time.time() - s
+
+ ip_type = self.server.getIpType(self.connection.ip)
+
+ if ip_type == "onion" or self.connection.ip in config.ip_local:
+ is_port_open = False
+ elif ip_type in params["add"]:
+ is_port_open = True
+ else:
+ is_port_open = False
+
+ s = time.time()
+ # Add onions to sites separately, or all at once if no onions are present
+ i = 0
+ onion_to_hash = {}
+ for onion in params.get("onions", []):
+ if onion not in onion_to_hash:
+ onion_to_hash[onion] = []
+ onion_to_hash[onion].append(hashes[i])
+ i += 1
+
+ hashes_changed = 0
+ for onion, onion_hashes in onion_to_hash.items():
+ hashes_changed += db.peerAnnounce(
+ ip_type="onion",
+ address=onion,
+ port=params["port"],
+ hashes=onion_hashes,
+ onion_signed=all_onions_signed
+ )
+ time_db_onion = time.time() - s
+
+ s = time.time()
+
+ if is_port_open:
+ hashes_changed += db.peerAnnounce(
+ ip_type=ip_type,
+ address=self.connection.ip,
+ port=params["port"],
+ hashes=hashes,
+ delete_missing_hashes=params.get("delete")
+ )
+ time_db_ip = time.time() - s
+
+ s = time.time()
+ # Query sites
+ back = {}
+ peers = []
+ if params.get("onions") and not all_onions_signed and hashes_changed:
+ back["onion_sign_this"] = "%.0f" % time.time() # Send back nonce for signing
+
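+ # For large requests, or when the peer's hashes did not change, return a smaller unordered peer list to keep the response fast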
+ if len(hashes) > 500 or not hashes_changed:
+ limit = 5
+ order = False
+ else:
+ limit = 30
+ order = True
+ for hash in hashes:
+ if time.time() - time_started > 1: # 1 sec limit on request
+ self.connection.log("Announce time limit exceeded after %s/%s sites" % (len(peers), len(hashes)))
+ break
+
+ hash_peers = db.peerList(
+ hash,
+ address=self.connection.ip, onions=list(onion_to_hash.keys()), port=params["port"],
+ limit=min(limit, params["need_num"]), need_types=params["need_types"], order=order
+ )
+ if "ip4" in params["need_types"]: # Backward compatibility
+ hash_peers["ip4"] = hash_peers["ipv4"]
+ del(hash_peers["ipv4"])
+ peers.append(hash_peers)
+ time_peerlist = time.time() - s
+
+ back["peers"] = peers
+ self.connection.log(
+ "Announce %s sites (onions: %s, onion_check: %.3fs, db_onion: %.3fs, db_ip: %.3fs, peerlist: %.3fs, limit: %s)" %
+ (len(hashes), len(onion_to_hash), time_onion_check, time_db_onion, time_db_ip, time_peerlist, limit)
+ )
+ self.response(back)
+
+
+@PluginManager.registerTo("UiRequest")
+class UiRequestPlugin(object):
+ @helper.encodeResponse
+ def actionStatsBootstrapper(self):
+ self.sendHeader()
+
+ # Style
+ yield """
+
+ """
+
+ hash_rows = db.execute("SELECT * FROM hash").fetchall()
+ for hash_row in hash_rows:
+ peer_rows = db.execute(
+ "SELECT * FROM peer LEFT JOIN peer_to_hash USING (peer_id) WHERE hash_id = :hash_id",
+ {"hash_id": hash_row["hash_id"]}
+ ).fetchall()
+
+ yield "
%s (added: %s, peers: %s)
" % (
+ str(hash_row["hash"]).encode().hex(), hash_row["date_added"], len(peer_rows)
+ )
+ for peer_row in peer_rows:
+ yield " - {type} {address}:{port} added: {date_added}, announced: {date_announced}
".format(**dict(peer_row))
diff --git a/plugins/disabled-Bootstrapper/Test/TestBootstrapper.py b/plugins/disabled-Bootstrapper/Test/TestBootstrapper.py
new file mode 100644
index 00000000..198cd022
--- /dev/null
+++ b/plugins/disabled-Bootstrapper/Test/TestBootstrapper.py
@@ -0,0 +1,246 @@
+import hashlib
+import os
+
+import pytest
+
+from Bootstrapper import BootstrapperPlugin
+from Bootstrapper.BootstrapperDb import BootstrapperDb
+from Peer import Peer
+from Crypt import CryptRsa
+from util import helper
+
+
+@pytest.fixture()
+def bootstrapper_db(request):
+ BootstrapperPlugin.db.close()
+ BootstrapperPlugin.db = BootstrapperDb()
+ BootstrapperPlugin.db.createTables() # Reset db
+ BootstrapperPlugin.db.cur.logging = True
+
+ def cleanup():
+ BootstrapperPlugin.db.close()
+ os.unlink(BootstrapperPlugin.db.db_path)
+
+ request.addfinalizer(cleanup)
+ return BootstrapperPlugin.db
+
+
+@pytest.mark.usefixtures("resetSettings")
+class TestBootstrapper:
+ def testHashCache(self, file_server, bootstrapper_db):
+ ip_type = file_server.getIpType(file_server.ip)
+ peer = Peer(file_server.ip, 1544, connection_server=file_server)
+ hash1 = hashlib.sha256(b"site1").digest()
+ hash2 = hashlib.sha256(b"site2").digest()
+ hash3 = hashlib.sha256(b"site3").digest()
+
+ # Verify empty result
+ res = peer.request("announce", {
+ "hashes": [hash1, hash2],
+ "port": 15441, "need_types": [ip_type], "need_num": 10, "add": [ip_type]
+ })
+
+ assert len(res["peers"][0][ip_type]) == 0 # Empty result
+
+ hash_ids_before = bootstrapper_db.hash_ids.copy()
+
+ bootstrapper_db.updateHashCache()
+
+ assert hash_ids_before == bootstrapper_db.hash_ids
+
+
+ def testBootstrapperDb(self, file_server, bootstrapper_db):
+ ip_type = file_server.getIpType(file_server.ip)
+ peer = Peer(file_server.ip, 1544, connection_server=file_server)
+ hash1 = hashlib.sha256(b"site1").digest()
+ hash2 = hashlib.sha256(b"site2").digest()
+ hash3 = hashlib.sha256(b"site3").digest()
+
+ # Verify empty result
+ res = peer.request("announce", {
+ "hashes": [hash1, hash2],
+ "port": 15441, "need_types": [ip_type], "need_num": 10, "add": [ip_type]
+ })
+
+ assert len(res["peers"][0][ip_type]) == 0 # Empty result
+
+ # Verify added peer on previous request
+ bootstrapper_db.peerAnnounce(ip_type, file_server.ip_external, port=15441, hashes=[hash1, hash2], delete_missing_hashes=True)
+
+ res = peer.request("announce", {
+ "hashes": [hash1, hash2],
+ "port": 15441, "need_types": [ip_type], "need_num": 10, "add": [ip_type]
+ })
+ assert len(res["peers"][0][ip_type]) == 1
+ assert len(res["peers"][1][ip_type]) == 1
+
+ # hash2 no longer announced by the peer
+ bootstrapper_db.peerAnnounce(ip_type, file_server.ip_external, port=15441, hashes=[hash1], delete_missing_hashes=True)
+ res = peer.request("announce", {
+ "hashes": [hash1, hash2],
+ "port": 15441, "need_types": [ip_type], "need_num": 10, "add": [ip_type]
+ })
+ assert len(res["peers"][0][ip_type]) == 1
+ assert len(res["peers"][1][ip_type]) == 0
+
+ # Announce 3 hashes again
+ bootstrapper_db.peerAnnounce(ip_type, file_server.ip_external, port=15441, hashes=[hash1, hash2, hash3], delete_missing_hashes=True)
+ res = peer.request("announce", {
+ "hashes": [hash1, hash2, hash3],
+ "port": 15441, "need_types": [ip_type], "need_num": 10, "add": [ip_type]
+ })
+ assert len(res["peers"][0][ip_type]) == 1
+ assert len(res["peers"][1][ip_type]) == 1
+ assert len(res["peers"][2][ip_type]) == 1
+
+ # Single hash announce
+ res = peer.request("announce", {
+ "hashes": [hash1], "port": 15441, "need_types": [ip_type], "need_num": 10, "add": [ip_type]
+ })
+ assert len(res["peers"][0][ip_type]) == 1
+
+ # Test DB cleanup
+ assert [row[0] for row in bootstrapper_db.execute("SELECT address FROM peer").fetchall()] == [file_server.ip_external] # 127.0.0.1 never gets added to the db
+
+ # Delete peers
+ bootstrapper_db.execute("DELETE FROM peer WHERE address = ?", [file_server.ip_external])
+ assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM peer_to_hash").fetchone()["num"] == 0
+
+ assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM hash").fetchone()["num"] == 3 # 3 sites
+ assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM peer").fetchone()["num"] == 0 # 0 peer
+
+ def testPassive(self, file_server, bootstrapper_db):
+ peer = Peer(file_server.ip, 1544, connection_server=file_server)
+ ip_type = file_server.getIpType(file_server.ip)
+ hash1 = hashlib.sha256(b"hash1").digest()
+
+ bootstrapper_db.peerAnnounce(ip_type, address=None, port=15441, hashes=[hash1])
+ res = peer.request("announce", {
+ "hashes": [hash1], "port": 15441, "need_types": [ip_type], "need_num": 10, "add": []
+ })
+
+ assert len(res["peers"][0]["ipv4"]) == 0 # Empty result
+
+ def testAddOnion(self, file_server, site, bootstrapper_db, tor_manager):
+ onion1 = tor_manager.addOnion()
+ onion2 = tor_manager.addOnion()
+ peer = Peer(file_server.ip, 1544, connection_server=file_server)
+ hash1 = hashlib.sha256(b"site1").digest()
+ hash2 = hashlib.sha256(b"site2").digest()
+ hash3 = hashlib.sha256(b"site3").digest()
+
+ bootstrapper_db.peerAnnounce(ip_type="ipv4", address="1.2.3.4", port=1234, hashes=[hash1, hash2, hash3])
+ res = peer.request("announce", {
+ "onions": [onion1, onion1, onion2],
+ "hashes": [hash1, hash2, hash3], "port": 15441, "need_types": ["ipv4", "onion"], "need_num": 10, "add": ["onion"]
+ })
+ assert len(res["peers"][0]["ipv4"]) == 1
+
+ # Onion address not added yet
+ site_peers = bootstrapper_db.peerList(address="1.2.3.4", port=1234, hash=hash1)
+ assert len(site_peers["onion"]) == 0
+ assert "onion_sign_this" in res
+
+ # Sign the nonces
+ sign1 = CryptRsa.sign(res["onion_sign_this"].encode(), tor_manager.getPrivatekey(onion1))
+ sign2 = CryptRsa.sign(res["onion_sign_this"].encode(), tor_manager.getPrivatekey(onion2))
+
+ # Bad sign (different address)
+ res = peer.request("announce", {
+ "onions": [onion1], "onion_sign_this": res["onion_sign_this"],
+ "onion_signs": {tor_manager.getPublickey(onion2): sign2},
+ "hashes": [hash1], "port": 15441, "need_types": ["ipv4", "onion"], "need_num": 10, "add": ["onion"]
+ })
+ assert "onion_sign_this" in res
+ site_peers1 = bootstrapper_db.peerList(address="1.2.3.4", port=1234, hash=hash1)
+ assert len(site_peers1["onion"]) == 0 # Not added
+
+ # Bad sign (missing one)
+ res = peer.request("announce", {
+ "onions": [onion1, onion1, onion2], "onion_sign_this": res["onion_sign_this"],
+ "onion_signs": {tor_manager.getPublickey(onion1): sign1},
+ "hashes": [hash1, hash2, hash3], "port": 15441, "need_types": ["ipv4", "onion"], "need_num": 10, "add": ["onion"]
+ })
+ assert "onion_sign_this" in res
+ site_peers1 = bootstrapper_db.peerList(address="1.2.3.4", port=1234, hash=hash1)
+ assert len(site_peers1["onion"]) == 0 # Not added
+
+ # Good sign
+ res = peer.request("announce", {
+ "onions": [onion1, onion1, onion2], "onion_sign_this": res["onion_sign_this"],
+ "onion_signs": {tor_manager.getPublickey(onion1): sign1, tor_manager.getPublickey(onion2): sign2},
+ "hashes": [hash1, hash2, hash3], "port": 15441, "need_types": ["ipv4", "onion"], "need_num": 10, "add": ["onion"]
+ })
+ assert "onion_sign_this" not in res
+
+ # Onion addresses added
+ site_peers1 = bootstrapper_db.peerList(address="1.2.3.4", port=1234, hash=hash1)
+ assert len(site_peers1["onion"]) == 1
+ site_peers2 = bootstrapper_db.peerList(address="1.2.3.4", port=1234, hash=hash2)
+ assert len(site_peers2["onion"]) == 1
+ site_peers3 = bootstrapper_db.peerList(address="1.2.3.4", port=1234, hash=hash3)
+ assert len(site_peers3["onion"]) == 1
+
+ assert site_peers1["onion"][0] == site_peers2["onion"][0]
+ assert site_peers2["onion"][0] != site_peers3["onion"][0]
+ assert helper.unpackOnionAddress(site_peers1["onion"][0])[0] == onion1 + ".onion"
+ assert helper.unpackOnionAddress(site_peers2["onion"][0])[0] == onion1 + ".onion"
+ assert helper.unpackOnionAddress(site_peers3["onion"][0])[0] == onion2 + ".onion"
+
+ tor_manager.delOnion(onion1)
+ tor_manager.delOnion(onion2)
+
+ def testRequestPeers(self, file_server, site, bootstrapper_db, tor_manager):
+ site.connection_server = file_server
+ file_server.tor_manager = tor_manager
+ hash = hashlib.sha256(site.address.encode()).digest()
+
+ # Request peers from tracker
+ assert len(site.peers) == 0
+ bootstrapper_db.peerAnnounce(ip_type="ipv4", address="1.2.3.4", port=1234, hashes=[hash])
+ site.announcer.announceTracker("zero://%s:%s" % (file_server.ip, file_server.port))
+ assert len(site.peers) == 1
+
+ # Test onion address store
+ bootstrapper_db.peerAnnounce(ip_type="onion", address="bka4ht2bzxchy44r", port=1234, hashes=[hash], onion_signed=True)
+ site.announcer.announceTracker("zero://%s:%s" % (file_server.ip, file_server.port))
+ assert len(site.peers) == 2
+ assert "bka4ht2bzxchy44r.onion:1234" in site.peers
+
+ @pytest.mark.slow
+ def testAnnounce(self, file_server, tor_manager):
+ file_server.tor_manager = tor_manager
+ hash1 = hashlib.sha256(b"1Nekos4fiBqfcazyG1bAxdBT5oBvA76Z").digest()
+ hash2 = hashlib.sha256(b"1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr").digest()
+ peer = Peer("zero.booth.moe", 443, connection_server=file_server)
+ assert peer.request("ping")
+ peer = Peer("boot3rdez4rzn36x.onion", 15441, connection_server=file_server)
+ assert peer.request("ping")
+ res = peer.request("announce", {
+ "hashes": [hash1, hash2],
+ "port": 15441, "need_types": ["ip4", "onion"], "need_num": 100, "add": [""]
+ })
+
+ assert res
+
+ def testBackwardCompatibility(self, file_server, bootstrapper_db):
+ peer = Peer(file_server.ip, 1544, connection_server=file_server)
+ hash1 = hashlib.sha256(b"site1").digest()
+
+ bootstrapper_db.peerAnnounce("ipv4", file_server.ip_external, port=15441, hashes=[hash1], delete_missing_hashes=True)
+
+ # Test with ipv4 need type
+ res = peer.request("announce", {
+ "hashes": [hash1],
+ "port": 15441, "need_types": ["ipv4"], "need_num": 10, "add": []
+ })
+
+ assert len(res["peers"][0]["ipv4"]) == 1
+
+ # Test with ip4 need type
+ res = peer.request("announce", {
+ "hashes": [hash1],
+ "port": 15441, "need_types": ["ip4"], "need_num": 10, "add": []
+ })
+
+ assert len(res["peers"][0]["ip4"]) == 1
diff --git a/plugins/disabled-Bootstrapper/Test/conftest.py b/plugins/disabled-Bootstrapper/Test/conftest.py
new file mode 100644
index 00000000..8c1df5b2
--- /dev/null
+++ b/plugins/disabled-Bootstrapper/Test/conftest.py
@@ -0,0 +1 @@
+from src.Test.conftest import *
\ No newline at end of file
diff --git a/plugins/disabled-Bootstrapper/Test/pytest.ini b/plugins/disabled-Bootstrapper/Test/pytest.ini
new file mode 100644
index 00000000..8ee21268
--- /dev/null
+++ b/plugins/disabled-Bootstrapper/Test/pytest.ini
@@ -0,0 +1,6 @@
+[pytest]
+python_files = Test*.py
+addopts = -rsxX -v --durations=6
+markers =
+ slow: mark a tests as slow.
+ webtest: mark a test as a webtest.
\ No newline at end of file
diff --git a/plugins/disabled-Bootstrapper/__init__.py b/plugins/disabled-Bootstrapper/__init__.py
new file mode 100644
index 00000000..cce30eea
--- /dev/null
+++ b/plugins/disabled-Bootstrapper/__init__.py
@@ -0,0 +1 @@
+from . import BootstrapperPlugin
\ No newline at end of file
diff --git a/plugins/disabled-Bootstrapper/plugin_info.json b/plugins/disabled-Bootstrapper/plugin_info.json
new file mode 100644
index 00000000..06915d4d
--- /dev/null
+++ b/plugins/disabled-Bootstrapper/plugin_info.json
@@ -0,0 +1,5 @@
+{
+ "name": "Bootstrapper",
+ "description": "Add BitTorrent tracker server like features to your ZeroNet client.",
+ "default": "disabled"
+}
\ No newline at end of file
diff --git a/plugins/disabled-Dnschain/SiteManagerPlugin.py b/plugins/disabled-Dnschain/SiteManagerPlugin.py
new file mode 100644
index 00000000..8b9508f1
--- /dev/null
+++ b/plugins/disabled-Dnschain/SiteManagerPlugin.py
@@ -0,0 +1,153 @@
+import logging, json, os, re, sys, time
+import gevent
+from Plugin import PluginManager
+from Config import config
+from util import Http
+from Debug import Debug
+
+allow_reload = False # No reload supported
+
+log = logging.getLogger("DnschainPlugin")
+
+@PluginManager.registerTo("SiteManager")
+class SiteManagerPlugin(object):
+ dns_cache_path = "%s/dns_cache.json" % config.data_dir
+ dns_cache = None
+
+ # Check if the address is valid
+ def isAddress(self, address):
+ if self.isDomain(address):
+ return True
+ else:
+ return super(SiteManagerPlugin, self).isAddress(address)
+
+
+ # Return: True if the address is a domain
+ def isDomain(self, address):
+ return re.match(r"(.*?)([A-Za-z0-9_-]+\.[A-Za-z0-9]+)$", address)
+
+
+ # Load dns entries from data/dns_cache.json
+ def loadDnsCache(self):
+ if os.path.isfile(self.dns_cache_path):
+ self.dns_cache = json.load(open(self.dns_cache_path))
+ else:
+ self.dns_cache = {}
+ log.debug("Loaded dns cache, entries: %s" % len(self.dns_cache))
+
+
+ # Save dns entries to data/dns_cache.json
+ def saveDnsCache(self):
+ json.dump(self.dns_cache, open(self.dns_cache_path, "w"), indent=2)
+
+
+ # Resolve domain using dnschain.net
+ # Return: The address or None
+ def resolveDomainDnschainNet(self, domain):
+ try:
+ match = self.isDomain(domain)
+ sub_domain = match.group(1).strip(".")
+ top_domain = match.group(2)
+ if not sub_domain: sub_domain = "@"
+ address = None
+ with gevent.Timeout(5, Exception("Timeout: 5s")):
+ res = Http.get("https://api.dnschain.net/v1/namecoin/key/%s" % top_domain).read()
+ data = json.loads(res)["data"]["value"]
+ if "zeronet" in data:
+ for key, val in data["zeronet"].items():
+ self.dns_cache[key+"."+top_domain] = [val, time.time()+60*60*5] # Cache for 5 hours
+ self.saveDnsCache()
+ return data["zeronet"].get(sub_domain)
+ # Not found
+ return address
+ except Exception as err:
+ log.debug("Dnschain.net %s resolve error: %s" % (domain, Debug.formatException(err)))
+
+
+ # Resolve domain using dnschain.info
+ # Return: The address or None
+ def resolveDomainDnschainInfo(self, domain):
+ try:
+ match = self.isDomain(domain)
+ sub_domain = match.group(1).strip(".")
+ top_domain = match.group(2)
+ if not sub_domain: sub_domain = "@"
+ address = None
+ with gevent.Timeout(5, Exception("Timeout: 5s")):
+ res = Http.get("https://dnschain.info/bit/d/%s" % re.sub(r"\.bit$", "", top_domain)).read()
+ data = json.loads(res)["value"]
+ for key, val in data["zeronet"].items():
+ self.dns_cache[key+"."+top_domain] = [val, time.time()+60*60*5] # Cache for 5 hours
+ self.saveDnsCache()
+ return data["zeronet"].get(sub_domain)
+ # Not found
+ return address
+ except Exception as err:
+ log.debug("Dnschain.info %s resolve error: %s" % (domain, Debug.formatException(err)))
+
+
+ # Resolve domain
+ # Return: The address or None
+ def resolveDomain(self, domain):
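+ # Return the site address for a domain from the cache, or resolve it via both dnschain APIs and cache the result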
+ domain = domain.lower()
+ if self.dns_cache is None:
+ self.loadDnsCache()
+ if domain.count(".") < 2: # Its a topleved request, prepend @. to it
+ domain = "@."+domain
+
+ domain_details = self.dns_cache.get(domain)
+ if domain_details and time.time() < domain_details[1]: # Found in cache and it's not expired
+ return domain_details[0]
+ else:
+ # Resolve dns using dnschain
+ thread_dnschain_info = gevent.spawn(self.resolveDomainDnschainInfo, domain)
+ thread_dnschain_net = gevent.spawn(self.resolveDomainDnschainNet, domain)
+ gevent.joinall([thread_dnschain_net, thread_dnschain_info]) # Wait for finish
+
+ if thread_dnschain_info.value and thread_dnschain_net.value: # Both successful
+ if thread_dnschain_info.value == thread_dnschain_net.value: # Same returned value
+ return thread_dnschain_info.value
+ else:
+ log.error("Dns %s missmatch: %s != %s" % (domain, thread_dnschain_info.value, thread_dnschain_net.value))
+
+ # Problem during resolve
+ if domain_details: # Resolve failed, but we have it in the cache
+ domain_details[1] = time.time()+60*60 # Don't try again for 1 hour
+ return domain_details[0]
+ else: # Not found in cache
+ self.dns_cache[domain] = [None, time.time()+60] # Don't check again for 1 min
+ return None
+
+
+ # Return or create site and start download site files
+ # Return: Site or None if dns resolve failed
+ def need(self, address, all_file=True):
+ if self.isDomain(address): # It looks like a domain
+ address_resolved = self.resolveDomain(address)
+ if address_resolved:
+ address = address_resolved
+ else:
+ return None
+
+ return super(SiteManagerPlugin, self).need(address, all_file)
+
+
+ # Return: Site object or None if not found
+ def get(self, address):
+ if self.sites is None: # Not loaded yet
+ self.load()
+ if self.isDomain(address): # It looks like a domain
+ address_resolved = self.resolveDomain(address)
+ if address_resolved: # Domain found
+ site = self.sites.get(address_resolved)
+ if site:
+ site_domain = site.settings.get("domain")
+ if site_domain != address:
+ site.settings["domain"] = address
+ else: # Domain not found
+ site = self.sites.get(address)
+
+ else: # Access by site address
+ site = self.sites.get(address)
+ return site
+
diff --git a/plugins/disabled-Dnschain/UiRequestPlugin.py b/plugins/disabled-Dnschain/UiRequestPlugin.py
new file mode 100644
index 00000000..8ab9d5c5
--- /dev/null
+++ b/plugins/disabled-Dnschain/UiRequestPlugin.py
@@ -0,0 +1,34 @@
+import re
+from Plugin import PluginManager
+
+@PluginManager.registerTo("UiRequest")
+class UiRequestPlugin(object):
+ def __init__(self, server = None):
+ from Site import SiteManager
+ self.site_manager = SiteManager.site_manager
+ super(UiRequestPlugin, self).__init__(server)
+
+
+ # Media request
+ def actionSiteMedia(self, path):
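+ # If the path addresses a domain, rewrite it to the resolved site address before serving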
+ match = re.match(r"/media/(?P[A-Za-z0-9-]+\.[A-Za-z0-9\.-]+)(?P/.*|$)", path)
+ if match: # Its a valid domain, resolve first
+ domain = match.group("address")
+ address = self.site_manager.resolveDomain(domain)
+ if address:
+ path = "/media/"+address+match.group("inner_path")
+ return super(UiRequestPlugin, self).actionSiteMedia(path) # Get the wrapper frame output
+
+
+ # Is mediarequest allowed from that referer
+ def isMediaRequestAllowed(self, site_address, referer):
+ referer_path = re.sub("http[s]{0,1}://.*?/", "/", referer).replace("/media", "") # Remove site address
+ referer_site_address = re.match(r"/(?P<address>[A-Za-z0-9\.-]+)(?P<inner_path>/.*|$)", referer_path).group("address")
+
+ if referer_site_address == site_address: # Referer site address as simple address
+ return True
+ elif self.site_manager.resolveDomain(referer_site_address) == site_address: # Referer site address as dns
+ return True
+ else: # Invalid referer
+ return False
+
diff --git a/plugins/disabled-Dnschain/__init__.py b/plugins/disabled-Dnschain/__init__.py
new file mode 100644
index 00000000..2b36af5d
--- /dev/null
+++ b/plugins/disabled-Dnschain/__init__.py
@@ -0,0 +1,3 @@
+ # This plugin is experimental; if you really want to enable it, uncomment the following lines:
+# import DnschainPlugin
+# import SiteManagerPlugin
\ No newline at end of file
diff --git a/plugins/disabled-DonationMessage/DonationMessagePlugin.py b/plugins/disabled-DonationMessage/DonationMessagePlugin.py
new file mode 100644
index 00000000..8cf0d541
--- /dev/null
+++ b/plugins/disabled-DonationMessage/DonationMessagePlugin.py
@@ -0,0 +1,22 @@
+import re
+from Plugin import PluginManager
+
+ # Warning: If you modify the donation address, rename the plugin's directory to "MyDonationMessage" so the update script does not overwrite your changes
+
+
+@PluginManager.registerTo("UiRequest")
+class UiRequestPlugin(object):
+ # Inject a donation message to every page top right corner
+ def renderWrapper(self, *args, **kwargs):
+ body = super(UiRequestPlugin, self).renderWrapper(*args, **kwargs) # Get the wrapper frame output
+
+ inject_html = """
+
+ Please donate to help to keep this ZeroProxy alive
+