Compare commits


112 Commits

Author SHA1 Message Date
William Allen a1fa05d157
Update setup.py 2023-06-30 03:37:17 -05:00
William Allen c8e7e2640c
Update setup.py 2023-06-29 18:45:16 -05:00
ChiaMineJP 65d7bc5e11
Updated GUI ref 2023-06-24 00:24:32 +09:00
ChiaMineJP 55ce2a30b3
Fixed CI errors 2023-06-24 00:23:14 +09:00
ChiaMineJP 7effbe5f89
Removed `harvesting_mode_update` protocol message 2023-06-24 00:23:13 +09:00
ChiaMineJP e4d95fa608
Updated harvester rpc client 2023-06-24 00:23:11 +09:00
ChiaMineJP ee5e0625c4
Rename `update_harvesting_mode` -> `update_harvester_config` 2023-06-24 00:22:54 +09:00
ChiaMineJP b6c7fd5b86
Rename `get_harvesting_mode` -> `get_harvester_config` 2023-06-24 00:22:53 +09:00
William Allen d595b4fa05
bumping chiapos 2023-06-24 00:22:36 +09:00
ChiaMineJP c8cfab9c75
Fixed a lint error 2023-06-24 00:22:35 +09:00
William Allen b1d037be0b
Bumping chiapos 2023-06-24 00:22:18 +09:00
ChiaMineJP bd78e69d40
Fixed a lint error 2023-06-24 00:21:37 +09:00
ChiaMineJP 61531353dd
Fixed a test error 2023-06-24 00:21:35 +09:00
ChiaMineJP 02792f90c9
Fixed CI errors 2023-06-24 00:21:34 +09:00
ChiaMineJP 841da926ac
Fixed mypy errors 2023-06-24 00:21:33 +09:00
ChiaMineJP 5d9bb1f8a8
Updated pool state API 2023-06-24 00:21:32 +09:00
ChiaMineJP 4aef046201
Updated Farmer RPC APIs 2023-06-24 00:21:31 +09:00
ChiaMineJP 421fab7672
Revert "Tried to make pool state dict as a TypedDict"
This reverts commit 226dfea1f3e9d2d82ccccffe99eda985efb36c45.
2023-06-24 00:21:30 +09:00
ChiaMineJP be5a0633a8
Tried to make pool state dict as a TypedDict 2023-06-24 00:21:29 +09:00
ChiaMineJP 2c8e3815c7
Added `block_won` and `last_time_farmed` to the response of `get_farmed_amount` Wallet RPC API 2023-06-24 00:21:28 +09:00
ChiaMineJP 7e4f3b4be5
Added `average_block_time` to the response of `get_blockchain_state` FullNode RPC API 2023-06-24 00:21:26 +09:00
wallentx a0c0466f3f
Bumping chiapos version 2023-06-24 00:21:25 +09:00
ChiaMineJP a15aad04ef
Fixed a minor issue 2023-06-24 00:21:24 +09:00
ChiaMineJP 676776d073
Trying to fix error found in a test 2023-06-24 00:21:23 +09:00
ChiaMineJP 235477ea43
Fixed a test which makes a dummy plot of ksize > 50 2023-06-24 00:21:22 +09:00
ChiaMineJP c47e164d25
Fixed tests 2023-06-24 00:21:21 +09:00
ChiaMineJP 5995a04ba2
Revert the return type of `expected_plot_size` to `uint64` from `int` 2023-06-24 00:21:20 +09:00
ChiaMineJP 6f7c892ea8
Expand bladebit compatibility 2023-06-24 00:21:19 +09:00
ChiaMineJP e2c0412d9f
Fixed a minor issue 2023-06-24 00:21:18 +09:00
ChiaMineJP 911ffa73e2
Made the return type of `expected_plot_size` `int` 2023-06-24 00:21:17 +09:00
ChiaMineJP 33e4d1f5c7
Fixed errors found in tests 2023-06-24 00:21:16 +09:00
ChiaMineJP 85a43a6468
Added `total_effective_plot_size` to `Receiver.to_dict()` 2023-06-24 00:21:15 +09:00
ChiaMineJP 2db786c565
Improved accuracy of expected time to win 2023-06-24 00:21:14 +09:00
ChiaMineJP e4f47db315
Fixed test files 2023-06-24 00:21:12 +09:00
ChiaMineJP a488de1174
Updated tests 2023-06-24 00:21:11 +09:00
ChiaMineJP ae23ef003a
Fixed lint errors 2023-06-24 00:21:10 +09:00
ChiaMineJP b5f4067326
Fixed errors found in tests 2023-06-24 00:21:09 +09:00
ChiaMineJP 4bc2b303a7
Fixed isort issue 2023-06-24 00:21:08 +09:00
ChiaMineJP 183e5b6493
Updated `chia farm summary` command output 2023-06-24 00:21:06 +09:00
ChiaMineJP e9cd6a9ca7
Fixed lint errors 2023-06-24 00:21:05 +09:00
ChiaMineJP aa24958772
Updated the value of `UI_ACTUAL_SPACE_CONSTANT_FACTOR` to `0.78` 2023-06-24 00:21:04 +09:00
ChiaMineJP 6d6067f27d
Fixed tests 2023-06-24 00:21:03 +09:00
ChiaMineJP 87dcd74d1f
Fixed test failure 2023-06-24 00:21:02 +09:00
ChiaMineJP bfd89e8344
Updated tests and fixed lint errors 2023-06-24 00:21:01 +09:00
William Allen 306c428950
Bumping chiapos 2023-06-24 00:21:00 +09:00
ChiaMineJP 0f700cfe76
Drop harvester/farmer protocol compatibility 2023-06-24 00:20:58 +09:00
ChiaMineJP 57eedb6293
Added customizable harvester configs 2023-06-24 00:20:57 +09:00
ChiaMineJP 5e30fb4efa
Reverted: `self.log` -> `log` 2023-06-24 00:20:56 +09:00
ChiaMineJP 1230fa3872
Removed `compression_level` from `DiskCacheEntry` and `CacheEntry` 2023-06-24 00:20:55 +09:00
ChiaMineJP c0b5b64f24
Removed `compression_level` from `PlotInfo` 2023-06-24 00:20:54 +09:00
ChiaMineJP 4599f936f4
Added more customizable configs for harvester 2023-06-24 00:20:53 +09:00
ChiaMineJP abc50ab2a5
Fixed a minor typo 2023-06-24 00:20:52 +09:00
ChiaMineJP 74192c72df
Fixed a minor issue 2023-06-24 00:20:51 +09:00
ChiaMineJP e88983663f
Implemented API for harvester setting 2023-06-24 00:20:50 +09:00
ChiaMineJP 08c6102ca8
Implemented handlers for new bladebit options 2023-06-24 00:20:48 +09:00
ChiaMineJP 624278412f
Added rate limit settings for new protocol messages 2023-06-24 00:20:47 +09:00
ChiaMineJP 7b4104d217
Fixed version 2023-06-24 00:20:46 +09:00
ChiaMineJP c36ce4d153
Removed redundant APIs 2023-06-24 00:20:45 +09:00
ChiaMineJP 7e40bea96a
Added `plot_sync_start_v2` message 2023-06-24 00:20:44 +09:00
ChiaMineJP 6e50651239
Activated `get_compresion_level` of `chiapos` 2023-06-24 00:20:43 +09:00
ChiaMineJP 07ad39ee21
Update code for version compare 2023-06-24 00:20:42 +09:00
ChiaMineJP 2e0f043d7e
Checkpont 2023-06-24 00:20:38 +09:00
William Allen 7ca050273c
catchup: into long_lived/compression from release/1.8.2 @ ff11e3e06a (#15577)
Source hash: ff11e3e06a
Remaining commits: 0
2023-06-23 10:13:46 -05:00
wallentx 8ade580cf0
Merge commit 'ff11e3e06add4e09b1e576cc63f960b8894decd1' into catchup/long_lived_compression_from_release_1.8.2_ff11e3e06add4e09b1e576cc63f960b8894decd1 2023-06-21 14:50:15 -05:00
William Allen 38c877f17c
catchup: into long_lived/compression from main _take_2 @ 330c75022a (#15447)
Source hash: 330c75022a
Remaining commits: 0
2023-06-05 12:15:01 -05:00
Kyle Altendorf 58ac6f1114
Merge commit '330c75022ab62513f53e3b06ff51bb61e86400ff' into catchup/long_lived_compression_from_main_take_2_330c75022ab62513f53e3b06ff51bb61e86400ff 2023-06-05 12:12:08 -04:00
Sebastjan Trepca 330c75022a
reduce the resend timeout to fix flakiness (#15435)
reduce the resend timeout
2023-06-05 10:54:03 -05:00
Matt Hauff 201f4e459d
Fix vcs get command when no proofs exist yet (#15432) 2023-06-02 17:54:31 -05:00
Matt Hauff 5dd204a001
Fix `.get_next_from_coin_spend` on VerifiedCredential and test new puzhashes (#15423)
Fix .get_next_from_coin_spend on VCWallet and test new puzhashes
2023-06-02 17:54:04 -05:00
Arvid Norberg ae0939af79
bump SOFT_FORK3_HEIGHT to align with the next release cycle (#15428) 2023-06-01 21:48:01 -05:00
dustinface 93af781b4e
wallet: Drop unused conditions helper/parameter (#15421) 2023-06-01 14:27:35 -05:00
William Allen b4a22b9be9
checkpoint: into main from release/1.8.2 @ bcead6a600 (#15419)
Source hash: bcead6a600
Remaining commits: 0
2023-06-01 14:25:41 -05:00
Amine Khaldi 18a08f90cf
Merge commit 'bcead6a60077e8ceee7c8dccef15dde1252787f4' into checkpoint/main_from_release_1.8.2_bcead6a60077e8ceee7c8dccef15dde1252787f4 2023-06-01 11:39:26 +01:00
William Allen 4b5785ec45
catchup: into long_lived/compression from main @ 70a8874ba5 (#15413)
Source hash: 70a8874ba5
Remaining commits: 0
2023-05-31 13:01:37 -05:00
wallentx 45552c26f6
Merge commit '70a8874ba59ea6c118ff775df2ff84df02ad8449' into catchup/long_lived_compression_from_main_70a8874ba59ea6c118ff775df2ff84df02ad8449 2023-05-31 12:19:39 -05:00
William Allen 4d819c2cd9
catchup: into long_lived/compression from main @ fbe7daa32e (#15389)
Source hash: fbe7daa32e
Remaining commits: 0
2023-05-26 11:55:11 -05:00
wallentx 6e6eccaf21
Merge commit 'fbe7daa32e6090d7b440024cba0087d117d04611' into catchup/long_lived_compression_from_main_fbe7daa32e6090d7b440024cba0087d117d04611 2023-05-26 11:33:58 -05:00
William Allen bca4adc8ee
catchup: into long_lived/compression from main @ ff226e8cb7 (#15368)
Source hash: ff226e8cb7
Remaining commits: 0
2023-05-23 13:17:39 -05:00
wallentx ced990a751
Merge commit 'ff226e8cb78bf7d30da35cc7644229fb730214ca' into catchup/long_lived_compression_from_main_ff226e8cb78bf7d30da35cc7644229fb730214ca 2023-05-23 12:41:53 -05:00
William Allen 6570273feb
catchup: into long_lived/compression from main @ 1777ecc2e8 (#15337)
Source hash: 1777ecc2e8
Remaining commits: 0
2023-05-19 20:22:38 -05:00
wallentx eeadcb480a
Merge commit '1777ecc2e8b46a4c1a42466e5be55e87db7b4fa3' into catchup/long_lived_compression_from_main_1777ecc2e8b46a4c1a42466e5be55e87db7b4fa3 2023-05-19 16:17:19 -05:00
wallentx 20984a9783
bumping chiapos 2023-05-19 14:22:28 -05:00
William Allen ab01dfccdc
Fixing missing args for 'configure_decompressor()' seen in 'chia plots check, and set 'parallel_decompressers_count: 1' as the default (#15271)
* Fixing missing arguments for 'configure_decompressor()' seen in 'chia plots check'

* Setting 'parallel_decompressers_count: 1' as default
2023-05-19 14:19:17 -05:00
Florin Chirica 01f0b3cf79
Allow only up to C7. 2023-05-01 21:16:27 +03:00
Florin Chirica b7273853c5
Initial commit cuda. 2023-04-21 18:39:35 +03:00
William Allen 82c90ae4c4
Merge branch 'main' into fc.compression 2023-03-28 17:41:55 -05:00
William Allen bd72035f6a
Bumping chiapos version 2023-03-20 21:44:01 -05:00
Florin Chirica b2a1dc3279
Fix tests. 2023-03-14 22:32:58 +02:00
Florin Chirica bba2540caa
Fix check plots. 2023-03-14 22:04:15 +02:00
Florin Chirica 89b80dba15
b8 2023-03-14 21:53:44 +02:00
wallentx a71ce5cc47
Bumping chiapos 2023-03-14 14:23:03 -05:00
wallentx 1564b8cb6d
bumping chiapos version 2023-03-14 14:09:47 -05:00
Florin Chirica 346ca39e86
Checkpoint. 2023-03-14 20:56:02 +02:00
Florin Chirica 72d4a22fe7
Remove debug info. 2023-03-14 00:10:46 +02:00
Florin Chirica 33c4f1a708
Update check_plots.py 2023-03-13 21:51:23 +02:00
Florin Chirica 56b88c87f2
Update check_plots.py 2023-03-13 21:22:28 +02:00
wallentx 3fb39143e9
Fixing lint 2023-03-13 14:20:23 -05:00
Florin Chirica cf425d0368
PlotInfo. 2023-03-13 21:05:05 +02:00
Florin Chirica c44d38929c
Update check_plots.py 2023-03-13 20:52:03 +02:00
Florin Chirica 198c53d555
Update check_plots.py 2023-03-13 20:41:50 +02:00
Florin Chirica 1f50f0e06a
Update check_plots.py 2023-03-13 20:16:35 +02:00
Florin Chirica 67bd1d1da2
First attempt parallel check plots. 2023-03-13 20:12:57 +02:00
Florin Chirica 0db0945875
Don't throw on dropped proof. 2023-03-13 19:41:26 +02:00
Florin Chirica a6a04be424
Don't exit early. 2023-03-13 16:25:24 +02:00
William Allen 4b32ca4e72
bump chiapos and fix lint (#14807) 2023-03-12 03:09:02 -05:00
Florin Chirica 84e4cec136
Update harvester_api.py 2023-03-11 21:23:04 +02:00
Florin Chirica d16abdddf7
Debug info. 2023-03-11 21:19:51 +02:00
wallentx 5e39e59312
Bumping chipos version 2023-03-10 13:28:30 -06:00
Florin Chirica d50af0ee52
Try pre-release. 2023-03-10 01:13:34 +02:00
Florin Chirica c96b36cd03
Fix bug. 2023-03-09 22:32:34 +02:00
Florin Chirica 5833e01c62
Checkpoint. 2023-03-09 20:03:07 +02:00
Florin Chirica fcea3d77e7
Compression branch. 2023-03-09 02:10:17 +02:00
46 changed files with 965 additions and 219 deletions

@@ -1 +1 @@
Subproject commit 1d5f7f2f296a072edf9cd6c431b7be19545a1700
Subproject commit a9f6024367352c24cef444a66c16979c970c47b5


@@ -121,6 +121,7 @@ async def summary(
class PlotStats:
total_plot_size = 0
total_effective_plot_size = 0
total_plots = 0
if harvesters_summary is not None:
@@ -142,10 +143,15 @@ async def summary(
print(f" Loading plots: {syncing['plot_files_processed']} / {syncing['plot_files_total']}")
else:
total_plot_size_harvester = harvester_dict["total_plot_size"]
total_effective_plot_size_harvester = harvester_dict["total_effective_plot_size"]
plot_count_harvester = harvester_dict["plots"]
PlotStats.total_plot_size += total_plot_size_harvester
PlotStats.total_effective_plot_size += total_effective_plot_size_harvester
PlotStats.total_plots += plot_count_harvester
print(f" {plot_count_harvester} plots of size: {format_bytes(total_plot_size_harvester)}")
print(
f" {plot_count_harvester} plots of size: {format_bytes(total_plot_size_harvester)} raw, "
f"{format_bytes(total_effective_plot_size_harvester, True)} (effective)"
)
if len(harvesters_local) > 0:
print(f"Local Harvester{'s' if len(harvesters_local) > 1 else ''}")
@@ -156,8 +162,10 @@ async def summary(
print(f"Plot count for all harvesters: {PlotStats.total_plots}")
print("Total size of plots: ", end="")
print(format_bytes(PlotStats.total_plot_size))
print(
f"Total size of plots: {format_bytes(PlotStats.total_plot_size)}, "
f"{format_bytes(PlotStats.total_effective_plot_size, True)} (effective)"
)
else:
print("Plot count: Unknown")
print("Total size of plots: Unknown")
@@ -170,7 +178,9 @@ async def summary(
minutes = -1
if blockchain_state is not None and harvesters_summary is not None:
proportion = PlotStats.total_plot_size / blockchain_state["space"] if blockchain_state["space"] else -1
proportion = (
PlotStats.total_effective_plot_size / blockchain_state["space"] if blockchain_state["space"] else -1
)
minutes = int((await get_average_block_time(rpc_port) / 60) / proportion) if proportion else -1
if harvesters_summary is not None and PlotStats.total_plots == 0:
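The hunk above switches the expected-time-to-win estimate from raw to effective plot size. A minimal sketch of the resulting arithmetic, with invented netspace and block-time figures (the real command fetches blockchain_state["space"] and the new average block time over RPC):

# Illustrative numbers only: ~35 EB of netspace, two k=32 plots at the
# new 0.78 factor, and roughly the 18.75 s mainnet block interval target.
average_block_time = 18.75  # seconds, assumed
netspace = 35 * 1000**6  # blockchain_state["space"], assumed
total_effective_plot_size = 218 * 1000**3  # PlotStats.total_effective_plot_size, assumed

proportion = total_effective_plot_size / netspace if netspace else -1
minutes = int((average_block_time / 60) / proportion) if proportion else -1
print(f"Expected time to win: {minutes} minutes")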


@@ -57,8 +57,8 @@ default_kwargs = {
"MAX_GENERATOR_REF_LIST_SIZE": 512, # Number of references allowed in the block generator ref list
"POOL_SUB_SLOT_ITERS": 37600000000, # iters limit * NUM_SPS
"SOFT_FORK2_HEIGHT": 3886635,
# Spetember 2023
"SOFT_FORK3_HEIGHT": 4200000,
# October 23, 2023
"SOFT_FORK3_HEIGHT": 4410000,
# June 2024
"HARD_FORK_HEIGHT": 5496000,
# June 2027


@@ -2,12 +2,12 @@ from __future__ import annotations
from chia.util.ints import uint64
# The actual space in bytes of a plot, is _expected_plot_size(k) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
# The actual space in bytes of a plot, is expected_plot_size(k) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
# This is not used in consensus, only for display purposes
UI_ACTUAL_SPACE_CONSTANT_FACTOR = 0.762
UI_ACTUAL_SPACE_CONSTANT_FACTOR = 0.78
def _expected_plot_size(k: int) -> uint64:
def expected_plot_size(k: int) -> uint64:
"""
Given the plot size parameter k (which is between 32 and 59), computes the
expected size of the plot in bytes (times a constant factor). This is based on efficient encoding
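For reference, a sketch of how the renamed helper and the bumped display factor combine; the function body follows the upstream pos_quality definition (elided in this hunk), and the k=32 numbers are a worked example:

from chia.util.ints import uint64

UI_ACTUAL_SPACE_CONSTANT_FACTOR = 0.78  # display-only constant, per this diff

def expected_plot_size(k: int) -> uint64:
    # Body as defined upstream in chia/consensus/pos_quality.py.
    return uint64(((2 * k) + 1) * (2 ** (k - 1)))

raw = expected_plot_size(32)  # 65 * 2**31 = 139,586,437,120 bytes
effective = UI_ACTUAL_SPACE_CONSTANT_FACTOR * raw  # ~108.9 GB, the "(effective)" figure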


@@ -1,7 +1,7 @@
from __future__ import annotations
from chia.consensus.constants import ConsensusConstants
from chia.consensus.pos_quality import _expected_plot_size
from chia.consensus.pos_quality import expected_plot_size
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.hash import std_hash
from chia.util.ints import uint8, uint64, uint128
@@ -62,6 +62,6 @@ def calculate_iterations_quality(
int(difficulty)
* int(difficulty_constant_factor)
* int.from_bytes(sp_quality_string, "big", signed=False)
// (int(pow(2, 256)) * int(_expected_plot_size(size)))
// (int(pow(2, 256)) * int(expected_plot_size(size)))
)
return max(iters, uint64(1))
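A self-contained sketch of this conversion with toy inputs; the difficulty constant factor below is the mainnet value, and the quality string is fabricated for illustration:

from chia.consensus.pos_quality import expected_plot_size  # renamed in this diff

difficulty = 2000
difficulty_constant_factor = 2**67  # mainnet DIFFICULTY_CONSTANT_FACTOR
sp_quality_string = bytes([0x3F] * 32)  # fabricated 256-bit quality hash
size = 32  # plot k-size

iters = (
    int(difficulty)
    * int(difficulty_constant_factor)
    * int.from_bytes(sp_quality_string, "big", signed=False)
    // (int(pow(2, 256)) * int(expected_plot_size(size)))
)
required_iters = max(iters, 1)  # the real function returns this as uint64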


@@ -85,13 +85,11 @@ if getattr(sys, "frozen", False):
def executable_for_service(service_name: str) -> str:
application_path = os.path.dirname(sys.executable)
executable = name_map[service_name]
if sys.platform == "win32" or sys.platform == "cygwin":
executable = name_map[service_name]
path = f"{application_path}/{executable}.exe"
return path
return f"{application_path}/{executable}.exe"
else:
path = f"{application_path}/{name_map[service_name]}"
return path
return f"{application_path}/{executable}"
else:
application_path = os.path.dirname(__file__)
@@ -721,28 +719,44 @@ class WebSocketServer:
def _bladebit_plotting_command_args(self, request: Any, ignoreCount: bool) -> List[str]:
plot_type = request["plot_type"]
assert plot_type == "ramplot" or plot_type == "diskplot"
assert plot_type == "ramplot" or plot_type == "diskplot" or plot_type == "cudaplot"
command_args: List[str] = []
if plot_type == "ramplot":
w = request.get("w", False) # Warm start
m = request.get("m", False) # Disable NUMA
no_cpu_affinity = request.get("no_cpu_affinity", False)
# Common options among diskplot, ramplot, cudaplot
w = request.get("w", False) # Warm start
m = request.get("m", False) # Disable NUMA
no_cpu_affinity = request.get("no_cpu_affinity", False)
compress = request.get("compress", None) # Compression level
if w is True:
command_args.append("--warmstart")
if m is True:
command_args.append("--nonuma")
if no_cpu_affinity is True:
command_args.append("--no-cpu-affinity")
if w is True:
command_args.append("--warmstart")
if m is True:
command_args.append("--nonuma")
if no_cpu_affinity is True:
command_args.append("--no-cpu-affinity")
if compress is not None and str(compress).isdigit():
command_args.append("--compress")
command_args.append(str(compress))
# ramplot doesn't accept any more options
if plot_type == "ramplot":
return command_args
# Options only applicable for cudaplot
if plot_type == "cudaplot":
device_index = request.get("device", None)
no_direct_downloads = request.get("no_direct_downloads", False)
if device_index is not None and str(device_index).isdigit():
command_args.append("--device")
command_args.append(str(device_index))
if no_direct_downloads:
command_args.append("--no-direct-downloads")
return command_args
# if plot_type == "diskplot"
w = request.get("w", False) # Warm start
m = request.get("m", False) # Disable NUMA
no_cpu_affinity = request.get("no_cpu_affinity", False)
# memo = request["memo"]
t1 = request["t"] # Temp directory
t2 = request.get("t2") # Temp2 directory
@@ -757,13 +771,6 @@ class WebSocketServer:
no_t1_direct = request.get("no_t1_direct", False)
no_t2_direct = request.get("no_t2_direct", False)
if w is True:
command_args.append("--warmstart")
if m is True:
command_args.append("--nonuma")
if no_cpu_affinity is True:
command_args.append("--no-cpu-affinity")
command_args.append("-t")
command_args.append(t1)
if t2:
@@ -833,7 +840,7 @@ class WebSocketServer:
# plotter command must be either
# 'chia plotters bladebit ramplot' or 'chia plotters bladebit diskplot'
plot_type = request["plot_type"]
assert plot_type == "diskplot" or plot_type == "ramplot"
assert plot_type == "diskplot" or plot_type == "ramplot" or plot_type == "cudaplot"
command_args.append(plot_type)
command_args.extend(self._common_plotting_command_args(request, ignoreCount))
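To see what the reworked builder emits, here is a hypothetical cudaplot request and the flags it would produce; every key used below appears in the handler above:

request = {
    "plot_type": "cudaplot",
    "w": True,  # warm start
    "compress": 7,  # compression level
    "device": 0,  # CUDA device index
    "no_direct_downloads": True,
}
# _bladebit_plotting_command_args(request, ignoreCount) would return:
# ["--warmstart", "--compress", "7", "--device", "0", "--no-direct-downloads"]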


@@ -5,6 +5,7 @@ import json
import logging
import time
import traceback
from math import floor
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple
@@ -41,7 +42,7 @@ from chia.util.byte_types import hexstr_to_bytes
from chia.util.config import config_path_for_filename, load_config, lock_and_load_config, save_config
from chia.util.errors import KeychainProxyConnectionFailure
from chia.util.hash import std_hash
from chia.util.ints import uint8, uint16, uint64
from chia.util.ints import uint8, uint16, uint32, uint64
from chia.util.keychain import Keychain
from chia.util.logging import TimedDuplicateFilter
from chia.wallet.derive_keys import (
@@ -61,6 +62,17 @@ UPDATE_POOL_INFO_INTERVAL: int = 3600
UPDATE_POOL_INFO_FAILURE_RETRY_INTERVAL: int = 120
UPDATE_POOL_FARMER_INFO_INTERVAL: int = 300
def strip_old_entries(pairs: List[Tuple[uint32, Any]], before: float) -> List[Tuple[uint32, Any]]:
for index, [timestamp, points] in enumerate(pairs):
if timestamp >= before:
if index == 0:
return pairs
if index > 0:
return pairs[index:]
return []
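The relocated helper assumes `pairs` is ordered oldest-first and returns the suffix of entries at or after the cutoff; a quick sketch (plain ints stand in for uint32 timestamps):

pairs = [(100, 10), (200, 12), (300, 12)]  # (timestamp, value), oldest first
assert strip_old_entries(pairs, before=150) == [(200, 12), (300, 12)]
assert strip_old_entries(pairs, before=50) == pairs  # nothing old enough to drop
assert strip_old_entries(pairs, before=400) == []  # everything aged out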
"""
HARVESTER PROTOCOL (FARMER <-> HARVESTER)
"""
@@ -122,6 +134,9 @@ class Farmer:
self.all_root_sks: List[PrivateKey] = []
# Use to find missing signage points. (new_signage_point, time)
self.prev_signage_point: Optional[Tuple[uint64, farmer_protocol.NewSignagePoint]] = None
def get_connections(self, request_node_type: Optional[NodeType]) -> List[Dict[str, Any]]:
return default_get_connections(server=self.server, request_node_type=request_node_type)
@@ -256,8 +271,10 @@ class Farmer:
def handle_failed_pool_response(self, p2_singleton_puzzle_hash: bytes32, error_message: str) -> None:
self.log.error(error_message)
self.pool_state[p2_singleton_puzzle_hash]["pool_errors_24h"].append(
ErrorResponse(uint16(PoolErrorCode.REQUEST_FAILED.value), error_message).to_json_dict()
self.increment_pool_stats(
p2_singleton_puzzle_hash,
"pool_errors",
value=ErrorResponse(uint16(PoolErrorCode.REQUEST_FAILED.value), error_message).to_json_dict(),
)
def on_disconnect(self, connection: WSChiaConnection) -> None:
@@ -324,7 +341,9 @@ class Farmer:
log_level = logging.INFO
if "error_code" in response:
log_level = logging.WARNING
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
self.increment_pool_stats(
pool_config.p2_singleton_puzzle_hash, "pool_errors", value=response
)
self.log.log(log_level, f"GET /farmer response: {response}")
return response
else:
@@ -366,7 +385,9 @@ class Farmer:
log_level = logging.INFO
if "error_code" in response:
log_level = logging.WARNING
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
self.increment_pool_stats(
pool_config.p2_singleton_puzzle_hash, "pool_errors", value=response
)
self.log.log(log_level, f"POST /farmer response: {response}")
return response
else:
@@ -408,7 +429,9 @@ class Farmer:
log_level = logging.INFO
if "error_code" in response:
log_level = logging.WARNING
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
self.increment_pool_stats(
pool_config.p2_singleton_puzzle_hash, "pool_errors", value=response
)
self.log.log(log_level, f"PUT /farmer response: {response}")
else:
self.handle_failed_pool_response(
@@ -446,6 +469,7 @@ class Farmer:
if p2_singleton_puzzle_hash not in self.pool_state:
self.pool_state[p2_singleton_puzzle_hash] = {
"p2_singleton_puzzle_hash": p2_singleton_puzzle_hash.hex(),
"points_found_since_start": 0,
"points_found_24h": [],
"points_acknowledged_since_start": 0,
@@ -455,11 +479,23 @@ class Farmer:
"current_points": 0,
"current_difficulty": None,
"pool_errors_24h": [],
"valid_partials_since_start": 0,
"valid_partials_24h": [],
"invalid_partials_since_start": 0,
"invalid_partials_24h": [],
"stale_partials_since_start": 0,
"stale_partials_24h": [],
"missing_partials_since_start": 0,
"missing_partials_24h": [],
"authentication_token_timeout": None,
"plot_count": 0,
"pool_config": pool_config,
}
self.log.info(f"Added pool: {pool_config}")
else:
self.pool_state[p2_singleton_puzzle_hash]["pool_config"] = pool_config
pool_state = self.pool_state[p2_singleton_puzzle_hash]
pool_state["pool_config"] = pool_config
# Skip state update when self pooling
if pool_config.pool_url == "":
@@ -670,6 +706,62 @@ class Farmer:
raise KeyError(f"Receiver missing for {node_id}")
return receiver
def increment_pool_stats(
self,
p2_singleton_puzzle_hash: bytes32,
name: str,
count: int = 1,
value: Any = None,
) -> None:
pool_state = self.pool_state[p2_singleton_puzzle_hash]
if pool_state is None:
return
if f"{name}_since_start" in pool_state:
pool_state[f"{name}_since_start"] += count
if f"{name}_24h" in pool_state:
if value is None:
pool_state[f"{name}_24h"].append((uint32(time.time()), pool_state["current_difficulty"]))
else:
pool_state[f"{name}_24h"].append((uint32(time.time()), value))
# Age out old 24h information for every signage point regardless
# of any failures. Note that this still lets old data remain if
# the client isn't receiving signage points.
cutoff_24h = time.time() - (24 * 60 * 60)
self.pool_state[p2_singleton_puzzle_hash][f"{name}_24h"] = strip_old_entries(
pairs=pool_state[f"{name}_24h"], before=cutoff_24h
)
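In short, a single call now bumps the matching `*_since_start` counter, appends a timestamped entry to the `*_24h` series, and prunes entries older than 24 hours. Hypothetical usage, mirroring the call sites added to farmer_api.py further below (`farmer`, `p2_singleton_puzzle_hash`, and `new_difficulty` are assumed to be in scope):

# One stale partial: "stale_partials_since_start" += 1, and a
# (timestamp, current_difficulty) entry lands in "stale_partials_24h".
farmer.increment_pool_stats(p2_singleton_puzzle_hash, "stale_partials")

# Points stats carry both an increment and an explicit recorded value:
farmer.increment_pool_stats(
    p2_singleton_puzzle_hash, "points_found", count=new_difficulty, value=new_difficulty
)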
def check_missing_signage_points(
self, timestamp: uint64, new_signage_point: farmer_protocol.NewSignagePoint
) -> Optional[Tuple[uint64, uint32]]:
if self.prev_signage_point is None:
self.prev_signage_point = (timestamp, new_signage_point)
return None
prev_time, prev_sp = self.prev_signage_point
self.prev_signage_point = (timestamp, new_signage_point)
if prev_sp.challenge_hash == new_signage_point.challenge_hash:
missing_sps = new_signage_point.signage_point_index - prev_sp.signage_point_index - 1
if missing_sps > 0:
return timestamp, uint32(missing_sps)
return None
actual_sp_interval_seconds = float(timestamp - prev_time)
if actual_sp_interval_seconds <= 0:
return None
expected_sp_interval_seconds = self.constants.SUB_SLOT_TIME_TARGET / self.constants.NUM_SPS_SUB_SLOT
allowance = 1.6 # Should be chosen from the range (1 <= allowance < 2)
if actual_sp_interval_seconds < expected_sp_interval_seconds * allowance:
return None
skipped_sps = uint32(floor(actual_sp_interval_seconds / expected_sp_interval_seconds))
if skipped_sps <= 0:
return None
return timestamp, skipped_sps
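A worked example of the gap check above, using the mainnet constants (SUB_SLOT_TIME_TARGET = 600, NUM_SPS_SUB_SLOT = 64, so one signage point is expected roughly every 9.375 seconds):

from math import floor

expected_sp_interval_seconds = 600 / 64  # 9.375 s between signage points
allowance = 1.6  # threshold factor from the method above

actual_sp_interval_seconds = 40.0  # hypothetical gap across challenges
if actual_sp_interval_seconds >= expected_sp_interval_seconds * allowance:  # >= 15 s
    skipped_sps = floor(actual_sp_interval_seconds / expected_sp_interval_seconds)
    print(skipped_sps)  # 4 -> reported as missing signage points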
async def _periodically_update_pool_state_task(self) -> None:
time_slept = 0
config_path: Path = config_path_for_filename(self._root_path, "config.yaml")


@@ -2,14 +2,14 @@ from __future__ import annotations
import json
import time
from typing import Any, Dict, List, Optional, Tuple
from typing import Any, Dict, List, Optional
import aiohttp
from blspy import AugSchemeMPL, G2Element, PrivateKey
from chia import __version__
from chia.consensus.pot_iterations import calculate_iterations_quality, calculate_sp_interval_iters
from chia.farmer.farmer import Farmer
from chia.farmer.farmer import Farmer, strip_old_entries
from chia.harvester.harvester_api import HarvesterAPI
from chia.protocols import farmer_protocol, harvester_protocol
from chia.protocols.harvester_protocol import (
@@ -39,17 +39,7 @@ from chia.types.blockchain_format.proof_of_space import (
)
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.api_decorators import api_request
from chia.util.ints import uint32, uint64
def strip_old_entries(pairs: List[Tuple[float, Any]], before: float) -> List[Tuple[float, Any]]:
for index, [timestamp, points] in enumerate(pairs):
if timestamp >= before:
if index == 0:
return pairs
if index > 0:
return pairs[index:]
return []
from chia.util.ints import uint16, uint32, uint64
class FarmerAPI:
@@ -149,6 +139,11 @@ class FarmerAPI:
pool_state_dict: Dict[str, Any] = self.farmer.pool_state[p2_singleton_puzzle_hash]
pool_url = pool_state_dict["pool_config"].pool_url
if pool_url == "":
self.farmer.increment_pool_stats(p2_singleton_puzzle_hash, "missing_partials")
self.farmer.state_changed(
"failed_partial",
{"p2_singleton_puzzle_hash": p2_singleton_puzzle_hash.hex()},
)
return
if pool_state_dict["current_difficulty"] is None:
@@ -156,6 +151,11 @@ class FarmerAPI:
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this partial to {pool_url}."
)
self.farmer.increment_pool_stats(p2_singleton_puzzle_hash, "missing_partials")
self.farmer.state_changed(
"failed_partial",
{"p2_singleton_puzzle_hash": p2_singleton_puzzle_hash.hex()},
)
return
required_iters = calculate_iterations_quality(
@@ -171,6 +171,11 @@ class FarmerAPI:
self.farmer.log.info(
f"Proof of space not good enough for pool {pool_url}: {pool_state_dict['current_difficulty']}"
)
self.farmer.increment_pool_stats(p2_singleton_puzzle_hash, "invalid_partials")
self.farmer.state_changed(
"failed_partial",
{"p2_singleton_puzzle_hash": p2_singleton_puzzle_hash.hex()},
)
return
authentication_token_timeout = pool_state_dict["authentication_token_timeout"]
@@ -179,6 +184,11 @@ class FarmerAPI:
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
self.farmer.increment_pool_stats(p2_singleton_puzzle_hash, "missing_partials")
self.farmer.state_changed(
"failed_partial",
{"p2_singleton_puzzle_hash": p2_singleton_puzzle_hash.hex()},
)
return
# Submit partial to pool
@@ -204,6 +214,11 @@ class FarmerAPI:
response: Any = await peer.call_api(HarvesterAPI.request_signatures, request)
if not isinstance(response, harvester_protocol.RespondSignatures):
self.farmer.log.error(f"Invalid response from harvester: {response}")
self.farmer.increment_pool_stats(p2_singleton_puzzle_hash, "invalid_partials")
self.farmer.state_changed(
"failed_partial",
{"p2_singleton_puzzle_hash": p2_singleton_puzzle_hash.hex()},
)
return
assert len(response.message_signatures) == 1
@@ -228,6 +243,11 @@ class FarmerAPI:
)
if authentication_sk is None:
self.farmer.log.error(f"No authentication sk for {p2_singleton_puzzle_hash}")
self.farmer.increment_pool_stats(p2_singleton_puzzle_hash, "missing_partials")
self.farmer.state_changed(
"failed_partial",
{"p2_singleton_puzzle_hash": p2_singleton_puzzle_hash.hex()},
)
return
authentication_signature = AugSchemeMPL.sign(authentication_sk, m_to_sign)
@@ -240,8 +260,12 @@ class FarmerAPI:
self.farmer.log.info(
f"Submitting partial for {post_partial_request.payload.launcher_id.hex()} to {pool_url}"
)
pool_state_dict["points_found_since_start"] += pool_state_dict["current_difficulty"]
pool_state_dict["points_found_24h"].append((time.time(), pool_state_dict["current_difficulty"]))
self.farmer.increment_pool_stats(
p2_singleton_puzzle_hash,
"points_found",
count=pool_state_dict["current_difficulty"],
value=pool_state_dict["current_difficulty"],
)
self.farmer.log.debug(f"POST /partial request {post_partial_request}")
try:
async with aiohttp.ClientSession() as session:
@@ -259,7 +283,16 @@ class FarmerAPI:
f"Error in pooling: "
f"{pool_response['error_code'], pool_response['error_message']}"
)
pool_state_dict["pool_errors_24h"].append(pool_response)
self.farmer.increment_pool_stats(
p2_singleton_puzzle_hash, "pool_errors", value=pool_response
)
if pool_response["error_code"] == PoolErrorCode.TOO_LATE.value:
self.farmer.increment_pool_stats(p2_singleton_puzzle_hash, "stale_partials")
else:
self.farmer.increment_pool_stats(p2_singleton_puzzle_hash, "invalid_partials")
if pool_response["error_code"] == PoolErrorCode.PROOF_NOT_GOOD_ENOUGH.value:
self.farmer.log.error(
"Partial not good enough, forcing pool farmer update to "
@@ -268,14 +301,27 @@ class FarmerAPI:
pool_state_dict["next_farmer_update"] = 0
await self.farmer.update_pool_state()
else:
self.farmer.increment_pool_stats(p2_singleton_puzzle_hash, "valid_partials")
new_difficulty = pool_response["new_difficulty"]
pool_state_dict["points_acknowledged_since_start"] += new_difficulty
pool_state_dict["points_acknowledged_24h"].append((time.time(), new_difficulty))
self.farmer.increment_pool_stats(
p2_singleton_puzzle_hash,
"points_acknowledged",
count=new_difficulty,
value=new_difficulty,
)
pool_state_dict["current_difficulty"] = new_difficulty
else:
self.farmer.log.error(f"Error sending partial to {pool_url}, {resp.status}")
except Exception as e:
self.farmer.log.error(f"Error connecting to pool: {e}")
error_resp = {"error_code": uint16(PoolErrorCode.REQUEST_FAILED.value), "error_message": str(e)}
self.farmer.increment_pool_stats(p2_singleton_puzzle_hash, "pool_errors", value=error_resp)
self.farmer.increment_pool_stats(p2_singleton_puzzle_hash, "invalid_partials")
self.farmer.state_changed(
"failed_partial",
{"p2_singleton_puzzle_hash": p2_singleton_puzzle_hash.hex()},
)
return
self.farmer.state_changed(
@@ -488,7 +534,11 @@ class FarmerAPI:
# the client isn't receiving signage points.
cutoff_24h = time.time() - (24 * 60 * 60)
for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
for key in ["points_found_24h", "points_acknowledged_24h"]:
for key in [
"points_found_24h",
"points_acknowledged_24h",
"pool_errors_24h",
]:
if key not in pool_dict:
continue
@@ -498,9 +548,14 @@ class FarmerAPI:
self.farmer.log.debug(f"Duplicate signage point {new_signage_point.signage_point_index}")
return
now = uint64(int(time.time()))
self.farmer.sps[new_signage_point.challenge_chain_sp].append(new_signage_point)
self.farmer.cache_add_time[new_signage_point.challenge_chain_sp] = uint64(int(time.time()))
self.farmer.state_changed("new_signage_point", {"sp_hash": new_signage_point.challenge_chain_sp})
self.farmer.cache_add_time[new_signage_point.challenge_chain_sp] = now
missing_signage_points = self.farmer.check_missing_signage_points(now, new_signage_point)
self.farmer.state_changed(
"new_signage_point",
{"sp_hash": new_signage_point.challenge_chain_sp, "missing_signage_points": missing_signage_points},
)
@api_request()
async def request_signed_values(self, full_node_request: farmer_protocol.RequestSignedValues) -> None:
@@ -521,8 +576,8 @@ class FarmerAPI:
msg = make_msg(ProtocolMessageTypes.request_signatures, request)
await self.farmer.server.send_to_specific([msg], node_id)
@api_request()
async def farming_info(self, request: farmer_protocol.FarmingInfo) -> None:
@api_request(peer_required=True)
async def farming_info(self, request: farmer_protocol.FarmingInfo, peer: WSChiaConnection) -> None:
self.farmer.state_changed(
"new_farming_info",
{
@@ -533,6 +588,8 @@ class FarmerAPI:
"proofs": request.proofs,
"total_plots": request.total_plots,
"timestamp": request.timestamp,
"node_id": peer.peer_node_id,
"lookup_time": request.lookup_time,
}
},
)


@@ -4,6 +4,7 @@ import asyncio
import concurrent
import dataclasses
import logging
import multiprocessing
from concurrent.futures.thread import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
@@ -14,18 +15,22 @@ from chia.consensus.constants import ConsensusConstants
from chia.plot_sync.sender import Sender
from chia.plotting.manager import PlotManager
from chia.plotting.util import (
HarvestingMode,
PlotRefreshEvents,
PlotRefreshResult,
PlotsRefreshParameter,
add_plot_directory,
get_harvester_config,
get_plot_directories,
remove_plot,
remove_plot_directory,
update_harvester_config,
)
from chia.rpc.rpc_server import StateChangedProtocol, default_get_connections
from chia.server.outbound_message import NodeType
from chia.server.server import ChiaServer
from chia.server.ws_connection import WSChiaConnection
from chia.util.ints import uint32
log = logging.getLogger(__name__)
@@ -41,6 +46,7 @@ class Harvester:
_refresh_lock: asyncio.Lock
event_loop: asyncio.events.AbstractEventLoop
_server: Optional[ChiaServer]
_mode: HarvestingMode
@property
def server(self) -> ChiaServer:
@@ -72,7 +78,6 @@ class Harvester:
self.plot_manager = PlotManager(
root_path, refresh_parameter=refresh_parameter, refresh_callback=self._plot_refresh_callback
)
self.plot_sync_sender = Sender(self.plot_manager)
self._shut_down = False
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=config["num_threads"])
self._server = None
@@ -80,6 +85,32 @@ class Harvester:
self.state_changed_callback: Optional[StateChangedProtocol] = None
self.parallel_read: bool = config.get("parallel_read", True)
context_count = config.get("parallel_decompressers_count", 5)
thread_count = config.get("decompresser_thread_count", 0)
if thread_count == 0:
thread_count = multiprocessing.cpu_count() // 2
disable_cpu_affinity = config.get("disable_cpu_affinity", False)
max_compression_level_allowed = config.get("max_compression_level_allowed", 7)
use_gpu_harvesting = config.get("use_gpu_harvesting", False)
gpu_index = config.get("gpu_index", 0)
enforce_gpu_index = config.get("enforce_gpu_index", False)
try:
self._mode = self.plot_manager.configure_decompresser(
context_count,
thread_count,
disable_cpu_affinity,
max_compression_level_allowed,
use_gpu_harvesting,
gpu_index,
enforce_gpu_index,
)
except Exception as e:
self.log.error(f"{type(e)} {e} while configuring decompresser.")
raise
self.plot_sync_sender = Sender(self.plot_manager, self._mode)
async def _start(self) -> None:
self._refresh_lock = asyncio.Lock()
self.event_loop = asyncio.get_running_loop()
@@ -146,6 +177,7 @@ class Harvester:
"plot_public_key": plot_info.plot_public_key,
"file_size": plot_info.file_size,
"time_modified": int(plot_info.time_modified),
"compression_level": prover.get_compresion_level(),
}
)
self.log.debug(
@@ -178,5 +210,42 @@ class Harvester:
self.plot_manager.trigger_refresh()
return True
async def get_harvester_config(self) -> Dict[str, Any]:
return get_harvester_config(self.root_path)
async def update_harvester_config(
self,
*,
use_gpu_harvesting: Optional[bool] = None,
gpu_index: Optional[int] = None,
enforce_gpu_index: Optional[bool] = None,
disable_cpu_affinity: Optional[bool] = None,
parallel_decompressers_count: Optional[int] = None,
decompresser_thread_count: Optional[int] = None,
recursive_plot_scan: Optional[bool] = None,
refresh_parameter_interval_seconds: Optional[uint32] = None,
) -> bool:
refresh_parameter: Optional[PlotsRefreshParameter] = None
if refresh_parameter_interval_seconds is not None:
refresh_parameter = PlotsRefreshParameter(
interval_seconds=refresh_parameter_interval_seconds,
retry_invalid_seconds=self.plot_manager.refresh_parameter.retry_invalid_seconds,
batch_size=self.plot_manager.refresh_parameter.batch_size,
batch_sleep_milliseconds=self.plot_manager.refresh_parameter.batch_sleep_milliseconds,
)
update_harvester_config(
self.root_path,
use_gpu_harvesting=use_gpu_harvesting,
gpu_index=gpu_index,
enforce_gpu_index=enforce_gpu_index,
disable_cpu_affinity=disable_cpu_affinity,
parallel_decompressers_count=parallel_decompressers_count,
decompresser_thread_count=decompresser_thread_count,
recursive_plot_scan=recursive_plot_scan,
refresh_parameter=refresh_parameter,
)
return True
def set_server(self, server: ChiaServer) -> None:
self._server = server
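A hypothetical call against the new `update_harvester_config` API above, assuming a running `harvester` instance; keyword arguments left unset keep their current values in config.yaml:

# Enable GPU harvesting on CUDA device 0 and refresh plots every 2 minutes.
changed = await harvester.update_harvester_config(
    use_gpu_harvesting=True,
    gpu_index=0,
    refresh_parameter_interval_seconds=uint32(120),
)
assert changed is True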


@@ -131,6 +131,22 @@ class HarvesterAPI:
proof_xs = plot_info.prover.get_full_proof(
sp_challenge_hash, index, self.harvester.parallel_read
)
except RuntimeError as e:
if str(e) == "GRResult_NoProof received":
self.harvester.log.info(
f"Proof dropped due to line point compression for {filename}"
)
self.harvester.log.info(
f"File: {filename} Plot ID: {plot_id.hex()}, challenge: {sp_challenge_hash}, "
f"plot_info: {plot_info}"
)
else:
self.harvester.log.error(f"Exception fetching full proof for {filename}. {e}")
self.harvester.log.error(
f"File: {filename} Plot ID: {plot_id.hex()}, challenge: {sp_challenge_hash}, "
f"plot_info: {plot_info}"
)
continue
except Exception as e:
self.harvester.log.error(f"Exception fetching full proof for {filename}. {e}")
self.harvester.log.error(
@@ -198,6 +214,7 @@ class HarvesterAPI:
awaitables.append(lookup_challenge(try_plot_filename, try_plot_info))
self.harvester.log.debug(f"new_signage_point_harvester {passed} plots passed the plot filter")
time_taken = time.time() - start
# Concurrently executes all lookups on disk, to take advantage of multiple disk parallelism
total_proofs_found = 0
for filename_sublist_awaitable in asyncio.as_completed(awaitables):
@@ -205,19 +222,19 @@ class HarvesterAPI:
time_taken = time.time() - start
if time_taken > 5:
self.harvester.log.warning(
f"Looking up qualities on {filename} took: {time_taken}. This should be below 5 seconds "
f"to minimize risk of losing rewards."
f"Looking up qualities on {filename} took: {time_taken}. This should be below 5 seconds"
f" to minimize risk of losing rewards."
)
else:
pass
# If you want additional logs, uncomment the following line
# self.harvester.log.debug(f"Looking up qualities on {filename} took: {time_taken}")
# self.harvester.log.info(f"Looking up qualities on {filename} took: {time_taken}")
for response in sublist:
total_proofs_found += 1
msg = make_msg(ProtocolMessageTypes.new_proof_of_space, response)
await peer.send_message(msg)
now = uint64(int(time.time()))
farming_info = FarmingInfo(
new_challenge.challenge_hash,
new_challenge.sp_hash,
@@ -225,13 +242,14 @@ class HarvesterAPI:
uint32(passed),
uint32(total_proofs_found),
uint32(total),
uint64(time_taken * 1_000_000), # nano seconds,
)
pass_msg = make_msg(ProtocolMessageTypes.farming_info, farming_info)
await peer.send_message(pass_msg)
found_time = time.time() - start
self.harvester.log.info(
f"{len(awaitables)} plots were eligible for farming {new_challenge.challenge_hash.hex()[:10]}..."
f" Found {total_proofs_found} proofs. Time: {found_time:.5f} s. "
f" Found {total_proofs_found} proofs. Time: {time_taken:.5f} s. "
f"Total {self.harvester.plot_manager.plot_count()} plots"
)
self.harvester.state_changed(
@@ -241,7 +259,7 @@ class HarvesterAPI:
"total_plots": self.harvester.plot_manager.plot_count(),
"found_proofs": total_proofs_found,
"eligible_plots": len(awaitables),
"time": found_time,
"time": time_taken,
},
)
@@ -298,6 +316,7 @@ class HarvesterAPI:
async def request_plots(self, _: harvester_protocol.RequestPlots) -> Message:
plots_response = []
plots, failed_to_open_filenames, no_key_filenames = self.harvester.get_plots()
for plot in plots:
plots_response.append(
Plot(
@@ -309,9 +328,9 @@ class HarvesterAPI:
plot["plot_public_key"],
plot["file_size"],
plot["time_modified"],
plot["compression_level"],
)
)
response = harvester_protocol.RespondPlots(plots_response, failed_to_open_filenames, no_key_filenames)
return make_msg(ProtocolMessageTypes.respond_plots, response)


@@ -7,6 +7,7 @@ from typing import Any, Awaitable, Callable, Collection, Dict, List, Optional
from typing_extensions import Protocol
from chia.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, expected_plot_size
from chia.plot_sync.delta import Delta, PathListDelta, PlotListDelta
from chia.plot_sync.exceptions import (
InvalidIdentifierError,
@@ -17,6 +18,7 @@ from chia.plot_sync.exceptions import (
SyncIdsMatchError,
)
from chia.plot_sync.util import ErrorCodes, State, T_PlotSyncMessage
from chia.plotting.util import HarvestingMode
from chia.protocols.harvester_protocol import (
Plot,
PlotSyncDone,
@@ -82,7 +84,9 @@ class Receiver:
_keys_missing: List[str]
_duplicates: List[str]
_total_plot_size: int
_total_effective_plot_size: float
_update_callback: ReceiverUpdateCallback
_harvesting_mode: Optional[HarvestingMode]
def __init__(
self,
@@ -97,11 +101,13 @@ class Receiver:
self._keys_missing = []
self._duplicates = []
self._total_plot_size = 0
self._total_effective_plot_size = float(0)
self._update_callback = update_callback
self._harvesting_mode = None
async def trigger_callback(self, update: Optional[Delta] = None) -> None:
async def trigger_callback(self, delta: Optional[Delta] = None) -> None:
try:
await self._update_callback(self._connection.peer_node_id, update)
await self._update_callback(self._connection.peer_node_id, delta)
except Exception as e:
log.error(f"_update_callback: node_id {self.connection().peer_node_id}, raised {e}")
@@ -114,6 +120,8 @@ class Receiver:
self._keys_missing.clear()
self._duplicates.clear()
self._total_plot_size = 0
self._total_effective_plot_size = float(0)
self._harvesting_mode = None
def connection(self) -> WSChiaConnection:
return self._connection
@@ -142,6 +150,12 @@ class Receiver:
def total_plot_size(self) -> int:
return self._total_plot_size
def total_effective_plot_size(self) -> float:
return self._total_effective_plot_size
def harvesting_mode(self) -> Optional[HarvestingMode]:
return self._harvesting_mode
async def _process(
self, method: Callable[[T_PlotSyncMessage], Any], message_type: ProtocolMessageTypes, message: T_PlotSyncMessage
) -> None:
@@ -196,6 +210,7 @@ class Receiver:
self._current_sync.delta.clear()
self._current_sync.state = State.loaded
self._current_sync.plots_total = data.plot_file_count
self._harvesting_mode = HarvestingMode(data.harvesting_mode)
self._current_sync.bump_next_message_id()
async def sync_started(self, data: PlotSyncStart) -> None:
@@ -329,6 +344,9 @@ class Receiver:
self._keys_missing = self._current_sync.delta.keys_missing.additions.copy()
self._duplicates = self._current_sync.delta.duplicates.additions.copy()
self._total_plot_size = sum(plot.file_size for plot in self._plots.values())
self._total_effective_plot_size = sum(
UI_ACTUAL_SPACE_CONSTANT_FACTOR * int(expected_plot_size(plot.size)) for plot in self._plots.values()
)
# Save current sync as last sync and create a new current sync
self._last_sync = self._current_sync
self._current_sync = Sync()
@@ -357,6 +375,8 @@ class Receiver:
"no_key_filenames": get_list_or_len(self._keys_missing, counts_only),
"duplicates": get_list_or_len(self._duplicates, counts_only),
"total_plot_size": self._total_plot_size,
"total_effective_plot_size": int(self._total_effective_plot_size),
"syncing": syncing,
"last_sync_time": self._last_sync.time_done,
"harvesting_mode": self._harvesting_mode,
}


@@ -13,7 +13,7 @@ from typing_extensions import Protocol
from chia.plot_sync.exceptions import AlreadyStartedError, InvalidConnectionTypeError
from chia.plot_sync.util import Constants
from chia.plotting.manager import PlotManager
from chia.plotting.util import PlotInfo
from chia.plotting.util import HarvestingMode, PlotInfo
from chia.protocols.harvester_protocol import (
Plot,
PlotSyncDone,
@@ -45,6 +45,7 @@ def _convert_plot_info_list(plot_infos: List[PlotInfo]) -> List[Plot]:
plot_public_key=plot_info.plot_public_key,
file_size=uint64(plot_info.file_size),
time_modified=uint64(int(plot_info.time_modified)),
compression_level=plot_info.prover.get_compresion_level(),
)
)
return converted
@@ -98,8 +99,9 @@ class Sender:
_stop_requested = False
_task: Optional[asyncio.Task[None]]
_response: Optional[ExpectedResponse]
_harvesting_mode: HarvestingMode
def __init__(self, plot_manager: PlotManager) -> None:
def __init__(self, plot_manager: PlotManager, harvesting_mode: HarvestingMode) -> None:
self._plot_manager = plot_manager
self._connection = None
self._sync_id = uint64(0)
@@ -109,6 +111,7 @@ class Sender:
self._stop_requested = False
self._task = None
self._response = None
self._harvesting_mode = harvesting_mode
def __str__(self) -> str:
return f"sync_id {self._sync_id}, next_message_id {self._next_message_id}, messages {len(self._messages)}"
@@ -267,8 +270,14 @@ class Sender:
sync_id = sync_id + 1
log.debug(f"sync_start {sync_id}")
self._sync_id = uint64(sync_id)
self._add_message(
ProtocolMessageTypes.plot_sync_start, PlotSyncStart, initial, self._last_sync_id, uint32(int(count))
ProtocolMessageTypes.plot_sync_start,
PlotSyncStart,
initial,
self._last_sync_id,
uint32(int(count)),
self._harvesting_mode,
)
def process_batch(self, loaded: List[PlotInfo], remaining: int) -> None:


@@ -52,6 +52,26 @@ def meets_memory_requirement(plotters_root_path: Path) -> Tuple[bool, Optional[s
return have_enough_memory, warning_string
def is_cudaplot_available(plotters_root_path: Path) -> bool:
bladebit_executable_path = get_bladebit_executable_path(plotters_root_path)
if not bladebit_executable_path.exists():
return False
cuda_available = False
try:
proc = run_command(
[os.fspath(bladebit_executable_path), "cudacheck"],
"Failed to call bladebit with cudacheck command",
capture_output=True,
text=True,
check=False,
)
cuda_available = proc.returncode == 0
except Exception as e:
print(f"Failed to determine whether bladebit supports cuda: {e}")
finally:
return cuda_available
def get_bladebit_src_path(plotters_root_path: Path) -> Path:
return plotters_root_path / BLADEBIT_PLOTTER_DIR
@@ -60,37 +80,52 @@ def get_bladebit_package_path() -> Path:
return Path(os.path.dirname(sys.executable)) / "bladebit"
def get_bladebit_exec_venv_path() -> Optional[Path]:
def get_bladebit_exec_path(with_cuda: Optional[bool] = None):
if with_cuda:
return "bladebit_cuda.exe" if sys.platform in ["win32", "cygwin"] else "bladebit_cuda"
return "bladebit.exe" if sys.platform in ["win32", "cygwin"] else "bladebit"
def get_bladebit_exec_venv_path(with_cuda: Optional[bool] = None) -> Optional[Path]:
venv_bin_path = get_venv_bin()
if not venv_bin_path:
return None
if sys.platform in ["win32", "cygwin"]:
return venv_bin_path / "bladebit.exe"
else:
return venv_bin_path / "bladebit"
bladebit_exec = get_bladebit_exec_path(with_cuda)
return venv_bin_path / bladebit_exec
def get_bladebit_exec_src_path(plotters_root_path: Path) -> Path:
def get_bladebit_exec_src_path(plotters_root_path: Path, with_cuda: Optional[bool] = None) -> Path:
bladebit_src_dir = get_bladebit_src_path(plotters_root_path)
build_dir = "build/Release" if sys.platform in ["win32", "cygwin"] else "build"
bladebit_exec = "bladebit.exe" if sys.platform in ["win32", "cygwin"] else "bladebit"
bladebit_exec = get_bladebit_exec_path(with_cuda)
return bladebit_src_dir / build_dir / bladebit_exec
def get_bladebit_exec_package_path() -> Path:
def get_bladebit_exec_package_path(with_cuda: Optional[bool] = None) -> Path:
bladebit_package_dir = get_bladebit_package_path()
bladebit_exec = "bladebit.exe" if sys.platform in ["win32", "cygwin"] else "bladebit"
bladebit_exec = get_bladebit_exec_path(with_cuda)
return bladebit_package_dir / bladebit_exec
def get_bladebit_executable_path(plotters_root_path: Path) -> Path:
bladebit_exec_venv_path = get_bladebit_exec_venv_path()
# Search for bladebit executable which supports CUDA at the first priority
bladebit_exec_venv_path = get_bladebit_exec_venv_path(with_cuda=True)
if bladebit_exec_venv_path is not None and bladebit_exec_venv_path.exists():
return bladebit_exec_venv_path
bladebit_exec_src_path = get_bladebit_exec_src_path(plotters_root_path)
bladebit_exec_src_path = get_bladebit_exec_src_path(plotters_root_path, with_cuda=True)
if bladebit_exec_src_path.exists():
return bladebit_exec_src_path
return get_bladebit_exec_package_path()
bladebit_exec_package_path = get_bladebit_exec_package_path(with_cuda=True)
if bladebit_exec_package_path.exists():
return bladebit_exec_package_path
bladebit_exec_venv_path = get_bladebit_exec_venv_path(with_cuda=False)
if bladebit_exec_venv_path is not None and bladebit_exec_venv_path.exists():
return bladebit_exec_venv_path
bladebit_exec_src_path = get_bladebit_exec_src_path(plotters_root_path, with_cuda=False)
if bladebit_exec_src_path.exists():
return bladebit_exec_src_path
return get_bladebit_exec_package_path(with_cuda=False)
def get_bladebit_version(plotters_root_path: Path):
@@ -122,6 +157,7 @@ def get_bladebit_install_info(plotters_root_path: Path) -> Optional[Dict[str, An
info: Dict[str, Any] = {"display_name": "BladeBit Plotter"}
installed: bool = False
supported: bool = is_bladebit_supported()
cuda_available = is_cudaplot_available(plotters_root_path)
bladebit_executable_path = get_bladebit_executable_path(plotters_root_path)
if bladebit_executable_path.exists():
@@ -147,9 +183,33 @@ def get_bladebit_install_info(plotters_root_path: Path) -> Optional[Dict[str, An
if memory_warning is not None:
info["bladebit_memory_warning"] = memory_warning
info["cuda_support"] = cuda_available
return info
# @TODO Set valid progress logs
progress_bladebit_cuda = {
"Finished F1 sort": 0.01,
"Finished forward propagating table 2": 0.06,
"Finished forward propagating table 3": 0.12,
"Finished forward propagating table 4": 0.2,
"Finished forward propagating table 5": 0.28,
"Finished forward propagating table 6": 0.36,
"Finished forward propagating table 7": 0.42,
"Finished prunning table 6": 0.43,
"Finished prunning table 5": 0.48,
"Finished prunning table 4": 0.51,
"Finished prunning table 3": 0.55,
"Finished prunning table 2": 0.58,
"Finished compressing tables 1 and 2": 0.66,
"Finished compressing tables 2 and 3": 0.73,
"Finished compressing tables 3 and 4": 0.79,
"Finished compressing tables 4 and 5": 0.85,
"Finished compressing tables 5 and 6": 0.92,
"Finished compressing tables 6 and 7": 0.98,
}
progress_bladebit_ram = {
"Finished F1 sort": 0.01,
"Finished forward propagating table 2": 0.06,
@@ -171,7 +231,6 @@ progress_bladebit_ram = {
"Finished compressing tables 6 and 7": 0.98,
}
progress_bladebit_disk = {
# "Running Phase 1": 0.01,
"Finished f1 generation in ": 0.01,
@@ -231,7 +290,10 @@ def plot_bladebit(args, chia_root_path, root_path):
args.connect_to_daemon,
)
)
plot_type = "ramplot" if args.plot_type == "ramplot" else "diskplot"
if args.plot_type == "ramplot" or args.plot_type == "cudaplot":
plot_type = args.plot_type
else:
plot_type = "diskplot"
call_args = [
os.fspath(bladebit_executable_path),
"--threads",
@ -261,6 +323,14 @@ def plot_bladebit(args, chia_root_path, root_path):
call_args.append("--no-cpu-affinity")
if args.verbose:
call_args.append("--verbose")
if (
"compress" in args
and args.compress is not None
and str(args.compress).isdigit()
and int(version_or_exception[0]) >= 3
):
call_args.append("--compress")
call_args.append(str(args.compress))
call_args.append(plot_type)
@@ -297,11 +367,21 @@ def plot_bladebit(args, chia_root_path, root_path):
call_args.append("--no-t1-direct")
if "no_t2_direct" in args and args.no_t2_direct:
call_args.append("--no-t2-direct")
if "device" in args and str(args.device).isdigit():
call_args.append("--device")
call_args.append(args.device)
if "no_direct_downloads" in args and args.no_direct_downloads:
call_args.append("--no-direct-downloads")
call_args.append(args.finaldir)
try:
progress = progress_bladebit_ram if plot_type == "ramplot" else progress_bladebit_disk
if plot_type == "cudaplot":
progress = progress_bladebit_cuda
elif plot_type == "ramplot":
progress = progress_bladebit_ram
else:
progress = progress_bladebit_disk
asyncio.run(run_plotter(chia_root_path, args.plotter, call_args, progress))
except Exception as e:
print(f"Exception while plotting: {e} {type(e)}")


@@ -49,6 +49,9 @@ class Options(Enum):
BLADEBIT_ALTERNATE = 34
BLADEBIT_NO_T1_DIRECT = 35
BLADEBIT_NO_T2_DIRECT = 36
COMPRESSION = 37
BLADEBIT_DEVICE_INDEX = 38
BLADEBIT_NO_DIRECT_DOWNLOADS = 39
chia_plotter_options = [
@@ -71,6 +74,7 @@ chia_plotter_options = [
Options.EXCLUDE_FINAL_DIR,
Options.CONNECT_TO_DAEMON,
Options.FINAL_DIR,
Options.COMPRESSION,
]
madmax_plotter_options = [
@@ -91,6 +95,24 @@ madmax_plotter_options = [
Options.FINAL_DIR,
]
bladebit_cuda_plotter_options = [
Options.NUM_THREADS,
Options.PLOT_COUNT,
Options.FARMERKEY,
Options.POOLKEY,
Options.POOLCONTRACT,
Options.ID,
Options.BLADEBIT_WARMSTART,
Options.BLADEBIT_NONUMA,
Options.BLADEBIT_NO_CPU_AFFINITY,
Options.VERBOSE,
Options.CONNECT_TO_DAEMON,
Options.FINAL_DIR,
Options.COMPRESSION,
Options.BLADEBIT_DEVICE_INDEX,
Options.BLADEBIT_NO_DIRECT_DOWNLOADS,
]
bladebit_ram_plotter_options = [
Options.NUM_THREADS,
Options.PLOT_COUNT,
@@ -104,6 +126,7 @@ bladebit_ram_plotter_options = [
Options.VERBOSE,
Options.CONNECT_TO_DAEMON,
Options.FINAL_DIR,
Options.COMPRESSION,
]
bladebit_disk_plotter_options = [
@@ -132,6 +155,7 @@ bladebit_disk_plotter_options = [
Options.MEMO,
Options.BLADEBIT_NO_T1_DIRECT,
Options.BLADEBIT_NO_T2_DIRECT,
Options.COMPRESSION,
]
@@ -416,6 +440,27 @@ def build_parser(subparsers, root_path, option_list, name, plotter_desc):
help="Disable direct I/O on the temp 2 directory",
default=False,
)
if option is Options.COMPRESSION:
parser.add_argument(
"--compress",
type=int,
help="Compression level",
default=1,
)
if option is Options.BLADEBIT_DEVICE_INDEX:
parser.add_argument(
"--device",
type=int,
help="The CUDA device index",
default=0,
)
if option is Options.BLADEBIT_NO_DIRECT_DOWNLOADS:
parser.add_argument(
"--no-direct-downloads",
action="store_true",
help="Don't allocate host tables using pinned buffers",
default=False,
)
def call_plotters(root_path: Path, args):
@@ -444,6 +489,7 @@ def call_plotters(root_path: Path, args):
bladebit_parser = subparsers.add_parser("bladebit", help="Create a plot with bladebit")
subparsers_bb = bladebit_parser.add_subparsers(dest="plot_type", required=True)
build_parser(subparsers_bb, root_path, bladebit_cuda_plotter_options, "cudaplot", "Create a plot using CUDA")
build_parser(subparsers_bb, root_path, bladebit_ram_plotter_options, "ramplot", "Create a plot using RAM")
build_parser(subparsers_bb, root_path, bladebit_disk_plotter_options, "diskplot", "Create a plot using disk")
@@ -490,11 +536,7 @@ def get_available_plotters(root_path) -> Dict[str, Any]:
if chiapos is not None:
plotters["chiapos"] = chiapos
if bladebit and bladebit.get("version") is not None:
bladebit_major_version = bladebit["version"].split(".")[0]
if bladebit_major_version == "2":
plotters["bladebit2"] = bladebit
else:
plotters["bladebit"] = bladebit
plotters["bladebit"] = bladebit
if madmax is not None:
plotters["madmax"] = madmax
@@ -506,8 +548,9 @@ def show_plotters_version(root_path: Path):
if "chiapos" in info and "version" in info["chiapos"]:
print(f"chiapos: {info['chiapos']['version']}")
if "bladebit" in info and "version" in info["bladebit"]:
print(f"bladebit: {info['bladebit']['version']}")
if "bladebit2" in info and "version" in info["bladebit2"]:
print(f"bladebit: {info['bladebit2']['version']}")
if info["bladebit"]["cuda_support"]:
print(f"bladebit: {info['bladebit']['version']} (CUDA ready)")
else:
print(f"bladebit: {info['bladebit']['version']}")
if "madmax" in info and "version" in info["madmax"]:
print(f"madmax: {info['madmax']['version']}")


@@ -21,7 +21,7 @@ from chia.wallet.derive_keys import master_sk_to_local_sk
log = logging.getLogger(__name__)
CURRENT_VERSION: int = 1
CURRENT_VERSION: int = 2
@streamable


@@ -1,16 +1,20 @@
from __future__ import annotations
import concurrent.futures
import logging
import multiprocessing
from collections import Counter
from pathlib import Path
from time import sleep, time
from typing import List, Optional
import more_itertools
from blspy import G1Element
from chiapos import Verifier
from chia.plotting.manager import PlotManager
from chia.plotting.util import (
PlotInfo,
PlotRefreshEvents,
PlotRefreshResult,
PlotsRefreshParameter,
@@ -50,6 +54,27 @@ def check_plots(
refresh_parameter=plot_refresh_parameter,
refresh_callback=plot_refresh_callback,
)
context_count = config["harvester"].get("parallel_decompressers_count", 5)
thread_count = config["harvester"].get("decompresser_thread_count", 0)
if thread_count == 0:
thread_count = multiprocessing.cpu_count() // 2
disable_cpu_affinity = config["harvester"].get("disable_cpu_affinity", False)
max_compression_level_allowed = config["harvester"].get("max_compression_level_allowed", 7)
use_gpu_harvesting = config["harvester"].get("use_gpu_harvesting", False)
gpu_index = config["harvester"].get("gpu_index", 0)
enforce_gpu_index = config["harvester"].get("enforce_gpu_index", False)
plot_manager.configure_decompresser(
context_count,
thread_count,
disable_cpu_affinity,
max_compression_level_allowed,
use_gpu_harvesting,
gpu_index,
enforce_gpu_index,
)
if num is not None:
if num == 0:
log.warning("Not opening plot files")
@ -110,7 +135,12 @@ def check_plots(
bad_plots_list: List[Path] = []
with plot_manager:
for plot_path, plot_info in plot_manager.plots.items():
def process_plot(plot_path: Path, plot_info: PlotInfo, num_start: int, num_end: int) -> None:
nonlocal total_good_plots
nonlocal total_size
nonlocal bad_plots_list
pr = plot_info.prover
log.info(f"Testing plot {plot_path} k={pr.get_size()}")
if plot_info.pool_public_key is not None:
@ -140,10 +170,10 @@ def check_plots(
if quality_spent_time > 5000:
log.warning(
f"\tLooking up qualities took: {quality_spent_time} ms. This should be below 5 seconds "
f"to minimize risk of losing rewards."
f"to minimize risk of losing rewards. Filepath: {plot_path}"
)
else:
log.info(f"\tLooking up qualities took: {quality_spent_time} ms.")
log.info(f"\tLooking up qualities took: {quality_spent_time} ms. Filepath: {plot_path}")
# Other plot errors cause get_full_proof or validate_proof to throw an AssertionError
try:
@ -153,35 +183,65 @@ def check_plots(
if proof_spent_time > 15000:
log.warning(
f"\tFinding proof took: {proof_spent_time} ms. This should be below 15 seconds "
f"to minimize risk of losing rewards."
f"to minimize risk of losing rewards. Filepath: {plot_path}"
)
else:
log.info(f"\tFinding proof took: {proof_spent_time} ms")
total_proofs += 1
log.info(f"\tFinding proof took: {proof_spent_time} ms. Filepath: {plot_path}")
ver_quality_str = v.validate_proof(pr.get_id(), pr.get_size(), challenge, proof)
assert quality_str == ver_quality_str
if quality_str == ver_quality_str:
total_proofs += 1
else:
log.warning(
f"\tQuality doesn't match with proof. Filepath: {plot_path} "
"This can occasionally happen with a compressed plot."
)
except AssertionError as e:
log.error(f"{type(e)}: {e} error in proving/verifying for plot {plot_path}")
log.error(
f"{type(e)}: {e} error in proving/verifying for plot {plot_path}. Filepath: {plot_path}"
)
caught_exception = True
quality_start_time = int(round(time() * 1000))
except KeyboardInterrupt:
log.warning("Interrupted, closing")
return None
return
except SystemExit:
log.warning("System is shutting down.")
return None
return
except RuntimeError as e:
if str(e) == "GRResult_NoProof received":
log.info(f"Proof dropped due to line point compression. Filepath: {plot_path}")
continue
else:
log.error(f"{type(e)}: {e} error in getting challenge qualities for plot {plot_path}")
caught_exception = True
except Exception as e:
log.error(f"{type(e)}: {e} error in getting challenge qualities for plot {plot_path}")
caught_exception = True
if caught_exception is True:
break
if total_proofs > 0 and caught_exception is False:
log.info(f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}")
log.info(
f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}. "
f"Filepath: {plot_path}"
)
total_good_plots[pr.get_size()] += 1
total_size += plot_path.stat().st_size
else:
log.error(f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}")
log.error(
f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)} "
f"Filepath: {plot_path}"
)
bad_plots_list.append(plot_path)
with concurrent.futures.ThreadPoolExecutor() as executor:
for batch in more_itertools.chunked(plot_manager.plots.items(), context_count):
futures = []
for plot_path, plot_info in batch:
futures.append(executor.submit(process_plot, plot_path, plot_info, num_start, num_end))
for future in concurrent.futures.as_completed(futures):
_ = future.result()
log.info("")
log.info("")
log.info("Summary")

View File

@ -8,12 +8,19 @@ from concurrent.futures.thread import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
import chiapos
from blspy import G1Element
from chiapos import DiskProver
from chia.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, _expected_plot_size
from chia.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, expected_plot_size
from chia.plotting.cache import Cache, CacheEntry
from chia.plotting.util import PlotInfo, PlotRefreshEvents, PlotRefreshResult, PlotsRefreshParameter, get_plot_filenames
from chia.plotting.util import (
HarvestingMode,
PlotInfo,
PlotRefreshEvents,
PlotRefreshResult,
PlotsRefreshParameter,
get_plot_filenames,
)
from chia.util.generator_tools import list_to_batches
log = logging.getLogger(__name__)
@ -38,6 +45,7 @@ class PlotManager:
_refreshing_enabled: bool
_refresh_callback: Callable
_initial: bool
max_compression_level_allowed: int
def __init__(
self,
@ -55,7 +63,11 @@ class PlotManager:
self.no_key_filenames = set()
self.farmer_public_keys = []
self.pool_public_keys = []
self.cache = Cache(self.root_path.resolve() / "cache" / "plot_manager.dat")
# Since the `compression_level` property was added to the Cache structure,
# the previous cache file format needs to be reset.
# When a user downgrades the harvester, it looks at `plot_manager.dat` while
# the latest harvester reads/writes `plot_manager_v2.dat`.
self.cache = Cache(self.root_path.resolve() / "cache" / "plot_manager_v2.dat")
self.match_str = match_str
self.open_no_key_filenames = open_no_key_filenames
self.last_refresh_time = 0
@ -66,6 +78,7 @@ class PlotManager:
self._refreshing_enabled = False
self._refresh_callback = refresh_callback
self._initial = True
self.max_compression_level_allowed = 0
def __enter__(self):
self._lock.acquire()
@ -73,6 +86,41 @@ class PlotManager:
def __exit__(self, exc_type, exc_value, exc_traceback):
self._lock.release()
def configure_decompresser(
self,
context_count: int,
thread_count: int,
disable_cpu_affinity: bool,
max_compression_level_allowed: int,
use_gpu_harvesting: bool,
gpu_index: int,
enforce_gpu_index: bool,
) -> HarvestingMode:
decompresser_context_queue = getattr(chiapos, "decompresser_context_queue")
if max_compression_level_allowed > 7:
log.error(
"Currently only compression levels up to 7 are allowed, "
f"while {max_compression_level_allowed} was specified."
"Setting max_compression_level_allowed to 7."
)
max_compression_level_allowed = 7
is_using_gpu = decompresser_context_queue.init(
context_count,
thread_count,
disable_cpu_affinity,
max_compression_level_allowed,
use_gpu_harvesting,
gpu_index,
enforce_gpu_index,
)
if not is_using_gpu and use_gpu_harvesting:
log.error(
"GPU harvesting failed initialization. "
f"Falling back to CPU harvesting: {context_count} decompressers count, {thread_count} threads."
)
self.max_compression_level_allowed = max_compression_level_allowed
return HarvestingMode.GPU if is_using_gpu else HarvestingMode.CPU
def reset(self) -> None:
with self:
self.last_refresh_time = time.time()
@ -271,22 +319,33 @@ class PlotManager:
cache_entry = self.cache.get(file_path)
cache_hit = cache_entry is not None
if not cache_hit:
prover = DiskProver(str(file_path))
prover = chiapos.DiskProver(str(file_path))
log.debug(f"process_file {str(file_path)}")
expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
expected_size = expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
# TODO: consider checking if the file was just written to (which would mean that the file is still
# being copied). A segfault might happen in this edge case.
if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
if (
prover.get_size() >= 30
and stat_info.st_size < 0.98 * expected_size
and prover.get_compresion_level() == 0
):
log.warning(
f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024 ** 3)} GiB, but expected"
f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
)
return None
if prover.get_compresion_level() > self.max_compression_level_allowed:
log.warning(
f"Not farming plot {file_path}. Plot compression level: {prover.get_compresion_level()}, "
f"max compression level allowed: {self.max_compression_level_allowed}."
)
return None
cache_entry = CacheEntry.from_disk_prover(prover)
self.cache.update(file_path, cache_entry)

View File

@ -2,7 +2,7 @@ from __future__ import annotations
import logging
from dataclasses import dataclass, field
from enum import Enum
from enum import Enum, IntEnum
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
@ -84,6 +84,11 @@ class Params:
stripe_size: int = 65536
class HarvestingMode(IntEnum):
CPU = 1
GPU = 2
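Because HarvestingMode is an IntEnum, it serializes as a plain integer, which is how PlotSyncStart.harvesting_mode (a uint8) carries it later in this diff. A hedged sketch, assuming this branch's module layout:

from chia.plotting.util import HarvestingMode
from chia.util.ints import uint8

assert uint8(HarvestingMode.CPU) == 1  # IntEnum members compare equal to ints
assert HarvestingMode(2) is HarvestingMode.GPU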
def get_plot_directories(root_path: Path, config: Dict = None) -> List[str]:
if config is None:
config = load_config(root_path, "config.yaml")
@ -146,6 +151,59 @@ def remove_plot(path: Path):
path.unlink()
def get_harvester_config(root_path: Path, config: Dict = None) -> Dict:
if config is None:
config = load_config(root_path, "config.yaml")
plots_refresh_parameter = config["harvester"].get("plots_refresh_parameter")
if plots_refresh_parameter is None:
plots_refresh_parameter = PlotsRefreshParameter().to_json_dict()
return {
"use_gpu_harvesting": config["harvester"].get("use_gpu_harvesting"),
"gpu_index": config["harvester"].get("gpu_index"),
"enforce_gpu_index": config["harvester"].get("enforce_gpu_index"),
"disable_cpu_affinity": config["harvester"].get("disable_cpu_affinity"),
"parallel_decompressers_count": config["harvester"].get("parallel_decompressers_count"),
"decompresser_thread_count": config["harvester"].get("decompresser_thread_count"),
"recursive_plot_scan": config["harvester"].get("recursive_plot_scan"),
"plots_refresh_parameter": plots_refresh_parameter,
}
def update_harvester_config(
root_path: Path,
*,
use_gpu_harvesting: Optional[bool] = None,
gpu_index: Optional[int] = None,
enforce_gpu_index: Optional[bool] = None,
disable_cpu_affinity: Optional[bool] = None,
parallel_decompressers_count: Optional[int] = None,
decompresser_thread_count: Optional[int] = None,
recursive_plot_scan: Optional[bool] = None,
refresh_parameter: Optional[PlotsRefreshParameter] = None,
):
with lock_and_load_config(root_path, "config.yaml") as config:
if use_gpu_harvesting is not None:
config["harvester"]["use_gpu_harvesting"] = use_gpu_harvesting
if gpu_index is not None:
config["harvester"]["gpu_index"] = gpu_index
if enforce_gpu_index is not None:
config["harvester"]["enforce_gpu_index"] = enforce_gpu_index
if disable_cpu_affinity is not None:
config["harvester"]["disable_cpu_affinity"] = disable_cpu_affinity
if parallel_decompressers_count is not None:
config["harvester"]["parallel_decompressers_count"] = parallel_decompressers_count
if decompresser_thread_count is not None:
config["harvester"]["decompresser_thread_count"] = decompresser_thread_count
if recursive_plot_scan is not None:
config["harvester"]["recursive_plot_scan"] = recursive_plot_scan
if refresh_parameter is not None:
config["harvester"]["plots_refresh_parameter"] = refresh_parameter.to_json_dict()
save_config(root_path, "config.yaml", config)
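update_harvester_config only rewrites the keys it is given, so callers can patch a single setting. A hedged usage sketch (DEFAULT_ROOT_PATH assumes a standard install; the values are illustrative):

from chia.plotting.util import PlotsRefreshParameter, update_harvester_config
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.ints import uint32

# Enable GPU harvesting and shorten the plot refresh interval to 60 seconds;
# every other harvester setting is left untouched.
update_harvester_config(
    DEFAULT_ROOT_PATH,
    use_gpu_harvesting=True,
    refresh_parameter=PlotsRefreshParameter(interval_seconds=uint32(60)),
)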
def get_filenames(directory: Path, recursive: bool) -> List[Path]:
try:
if not directory.exists():

View File

@ -60,6 +60,7 @@ class FarmingInfo(Streamable):
passed: uint32
proofs: uint32
total_plots: uint32
lookup_time: uint64
@streamable

View File

@ -83,6 +83,7 @@ class Plot(Streamable):
plot_public_key: G1Element
file_size: uint64
time_modified: uint64
compression_level: Optional[uint8]
@streamable
@ -114,11 +115,13 @@ class PlotSyncStart(Streamable):
initial: bool
last_sync_id: uint64
plot_file_count: uint32
harvesting_mode: uint8
def __str__(self) -> str:
return (
f"PlotSyncStart: identifier {self.identifier}, initial {self.initial}, "
f"last_sync_id {self.last_sync_id}, plot_file_count {self.plot_file_count}"
f"last_sync_id {self.last_sync_id}, plot_file_count {self.plot_file_count}, "
f"harvesting_mode {self.harvesting_mode}"
)

View File

@ -110,7 +110,9 @@ class FarmerRpcApi:
pass
elif change == "new_signage_point":
sp_hash = change_data["sp_hash"]
missing_signage_points = change_data["missing_signage_points"]
data = await self.get_signage_point({"sp_hash": sp_hash.hex()})
data["missing_signage_points"] = missing_signage_points
payloads.append(
create_payload_dict(
"new_signage_point",
@ -187,6 +189,23 @@ class FarmerRpcApi:
"metrics",
)
)
payloads.append(
create_payload_dict(
"submitted_partial",
change_data,
self.service_name,
"wallet_ui",
)
)
elif change == "failed_partial":
payloads.append(
create_payload_dict(
"failed_partial",
change_data,
self.service_name,
"wallet_ui",
)
)
elif change == "proof":
payloads.append(
create_payload_dict(
@ -283,7 +302,6 @@ class FarmerRpcApi:
pools_list = []
for p2_singleton_puzzle_hash, pool_dict in self.service.pool_state.items():
pool_state = pool_dict.copy()
pool_state["p2_singleton_puzzle_hash"] = p2_singleton_puzzle_hash.hex()
pool_state["plot_count"] = self.get_pool_contract_puzzle_hash_plot_count(p2_singleton_puzzle_hash)
pools_list.append(pool_state)
return {"pool_state": pools_list}

View File

@ -128,6 +128,7 @@ class FullNodeRpcApi:
"difficulty": 0,
"sub_slot_iters": 0,
"space": 0,
"average_block_time": 0,
"mempool_size": 0,
"mempool_cost": 0,
"mempool_min_fees": {
@ -166,17 +167,49 @@ class FullNodeRpcApi:
else:
sync_progress_height = uint32(0)
average_block_time = uint32(0)
space = {"space": uint128(0)}
if peak is not None and peak.height > 1:
newer_block_hex = peak.header_hash.hex()
# Average over the last day
header_hash = self.service.blockchain.height_to_hash(uint32(max(1, peak.height - 4608)))
assert header_hash is not None
older_block_hex = header_hash.hex()
older_header_hash = self.service.blockchain.height_to_hash(uint32(max(1, peak.height - 4608)))
assert older_header_hash is not None
older_block_hex = older_header_hash.hex()
space = await self.get_network_space(
{"newer_block_header_hash": newer_block_hex, "older_block_header_hash": older_block_hex}
)
else:
space = {"space": uint128(0)}
try:
newer_block: Optional[BlockRecord] = (
await self.get_block_record({"header_hash": peak.header_hash.hex()})
)["block_record"]
while newer_block is not None and newer_block.height > 0 and not newer_block.is_transaction_block:
newer_block = (await self.get_block_record({"header_hash": newer_block.prev_hash.hex()}))[
"block_record"
]
except ValueError:
newer_block = None
if newer_block is not None:
older_header_hash = self.service.blockchain.height_to_hash(uint32(max(1, newer_block.height - 4608)))
try:
older_block: Optional[BlockRecord] = (
(await self.get_block_record({"header_hash": older_header_hash.hex()}))["block_record"]
if older_header_hash is not None
else None
)
while older_block is not None and older_block.height > 0 and not older_block.is_transaction_block:
older_block = (await self.get_block_record({"header_hash": older_block.prev_hash.hex()}))[
"block_record"
]
except ValueError:
older_block = None
if older_block is not None and newer_block.timestamp is not None and older_block.timestamp is not None:
average_block_time = uint32(
(newer_block.timestamp - older_block.timestamp) / (newer_block.height - older_block.height)
)
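Once both endpoints have been walked back to transaction blocks (only those carry timestamps), the new average_block_time is a plain rate over the roughly one-day window of 4608 blocks. A minimal rendition with made-up numbers:

# Hypothetical endpoint values; 4608 blocks is about one day at ~18.75 s/block.
newer_timestamp, newer_height = 1_687_800_000, 3_500_000
older_timestamp, older_height = 1_687_713_600, 3_495_392  # ~4608 blocks back

average_block_time = (newer_timestamp - older_timestamp) // (newer_height - older_height)
print(average_block_time)  # -> 18 (seconds per transaction block)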
if self.service.mempool_manager is not None:
mempool_size = self.service.mempool_manager.mempool.size()
@ -212,6 +245,7 @@ class FullNodeRpcApi:
"difficulty": difficulty,
"sub_slot_iters": sub_slot_iters,
"space": space["space"],
"average_block_time": average_block_time,
"mempool_size": mempool_size,
"mempool_cost": mempool_cost,
"mempool_fees": mempool_fees,

View File

@ -4,6 +4,7 @@ from typing import Any, Dict, List, Optional
from chia.harvester.harvester import Harvester
from chia.rpc.rpc_server import Endpoint, EndpointResult
from chia.util.ints import uint32
from chia.util.ws_message import WsRpcMessage, create_payload_dict
@ -20,6 +21,8 @@ class HarvesterRpcApi:
"/add_plot_directory": self.add_plot_directory,
"/get_plot_directories": self.get_plot_directories,
"/remove_plot_directory": self.remove_plot_directory,
"/get_harvester_config": self.get_harvester_config,
"/update_harvester_config": self.update_harvester_config,
}
async def _state_changed(self, change: str, change_data: Optional[Dict[str, Any]] = None) -> List[WsRpcMessage]:
@ -35,6 +38,7 @@ class HarvesterRpcApi:
if change == "farming_info":
payloads.append(create_payload_dict("farming_info", change_data, self.service_name, "metrics"))
payloads.append(create_payload_dict("farming_info", change_data, self.service_name, "wallet_ui"))
if change == "add_connection":
payloads.append(create_payload_dict("add_connection", change_data, self.service_name, "metrics"))
@ -77,3 +81,56 @@ class HarvesterRpcApi:
if await self.service.remove_plot_directory(directory_name):
return {}
raise ValueError(f"Did not remove plot directory {directory_name}")
async def get_harvester_config(self, _: Dict[str, Any]) -> EndpointResult:
harvester_config = await self.service.get_harvester_config()
return {
"use_gpu_harvesting": harvester_config["use_gpu_harvesting"],
"gpu_index": harvester_config["gpu_index"],
"enforce_gpu_index": harvester_config["enforce_gpu_index"],
"disable_cpu_affinity": harvester_config["disable_cpu_affinity"],
"parallel_decompressers_count": harvester_config["parallel_decompressers_count"],
"decompresser_thread_count": harvester_config["decompresser_thread_count"],
"recursive_plot_scan": harvester_config["recursive_plot_scan"],
"refresh_parameter_interval_seconds": harvester_config["plots_refresh_parameter"].get("interval_seconds"),
}
async def update_harvester_config(self, request: Dict[str, Any]) -> EndpointResult:
use_gpu_harvesting: Optional[bool] = None
gpu_index: Optional[int] = None
enforce_gpu_index: Optional[bool] = None
disable_cpu_affinity: Optional[bool] = None
parallel_decompressers_count: Optional[int] = None
decompresser_thread_count: Optional[int] = None
recursive_plot_scan: Optional[bool] = None
refresh_parameter_interval_seconds: Optional[uint32] = None
if "use_gpu_harvesting" in request:
use_gpu_harvesting = bool(request["use_gpu_harvesting"])
if "gpu_index" in request:
gpu_index = int(request["gpu_index"])
if "enforce_gpu_index" in request:
enforce_gpu_index = bool(request["enforce_gpu_index"])
if "disable_cpu_affinity" in request:
disable_cpu_affinity = bool(request["disable_cpu_affinity"])
if "parallel_decompressers_count" in request:
parallel_decompressers_count = int(request["parallel_decompressers_count"])
if "decompresser_thread_count" in request:
decompresser_thread_count = int(request["decompresser_thread_count"])
if "recursive_plot_scan" in request:
recursive_plot_scan = bool(request["recursive_plot_scan"])
if "refresh_parameter_interval_seconds" in request:
refresh_parameter_interval_seconds = uint32(request["refresh_parameter_interval_seconds"])
if refresh_parameter_interval_seconds < 3:
raise ValueError(f"Plot refresh interval seconds({refresh_parameter_interval_seconds}) is too short")
await self.service.update_harvester_config(
use_gpu_harvesting=use_gpu_harvesting,
gpu_index=gpu_index,
enforce_gpu_index=enforce_gpu_index,
disable_cpu_affinity=disable_cpu_affinity,
parallel_decompressers_count=parallel_decompressers_count,
decompresser_thread_count=decompresser_thread_count,
recursive_plot_scan=recursive_plot_scan,
refresh_parameter_interval_seconds=refresh_parameter_interval_seconds,
)
return {}

View File

@ -31,3 +31,9 @@ class HarvesterRpcClient(RpcClient):
async def remove_plot_directory(self, dirname: str) -> bool:
return (await self.fetch("remove_plot_directory", {"dirname": dirname}))["success"]
async def get_harvester_config(self) -> Dict[str, Any]:
return await self.fetch("get_harvester_config", {})
async def update_harvester_config(self, config: Dict[str, Any]) -> bool:
return (await self.fetch("update_harvester_config", config))["success"]

View File

@ -2974,7 +2974,9 @@ class WalletRpcApi:
pool_reward_amount = 0
farmer_reward_amount = 0
fee_amount = 0
blocks_won = 0
last_height_farmed = 0
last_time_farmed = 0
for record in tx_records:
if record.wallet_id not in self.service.wallet_state_manager.wallets:
continue
@ -2987,13 +2989,17 @@ class WalletRpcApi:
# .get_farming_rewards() above queries for only confirmed records. This
# could be hinted by making TransactionRecord generic but streamable can't
# handle that presently. Existing code would have raised an exception
# anyways if this were to fail and we already have an assert below.
# anyway if this were to fail and we already have an assert below.
assert height is not None
timestamp = await self.service.get_timestamp_for_height(height)
if record.type == TransactionType.FEE_REWARD:
fee_amount += record.amount - calculate_base_farmer_reward(height)
farmer_reward_amount += calculate_base_farmer_reward(height)
base_farmer_reward = calculate_base_farmer_reward(height)
fee_amount += record.amount - base_farmer_reward
farmer_reward_amount += base_farmer_reward
blocks_won += 1
if height > last_height_farmed:
last_height_farmed = height
last_time_farmed = timestamp
amount += record.amount
assert amount == pool_reward_amount + farmer_reward_amount + fee_amount
@ -3003,6 +3009,8 @@ class WalletRpcApi:
"farmer_reward_amount": farmer_reward_amount,
"fee_amount": fee_amount,
"last_height_farmed": last_height_farmed,
"last_time_farmed": last_time_farmed,
"blocks_won": blocks_won,
}
async def create_signed_transaction(self, request, hold_lock=True) -> EndpointResult:

View File

@ -201,6 +201,18 @@ harvester:
crt: "config/ssl/ca/chia_ca.crt"
key: "config/ssl/ca/chia_ca.key"
# Compressed harvesting.
parallel_decompressers_count: 1
# If set to 0, `decompresser_thread_count` defaults to half of the machine's available CPU cores.
# A non-zero number overrides this default.
decompresser_thread_count: 0
disable_cpu_affinity: False
# Ignore compressed plots with compression level greater than this.
max_compression_level_allowed: 7
use_gpu_harvesting: False
gpu_index: 0
enforce_gpu_index: False
pool:
# Replace this with a real receive address
# xch_target_address: txch102gkhhzs60grx7cfnpng5n6rjecr89r86l5s8xux2za8k820cxsq64ssdg

View File

@ -18,19 +18,20 @@ class VersionedBlob(Streamable):
blob: bytes
def format_bytes(bytes: int) -> str:
if not isinstance(bytes, int) or bytes < 0:
def format_bytes(bytes_value: int, effective: bool = False) -> str:
if not isinstance(bytes_value, int) or bytes_value < 0:
return "Invalid"
LABELS = ("MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB")
BASE = 1024
value = bytes / BASE
value = bytes_value / BASE
for label in LABELS:
value /= BASE
if value < BASE:
return f"{value:.3f} {label}"
return f"{value:.3f} {label}e" if effective else f"{value:.3f} {label}"
return f"{value:.3f} {LABELS[-1]}"
label = f"{LABELS[-1]}e" if effective else LABELS[-1]
return f"{value:.3f} {label}"
def format_minutes(minutes: int) -> str:

View File

@ -12,30 +12,6 @@ def make_create_coin_condition(puzzle_hash, amount, memos: List[bytes]) -> List:
return condition
def make_assert_aggsig_condition(pubkey):
return [ConditionOpcode.AGG_SIG_UNSAFE, pubkey]
def make_assert_my_coin_id_condition(coin_name):
return [ConditionOpcode.ASSERT_MY_COIN_ID, coin_name]
def make_assert_absolute_height_exceeds_condition(block_index):
return [ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, block_index]
def make_assert_relative_height_exceeds_condition(block_index):
return [ConditionOpcode.ASSERT_HEIGHT_RELATIVE, block_index]
def make_assert_absolute_seconds_exceeds_condition(time):
return [ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, time]
def make_assert_relative_seconds_exceeds_condition(time):
return [ConditionOpcode.ASSERT_SECONDS_RELATIVE, time]
def make_reserve_fee_condition(fee):
return [ConditionOpcode.RESERVE_FEE, fee]
@ -54,15 +30,3 @@ def make_create_coin_announcement(message):
def make_create_puzzle_announcement(message):
return [ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT, message]
def make_assert_my_parent_id(parent_id):
return [ConditionOpcode.ASSERT_MY_PARENT_ID, parent_id]
def make_assert_my_puzzlehash(puzzlehash):
return [ConditionOpcode.ASSERT_MY_PUZZLEHASH, puzzlehash]
def make_assert_my_amount(amount):
return [ConditionOpcode.ASSERT_MY_AMOUNT, amount]

View File

@ -616,7 +616,7 @@ class VerifiedCredential(Streamable):
parent_name=parent_coin.parent_coin_info,
inner_puzzle_hash=create_viral_backdoor(
STANDARD_BRICK_PUZZLE_HASH,
inner_puzzle_hash,
bytes32(uncurry_puzzle(metadata_layer.args.at("rrrrf")).args.at("rrf").atom),
).get_tree_hash(),
amount=uint64(parent_coin.amount),
parent_proof_hash=None if parent_proof_hash == Program.to(None) else parent_proof_hash,

View File

@ -30,9 +30,7 @@ from chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import (
solution_for_conditions,
)
from chia.wallet.puzzles.puzzle_utils import (
make_assert_absolute_seconds_exceeds_condition,
make_assert_coin_announcement,
make_assert_my_coin_id_condition,
make_assert_puzzle_announcement,
make_create_coin_announcement,
make_create_coin_condition,
@ -224,8 +222,6 @@ class Wallet:
def make_solution(
self,
primaries: List[Payment],
min_time=0,
me=None,
coin_announcements: Optional[Set[bytes]] = None,
coin_announcements_to_assert: Optional[Set[bytes32]] = None,
puzzle_announcements: Optional[Set[bytes]] = None,
@ -238,10 +234,6 @@ class Wallet:
if len(primaries) > 0:
for primary in primaries:
condition_list.append(make_create_coin_condition(primary.puzzle_hash, primary.amount, primary.memos))
if min_time > 0:
condition_list.append(make_assert_absolute_seconds_exceeds_condition(min_time))
if me:
condition_list.append(make_assert_my_coin_id_condition(me["id"]))
if fee:
condition_list.append(make_reserve_fee_condition(fee))
if coin_announcements:

View File

@ -9,10 +9,10 @@ dependencies = [
"aiofiles==23.1.0", # Async IO for files
"anyio==3.6.2",
"boto3==1.26.131", # AWS S3 for DL s3 plugin
"blspy==1.0.16", # Signature library
"blspy==2.0.2", # Signature library
"chiavdf==1.0.8", # timelord and vdf verification
"chiabip158==1.2", # bip158-style wallet filters
"chiapos==1.0.11", # proof of space
"chiapos==2.0.0b2", # proof of space
"clvm==0.9.7",
"clvm_tools==0.4.6", # Currying, Program.to, other conveniences
"chia_rs==0.2.7",

View File

@ -152,7 +152,7 @@ def db_version(request) -> int:
return request.param
@pytest.fixture(scope="function", params=[1000000, 3886635, 4200000, 5496000])
@pytest.fixture(scope="function", params=[1000000, 3886635, 4410000, 5496000])
def softfork_height(request) -> int:
return request.param

View File

@ -3,7 +3,7 @@ from __future__ import annotations
from pytest import raises
from chia.consensus.default_constants import DEFAULT_CONSTANTS
from chia.consensus.pos_quality import _expected_plot_size
from chia.consensus.pos_quality import expected_plot_size
from chia.consensus.pot_iterations import (
calculate_ip_iters,
calculate_iterations_quality,
@ -90,7 +90,7 @@ class TestPotIterations:
uint8(35): 100,
uint8(36): 100,
}
farmer_space = {k: _expected_plot_size(uint8(k)) * count for k, count in farmer_ks.items()}
farmer_space = {k: expected_plot_size(uint8(k)) * count for k, count in farmer_ks.items()}
total_space = sum(farmer_space.values())
percentage_space = {k: float(sp / total_space) for k, sp in farmer_space.items()}
wins = {k: 0 for k in farmer_ks.keys()}

View File

@ -15,7 +15,7 @@ log = logging.getLogger(__name__)
def dummy_plot(path: str) -> Plot:
return Plot(path, uint8(32), bytes32(b"\00" * 32), G1Element(), None, G1Element(), uint64(0), uint64(0))
return Plot(path, uint8(32), bytes32(b"\00" * 32), G1Element(), None, G1Element(), uint64(0), uint64(0), uint8(0))
@pytest.mark.parametrize(

View File

@ -71,6 +71,7 @@ class ExpectedResult:
G1Element(),
uint64(0),
uint64(0),
uint8(0),
)
self.valid_count += len(list_plots)
@ -191,6 +192,7 @@ class Environment:
assert plot.prover.get_filename() == delta.valid.additions[path].filename
assert plot.prover.get_size() == delta.valid.additions[path].size
assert plot.prover.get_id() == delta.valid.additions[path].plot_id
assert plot.prover.get_compresion_level() == delta.valid.additions[path].compression_level
assert plot.pool_public_key == delta.valid.additions[path].pool_public_key
assert plot.pool_contract_puzzle_hash == delta.valid.additions[path].pool_contract_puzzle_hash
assert plot.plot_public_key == delta.valid.additions[path].plot_public_key
@ -251,6 +253,7 @@ class Environment:
assert plot_info.prover.get_filename() == receiver.plots()[str(path)].filename
assert plot_info.prover.get_size() == receiver.plots()[str(path)].size
assert plot_info.prover.get_id() == receiver.plots()[str(path)].plot_id
assert plot_info.prover.get_compresion_level() == receiver.plots()[str(path)].compression_level
assert plot_info.pool_public_key == receiver.plots()[str(path)].pool_public_key
assert plot_info.pool_contract_puzzle_hash == receiver.plots()[str(path)].pool_contract_puzzle_hash
assert plot_info.plot_public_key == receiver.plots()[str(path)].plot_public_key

View File

@ -10,9 +10,11 @@ from typing import Any, Callable, List, Tuple, Type, Union
import pytest
from blspy import G1Element
from chia.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, expected_plot_size
from chia.plot_sync.delta import Delta
from chia.plot_sync.receiver import Receiver, Sync
from chia.plot_sync.util import ErrorCodes, State
from chia.plotting.util import HarvestingMode
from chia.protocols.harvester_protocol import (
Plot,
PlotSyncDone,
@ -41,6 +43,9 @@ def assert_default_values(receiver: Receiver) -> None:
assert receiver.invalid() == []
assert receiver.keys_missing() == []
assert receiver.duplicates() == []
assert receiver.total_plot_size() == 0
assert receiver.total_effective_plot_size() == float(0)
assert receiver.harvesting_mode() is None
async def dummy_callback(_: bytes32, __: Delta) -> None:
@ -125,7 +130,7 @@ def post_function_validate(receiver: Receiver, data: Union[List[Plot], List[str]
@pytest.mark.asyncio
async def run_sync_step(receiver: Receiver, sync_step: SyncStepData) -> None:
async def run_plot_sync_step(receiver: Receiver, sync_step: SyncStepData) -> None:
assert receiver.current_sync().state == sync_step.state
last_sync_time_before = receiver._last_sync.time_done
# For the list types, invoke the trigger function in batches
@ -174,16 +179,28 @@ def plot_sync_setup() -> Tuple[Receiver, List[SyncStepData]]:
plot_public_key=G1Element(),
file_size=uint64(random.randint(0, 100)),
time_modified=uint64(0),
compression_level=uint8(0),
)
for x in path_list
]
# Manually add the plots we want to remove in tests
receiver._plots = {plot_info.filename: plot_info for plot_info in plot_info_list[0:10]}
receiver._total_plot_size = sum(plot.file_size for plot in receiver._plots.values())
receiver._total_plot_size = sum(plot.file_size for plot in receiver.plots().values())
receiver._total_effective_plot_size = sum(
UI_ACTUAL_SPACE_CONSTANT_FACTOR * int(expected_plot_size(plot.size)) for plot in receiver.plots().values()
)
sync_steps: List[SyncStepData] = [
SyncStepData(State.idle, receiver.sync_started, PlotSyncStart, False, uint64(0), uint32(len(plot_info_list))),
SyncStepData(
State.idle,
receiver.sync_started,
PlotSyncStart,
False,
uint64(0),
uint32(len(plot_info_list)),
uint8(HarvestingMode.CPU),
),
SyncStepData(State.loaded, receiver.process_loaded, PlotSyncPlotList, plot_info_list[10:20], True),
SyncStepData(State.removed, receiver.process_removed, PlotSyncPathList, path_list[0:10], True),
SyncStepData(State.invalid, receiver.process_invalid, PlotSyncPathList, path_list[20:30], True),
@ -201,7 +218,7 @@ def test_default_values() -> None:
@pytest.mark.asyncio
async def test_reset() -> None:
receiver, sync_steps = plot_sync_setup()
receiver, plot_sync_steps = plot_sync_setup()
connection_before = receiver.connection()
# Assign some dummy values
receiver._current_sync.state = State.done
@ -235,7 +252,7 @@ async def test_reset() -> None:
@pytest.mark.parametrize("counts_only", [True, False])
@pytest.mark.asyncio
async def test_to_dict(counts_only: bool) -> None:
receiver, sync_steps = plot_sync_setup()
receiver, plot_sync_steps = plot_sync_setup()
plot_sync_dict_1 = receiver.to_dict(counts_only)
assert get_list_or_len(plot_sync_dict_1["plots"], not counts_only) == 10
@ -243,6 +260,9 @@ async def test_to_dict(counts_only: bool) -> None:
assert get_list_or_len(plot_sync_dict_1["no_key_filenames"], not counts_only) == 0
assert get_list_or_len(plot_sync_dict_1["duplicates"], not counts_only) == 0
assert plot_sync_dict_1["total_plot_size"] == sum(plot.file_size for plot in receiver.plots().values())
assert plot_sync_dict_1["total_effective_plot_size"] == int(
sum(UI_ACTUAL_SPACE_CONSTANT_FACTOR * int(expected_plot_size(plot.size)) for plot in receiver.plots().values())
)
assert plot_sync_dict_1["syncing"] is None
assert plot_sync_dict_1["last_sync_time"] is None
assert plot_sync_dict_1["connection"] == {
@ -250,6 +270,7 @@ async def test_to_dict(counts_only: bool) -> None:
"host": receiver.connection().peer_info.host,
"port": receiver.connection().peer_info.port,
}
assert plot_sync_dict_1["harvesting_mode"] is None
# We should get equal dicts
assert plot_sync_dict_1 == receiver.to_dict(counts_only)
@ -257,14 +278,14 @@ async def test_to_dict(counts_only: bool) -> None:
assert plot_sync_dict_1 != receiver.to_dict(not counts_only)
expected_plot_files_processed: int = 0
expected_plot_files_total: int = sync_steps[State.idle].args[2]
expected_plot_files_total: int = plot_sync_steps[State.idle].args[2]
# Walk through all states from idle to done and run them with the test data and validate the sync progress
for state in State:
await run_sync_step(receiver, sync_steps[state])
await run_plot_sync_step(receiver, plot_sync_steps[state])
if state != State.idle and state != State.removed and state != State.done:
expected_plot_files_processed += len(sync_steps[state].args[0])
expected_plot_files_processed += len(plot_sync_steps[state].args[0])
sync_data = receiver.to_dict()["syncing"]
if state == State.done:
@ -278,16 +299,24 @@ async def test_to_dict(counts_only: bool) -> None:
assert sync_data == expected_sync_data
plot_sync_dict_3 = receiver.to_dict(counts_only)
assert get_list_or_len(sync_steps[State.loaded].args[0], counts_only) == plot_sync_dict_3["plots"]
assert get_list_or_len(plot_sync_steps[State.loaded].args[0], counts_only) == plot_sync_dict_3["plots"]
assert (
get_list_or_len(sync_steps[State.invalid].args[0], counts_only) == plot_sync_dict_3["failed_to_open_filenames"]
get_list_or_len(plot_sync_steps[State.invalid].args[0], counts_only)
== plot_sync_dict_3["failed_to_open_filenames"]
)
assert get_list_or_len(sync_steps[State.keys_missing].args[0], counts_only) == plot_sync_dict_3["no_key_filenames"]
assert get_list_or_len(sync_steps[State.duplicates].args[0], counts_only) == plot_sync_dict_3["duplicates"]
assert (
get_list_or_len(plot_sync_steps[State.keys_missing].args[0], counts_only)
== plot_sync_dict_3["no_key_filenames"]
)
assert get_list_or_len(plot_sync_steps[State.duplicates].args[0], counts_only) == plot_sync_dict_3["duplicates"]
assert plot_sync_dict_3["total_plot_size"] == sum(plot.file_size for plot in receiver.plots().values())
assert plot_sync_dict_3["total_effective_plot_size"] == int(
sum(UI_ACTUAL_SPACE_CONSTANT_FACTOR * int(expected_plot_size(plot.size)) for plot in receiver.plots().values())
)
assert plot_sync_dict_3["last_sync_time"] > 0
assert plot_sync_dict_3["syncing"] is None
assert plot_sync_steps[State.idle].args[3] == plot_sync_dict_3["harvesting_mode"]
# Trigger a repeated plot sync
await receiver.sync_started(
@ -296,6 +325,7 @@ async def test_to_dict(counts_only: bool) -> None:
False,
receiver.last_sync().sync_id,
uint32(1),
uint8(HarvestingMode.CPU),
)
)
assert receiver.to_dict()["syncing"] == {
@ -326,7 +356,7 @@ async def test_sync_flow() -> None:
# Walk through all states from idle to done and run them with the test data
for state in State:
await run_sync_step(receiver, sync_steps[state])
await run_plot_sync_step(receiver, sync_steps[state])
for plot_info in sync_steps[State.loaded].args[0]:
assert plot_info.filename in receiver.plots()
@ -358,13 +388,13 @@ async def test_invalid_ids() -> None:
receiver._last_sync.sync_id = uint64(1)
# Test "sync_started last doesn't match"
invalid_last_sync_id_param = PlotSyncStart(
plot_sync_identifier(uint64(0), uint64(0)), False, uint64(2), uint32(0)
plot_sync_identifier(uint64(0), uint64(0)), False, uint64(2), uint32(0), uint8(HarvestingMode.CPU)
)
await current_step.function(invalid_last_sync_id_param)
assert_error_response(receiver, ErrorCodes.invalid_last_sync_id)
# Test "last_sync_id == new_sync_id"
invalid_sync_id_match_param = PlotSyncStart(
plot_sync_identifier(uint64(1), uint64(0)), False, uint64(1), uint32(0)
plot_sync_identifier(uint64(1), uint64(0)), False, uint64(1), uint32(0), uint8(HarvestingMode.CPU)
)
await current_step.function(invalid_sync_id_match_param)
assert_error_response(receiver, ErrorCodes.sync_ids_match)

View File

@ -5,6 +5,7 @@ import pytest
from chia.plot_sync.exceptions import AlreadyStartedError, InvalidConnectionTypeError
from chia.plot_sync.sender import ExpectedResponse, Sender
from chia.plot_sync.util import Constants
from chia.plotting.util import HarvestingMode
from chia.protocols.harvester_protocol import PlotSyncIdentifier, PlotSyncResponse
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.outbound_message import NodeType
@ -14,7 +15,7 @@ from tests.plot_sync.util import get_dummy_connection, plot_sync_identifier
def test_default_values(bt: BlockTools) -> None:
sender = Sender(bt.plot_manager)
sender = Sender(bt.plot_manager, HarvestingMode.CPU)
assert sender._plot_manager == bt.plot_manager
assert sender._connection is None
assert sender._sync_id == uint64(0)
@ -24,11 +25,12 @@ def test_default_values(bt: BlockTools) -> None:
assert not sender._stop_requested
assert sender._task is None
assert sender._response is None
assert sender._harvesting_mode == HarvestingMode.CPU
def test_set_connection_values(bt: BlockTools) -> None:
farmer_connection = get_dummy_connection(NodeType.FARMER)
sender = Sender(bt.plot_manager)
sender = Sender(bt.plot_manager, HarvestingMode.CPU)
# Test invalid NodeType values
for connection_type in NodeType:
if connection_type != NodeType.FARMER:
@ -45,7 +47,7 @@ def test_set_connection_values(bt: BlockTools) -> None:
@pytest.mark.asyncio
async def test_start_stop_send_task(bt: BlockTools) -> None:
sender = Sender(bt.plot_manager)
sender = Sender(bt.plot_manager, HarvestingMode.CPU)
# Make sure starting/restarting works
for _ in range(2):
assert sender._task is None
@ -62,7 +64,7 @@ async def test_start_stop_send_task(bt: BlockTools) -> None:
def test_set_response(bt: BlockTools) -> None:
sender = Sender(bt.plot_manager)
sender = Sender(bt.plot_manager, HarvestingMode.CPU)
def new_expected_response(sync_id: int, message_id: int, message_type: ProtocolMessageTypes) -> ExpectedResponse:
return ExpectedResponse(message_type, plot_sync_identifier(uint64(sync_id), uint64(message_id)))

View File

@ -29,7 +29,7 @@ from chia.simulator.block_tools import BlockTools
from chia.simulator.time_out_assert import time_out_assert
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.generator_tools import list_to_batches
from chia.util.ints import int16, uint64
from chia.util.ints import int16, uint8, uint64
from tests.plot_sync.util import start_harvester_service
log = logging.getLogger(__name__)
@ -273,9 +273,12 @@ def create_example_plots(count: int) -> List[PlotInfo]:
def get_size(self) -> int:
return self.size
def get_compresion_level(self) -> uint8:
return uint8(0)
return [
PlotInfo(
prover=DiskProver(f"{x}", bytes32(token_bytes(32)), x % 255),
prover=DiskProver(f"{x}", bytes32(token_bytes(32)), 25 + x % 26),
pool_public_key=None,
pool_contract_puzzle_hash=None,
plot_public_key=G1Element(),

View File

@ -502,6 +502,7 @@ async def test_plot_info_caching(environment, bt):
assert plot_manager.plots[path].prover.get_id() == plot_info.prover.get_id()
assert plot_manager.plots[path].prover.get_memo() == plot_info.prover.get_memo()
assert plot_manager.plots[path].prover.get_size() == plot_info.prover.get_size()
assert plot_manager.plots[path].prover.get_compresion_level() == plot_info.prover.get_compresion_level()
assert plot_manager.plots[path].pool_public_key == plot_info.pool_public_key
assert plot_manager.plots[path].pool_contract_puzzle_hash == plot_info.pool_contract_puzzle_hash
assert plot_manager.plots[path].plot_public_key == plot_info.plot_public_key

View File

@ -4,6 +4,7 @@ from __future__ import annotations
from blspy import G1Element, G2Element
from chia.plotting.util import HarvestingMode
from chia.protocols import (
farmer_protocol,
full_node_protocol,
@ -117,6 +118,7 @@ farming_info = farmer_protocol.FarmingInfo(
uint32(1390832181),
uint32(908923578),
uint32(2259819406),
uint64(3942498),
)
signed_values = farmer_protocol.SignedValues(
@ -749,6 +751,7 @@ plot = harvester_protocol.Plot(
),
uint64(3368414292564311420),
uint64(2573238947935295522),
uint8(0),
)
request_plots = harvester_protocol.RequestPlots()

View File

@ -48,6 +48,7 @@ farming_info_json: Dict[str, Any] = {
"passed": 1390832181,
"proofs": 908923578,
"total_plots": 2259819406,
"lookup_time": 3942498,
}
signed_values_json: Dict[str, Any] = {
@ -2070,6 +2071,7 @@ plot_json: Dict[str, Any] = {
"plot_public_key": "0xa04c6b5ac7dfb935f6feecfdd72348ccf1d4be4fe7e26acf271ea3b7d308da61e0a308f7a62495328a81f5147b66634c",
"file_size": 3368414292564311420,
"time_modified": 2573238947935295522,
"compression_level": 0,
}
request_plots_json: Dict[str, Any] = {}
@ -2085,6 +2087,7 @@ respond_plots_json: Dict[str, Any] = {
"plot_public_key": "0xa04c6b5ac7dfb935f6feecfdd72348ccf1d4be4fe7e26acf271ea3b7d308da61e0a308f7a62495328a81f5147b66634c",
"file_size": 3368414292564311420,
"time_modified": 2573238947935295522,
"compression_level": 0,
}
],
"failed_to_open_filenames": ["str"],

View File

@ -27,6 +27,24 @@ class TestMisc:
assert format_bytes(1024**10) == "1048576.000 YiB"
assert format_bytes(1024**20).endswith("YiB")
assert format_bytes(None, True) == "Invalid"
assert format_bytes(dict(), True) == "Invalid"
assert format_bytes("some bytes", True) == "Invalid"
assert format_bytes(-1024, True) == "Invalid"
assert format_bytes(0, True) == "0.000 MiBe"
assert format_bytes(1024, True) == "0.001 MiBe"
assert format_bytes(1024**2 - 1000, True) == "0.999 MiBe"
assert format_bytes(1024**2, True) == "1.000 MiBe"
assert format_bytes(1024**3, True) == "1.000 GiBe"
assert format_bytes(1024**4, True) == "1.000 TiBe"
assert format_bytes(1024**5, True) == "1.000 PiBe"
assert format_bytes(1024**6, True) == "1.000 EiBe"
assert format_bytes(1024**7, True) == "1.000 ZiBe"
assert format_bytes(1024**8, True) == "1.000 YiBe"
assert format_bytes(1024**9, True) == "1024.000 YiBe"
assert format_bytes(1024**10, True) == "1048576.000 YiBe"
assert format_bytes(1024**20, True).endswith("YiBe")
@pytest.mark.asyncio
async def test_format_minutes(self):
assert format_minutes(None) == "Invalid"

View File

@ -850,7 +850,7 @@ class TestCATTrades:
bundle = dataclasses.replace(offer._bundle, aggregated_signature=G2Element())
offer = dataclasses.replace(offer, _bundle=bundle)
tr1, txs1 = await trade_manager_taker.respond_to_offer(offer, peer, fee=uint64(10))
wallet_node_taker.wallet_tx_resend_timeout_secs = 1 # don't wait for resend
wallet_node_taker.wallet_tx_resend_timeout_secs = 0 # don't wait for resend
await wallet_node_taker._resend_queue()
await wallet_node_taker._resend_queue()
await wallet_node_taker._resend_queue()

View File

@ -382,10 +382,12 @@ async def test_get_farmed_amount(wallet_rpc_environment: WalletRpcTestEnvironmen
result = await wallet_rpc_client.get_farmed_amount()
expected_result = {
"blocks_won": 2,
"farmed_amount": 4_000_000_000_000,
"farmer_reward_amount": 500_000_000_000,
"fee_amount": 0,
"last_height_farmed": 2,
"last_time_farmed": result["last_time_farmed"], # time cannot be predicted so skipping test here
"pool_reward_amount": 3_500_000_000_000,
"success": True,
}

View File

@ -41,7 +41,9 @@ from chia.wallet.vc_wallet.vc_drivers import (
)
ACS: Program = Program.to([3, (1, "entropy"), 1, None])
ACS_2: Program = Program.to([3, (1, "entropy2"), 1, None])
ACS_PH: bytes32 = ACS.get_tree_hash()
ACS_2_PH: bytes32 = ACS_2.get_tree_hash()
MOCK_SINGLETON_MOD: Program = Program.to([2, 5, 11])
MOCK_SINGLETON_MOD_HASH: bytes32 = MOCK_SINGLETON_MOD.get_tree_hash()
MOCK_LAUNCHER_ID: bytes32 = bytes32([0] * 32)
@ -493,7 +495,7 @@ async def test_vc_lifecycle(test_syncing: bool, cost_logger: CostLogger) -> None
NEW_PROOF_HASH: bytes32 = NEW_PROOFS.get_tree_hash()
expected_announcement, update_spend, vc = vc.do_spend(
ACS,
Program.to([[51, ACS_PH, vc.coin.amount], vc.magic_condition_for_new_proofs(NEW_PROOF_HASH, ACS_PH)]),
Program.to([[51, ACS_2_PH, vc.coin.amount], vc.magic_condition_for_new_proofs(NEW_PROOF_HASH, ACS_PH)]),
new_proof_hash=NEW_PROOF_HASH,
)
for use_did, correct_did in ((False, None), (True, False), (True, True)):
@ -662,7 +664,7 @@ async def test_vc_lifecycle(test_syncing: bool, cost_logger: CostLogger) -> None
# Try to spend the coin to ourselves
_, auth_spend, new_vc = vc.do_spend(
ACS,
ACS_2,
Program.to(
[
[51, ACS_PH, vc.coin.amount],