Bluebox weight proof. (#1028)
* Initial commit weight proof bluebox. * More verification, fix linting. * Checkpoint testing. * Checkpoint verify compact proofs. * First attempt already seen compact proofs. * Try bigger timeouts. * Try passing first without ICC EOS. * Try to uniformly sanitize by field_vdf. * Try to fix vdf.py * Temporary change: check if simulation is slow or stalled. * Try fixing VDFProof erros in tests. * Checkpoint: address some comments. * Checkpoint is_fully_compactified. * First attempt compact blocks fixture. * Add tests. * Test simulation should eventually pass. * Test full node store passing. * Use the proper fixture. * Try lighter test_simulation. * Bump chiavdf, use correct fixture db, try test cache. * Update fixtures.py * Try bigger timeouts. * First attempt split tests. * Rename workflow files. * Relax test simulation since it's still failing. * Update test cache.
This commit is contained in:
parent
1d91a20112
commit
bac6e36c05
31 changed files with 1387 additions and 215 deletions
93
.github/workflows/build-test-macos-blockchain.yml
vendored
Normal file
93
.github/workflows/build-test-macos-blockchain.yml
vendored
Normal file
|
@ -0,0 +1,93 @@
|
||||||
|
name: MacOS Blockchain Tests
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
tags:
|
||||||
|
- '**'
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- '**'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
name: MacOS Blockchain Tests
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
timeout-minutes: 80
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
max-parallel: 4
|
||||||
|
matrix:
|
||||||
|
python-version: [3.8, 3.9]
|
||||||
|
os: [macOS-latest]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Cancel previous runs on the same branch
|
||||||
|
if: ${{ github.ref != 'refs/heads/dev' }}
|
||||||
|
uses: styfle/cancel-workflow-action@0.8.0
|
||||||
|
with:
|
||||||
|
access_token: ${{ github.token }}
|
||||||
|
|
||||||
|
- name: Checkout Code
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Setup Python environment
|
||||||
|
uses: actions/setup-python@v2
|
||||||
|
with:
|
||||||
|
python-version: ${{ matrix.python-version }}
|
||||||
|
|
||||||
|
- name: Get pip cache dir
|
||||||
|
id: pip-cache
|
||||||
|
run: |
|
||||||
|
echo "::set-output name=dir::$(pip cache dir)"
|
||||||
|
|
||||||
|
- name: Cache pip
|
||||||
|
uses: actions/cache@v2.1.4
|
||||||
|
with:
|
||||||
|
# Note that new runners may break this https://github.com/actions/cache/issues/292
|
||||||
|
path: ${{ steps.pip-cache.outputs.dir }}
|
||||||
|
key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-pip-
|
||||||
|
|
||||||
|
- name: Checkout test blocks and plots
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
repository: 'Chia-Network/test-cache'
|
||||||
|
path: '.chia'
|
||||||
|
ref: '0.18.0'
|
||||||
|
fetch-depth: 1
|
||||||
|
|
||||||
|
- name: Link home directory
|
||||||
|
run: |
|
||||||
|
cd $HOME
|
||||||
|
ln -s $GITHUB_WORKSPACE/.chia
|
||||||
|
echo "$HOME/.chia"
|
||||||
|
ls -al $HOME/.chia
|
||||||
|
|
||||||
|
- name: Run install script
|
||||||
|
env:
|
||||||
|
INSTALL_PYTHON_VERSION: ${{ matrix.python-version }}
|
||||||
|
BUILD_VDF_CLIENT: "N"
|
||||||
|
run: |
|
||||||
|
brew install boost
|
||||||
|
sh install.sh
|
||||||
|
|
||||||
|
- name: Install timelord
|
||||||
|
run: |
|
||||||
|
. ./activate
|
||||||
|
sh install-timelord.sh
|
||||||
|
./vdf_bench square_asm 400000
|
||||||
|
|
||||||
|
- name: Install developer requirements
|
||||||
|
run: |
|
||||||
|
. ./activate
|
||||||
|
venv/bin/python -m pip install -r requirements-dev.txt
|
||||||
|
|
||||||
|
- name: Test blockchain code with pytest
|
||||||
|
run: |
|
||||||
|
. ./activate
|
||||||
|
./venv/bin/py.test tests/blockchain -s -v --durations 0
|
93
.github/workflows/build-test-macos-simulation.yml
vendored
Normal file
93
.github/workflows/build-test-macos-simulation.yml
vendored
Normal file
|
@ -0,0 +1,93 @@
|
||||||
|
name: MacOS Simulation Tests
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
tags:
|
||||||
|
- '**'
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- '**'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
name: MacOS Simulation Tests
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
timeout-minutes: 80
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
max-parallel: 4
|
||||||
|
matrix:
|
||||||
|
python-version: [3.8, 3.9]
|
||||||
|
os: [macOS-latest]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Cancel previous runs on the same branch
|
||||||
|
if: ${{ github.ref != 'refs/heads/dev' }}
|
||||||
|
uses: styfle/cancel-workflow-action@0.8.0
|
||||||
|
with:
|
||||||
|
access_token: ${{ github.token }}
|
||||||
|
|
||||||
|
- name: Checkout Code
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Setup Python environment
|
||||||
|
uses: actions/setup-python@v2
|
||||||
|
with:
|
||||||
|
python-version: ${{ matrix.python-version }}
|
||||||
|
|
||||||
|
- name: Get pip cache dir
|
||||||
|
id: pip-cache
|
||||||
|
run: |
|
||||||
|
echo "::set-output name=dir::$(pip cache dir)"
|
||||||
|
|
||||||
|
- name: Cache pip
|
||||||
|
uses: actions/cache@v2.1.4
|
||||||
|
with:
|
||||||
|
# Note that new runners may break this https://github.com/actions/cache/issues/292
|
||||||
|
path: ${{ steps.pip-cache.outputs.dir }}
|
||||||
|
key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-pip-
|
||||||
|
|
||||||
|
- name: Checkout test blocks and plots
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
repository: 'Chia-Network/test-cache'
|
||||||
|
path: '.chia'
|
||||||
|
ref: '0.18.0'
|
||||||
|
fetch-depth: 1
|
||||||
|
|
||||||
|
- name: Link home directory
|
||||||
|
run: |
|
||||||
|
cd $HOME
|
||||||
|
ln -s $GITHUB_WORKSPACE/.chia
|
||||||
|
echo "$HOME/.chia"
|
||||||
|
ls -al $HOME/.chia
|
||||||
|
|
||||||
|
- name: Run install script
|
||||||
|
env:
|
||||||
|
INSTALL_PYTHON_VERSION: ${{ matrix.python-version }}
|
||||||
|
BUILD_VDF_CLIENT: "N"
|
||||||
|
run: |
|
||||||
|
brew install boost
|
||||||
|
sh install.sh
|
||||||
|
|
||||||
|
- name: Install timelord
|
||||||
|
run: |
|
||||||
|
. ./activate
|
||||||
|
sh install-timelord.sh
|
||||||
|
./vdf_bench square_asm 400000
|
||||||
|
|
||||||
|
- name: Install developer requirements
|
||||||
|
run: |
|
||||||
|
. ./activate
|
||||||
|
venv/bin/python -m pip install -r requirements-dev.txt
|
||||||
|
|
||||||
|
- name: Test blockchain code with pytest
|
||||||
|
run: |
|
||||||
|
. ./activate
|
||||||
|
./venv/bin/py.test tests/simulation -s -v --durations 0
|
105
.github/workflows/build-test-ubuntu-blockchain.yml
vendored
Normal file
105
.github/workflows/build-test-ubuntu-blockchain.yml
vendored
Normal file
|
@ -0,0 +1,105 @@
|
||||||
|
name: Ubuntu Blockchain Tests
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
tags:
|
||||||
|
- '**'
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- '**'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
name: Ubuntu Blockchain Tests
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
timeout-minutes: 90
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
max-parallel: 4
|
||||||
|
matrix:
|
||||||
|
python-version: [3.7, 3.8, 3.9]
|
||||||
|
os: [ubuntu-latest]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Cancel previous runs on the same branch
|
||||||
|
if: ${{ github.ref != 'refs/heads/dev' }}
|
||||||
|
uses: styfle/cancel-workflow-action@0.8.0
|
||||||
|
with:
|
||||||
|
access_token: ${{ github.token }}
|
||||||
|
|
||||||
|
- name: Checkout Code
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Setup Python environment
|
||||||
|
uses: actions/setup-python@v2
|
||||||
|
with:
|
||||||
|
python-version: ${{ matrix.python-version }}
|
||||||
|
|
||||||
|
- name: Cache npm
|
||||||
|
uses: actions/cache@v2.1.4
|
||||||
|
with:
|
||||||
|
path: ~/.npm
|
||||||
|
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-node-
|
||||||
|
|
||||||
|
- name: Get pip cache dir
|
||||||
|
id: pip-cache
|
||||||
|
run: |
|
||||||
|
echo "::set-output name=dir::$(pip cache dir)"
|
||||||
|
|
||||||
|
- name: Cache pip
|
||||||
|
uses: actions/cache@v2.1.4
|
||||||
|
with:
|
||||||
|
path: ${{ steps.pip-cache.outputs.dir }}
|
||||||
|
key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-pip-
|
||||||
|
|
||||||
|
- name: Checkout test blocks and plots
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
repository: 'Chia-Network/test-cache'
|
||||||
|
path: '.chia'
|
||||||
|
ref: '0.18.0'
|
||||||
|
fetch-depth: 1
|
||||||
|
|
||||||
|
- name: Link home directory
|
||||||
|
run: |
|
||||||
|
cd $HOME
|
||||||
|
ln -s $GITHUB_WORKSPACE/.chia
|
||||||
|
echo "$HOME/.chia"
|
||||||
|
ls -al $HOME/.chia
|
||||||
|
|
||||||
|
- name: Install ubuntu dependencies
|
||||||
|
run: |
|
||||||
|
sudo apt-get install software-properties-common
|
||||||
|
sudo add-apt-repository ppa:deadsnakes/ppa
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install python${{ matrix.python-version }}-venv python${{ matrix.python-version }}-distutils git -y
|
||||||
|
|
||||||
|
- name: Run install script
|
||||||
|
env:
|
||||||
|
INSTALL_PYTHON_VERSION: ${{ matrix.python-version }}
|
||||||
|
run: |
|
||||||
|
sh install.sh
|
||||||
|
|
||||||
|
- name: Install timelord
|
||||||
|
run: |
|
||||||
|
. ./activate
|
||||||
|
sh install-timelord.sh
|
||||||
|
./vdf_bench square_asm 400000
|
||||||
|
|
||||||
|
- name: Install developer requirements
|
||||||
|
run: |
|
||||||
|
. ./activate
|
||||||
|
venv/bin/python -m pip install -r requirements-dev.txt
|
||||||
|
|
||||||
|
- name: Test blockchain code with pytest
|
||||||
|
run: |
|
||||||
|
. ./activate
|
||||||
|
./venv/bin/py.test tests/blockchain -s -v --durations 0
|
105
.github/workflows/build-test-ubuntu-simulation.yml
vendored
Normal file
105
.github/workflows/build-test-ubuntu-simulation.yml
vendored
Normal file
|
@ -0,0 +1,105 @@
|
||||||
|
name: Ubuntu Simulation Tests
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
tags:
|
||||||
|
- '**'
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- '**'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
name: Ubuntu Simulation Tests
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
timeout-minutes: 90
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
max-parallel: 4
|
||||||
|
matrix:
|
||||||
|
python-version: [3.7, 3.8, 3.9]
|
||||||
|
os: [ubuntu-latest]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Cancel previous runs on the same branch
|
||||||
|
if: ${{ github.ref != 'refs/heads/dev' }}
|
||||||
|
uses: styfle/cancel-workflow-action@0.8.0
|
||||||
|
with:
|
||||||
|
access_token: ${{ github.token }}
|
||||||
|
|
||||||
|
- name: Checkout Code
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Setup Python environment
|
||||||
|
uses: actions/setup-python@v2
|
||||||
|
with:
|
||||||
|
python-version: ${{ matrix.python-version }}
|
||||||
|
|
||||||
|
- name: Cache npm
|
||||||
|
uses: actions/cache@v2.1.4
|
||||||
|
with:
|
||||||
|
path: ~/.npm
|
||||||
|
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-node-
|
||||||
|
|
||||||
|
- name: Get pip cache dir
|
||||||
|
id: pip-cache
|
||||||
|
run: |
|
||||||
|
echo "::set-output name=dir::$(pip cache dir)"
|
||||||
|
|
||||||
|
- name: Cache pip
|
||||||
|
uses: actions/cache@v2.1.4
|
||||||
|
with:
|
||||||
|
path: ${{ steps.pip-cache.outputs.dir }}
|
||||||
|
key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-pip-
|
||||||
|
|
||||||
|
- name: Checkout test blocks and plots
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
repository: 'Chia-Network/test-cache'
|
||||||
|
path: '.chia'
|
||||||
|
ref: '0.18.0'
|
||||||
|
fetch-depth: 1
|
||||||
|
|
||||||
|
- name: Link home directory
|
||||||
|
run: |
|
||||||
|
cd $HOME
|
||||||
|
ln -s $GITHUB_WORKSPACE/.chia
|
||||||
|
echo "$HOME/.chia"
|
||||||
|
ls -al $HOME/.chia
|
||||||
|
|
||||||
|
- name: Install ubuntu dependencies
|
||||||
|
run: |
|
||||||
|
sudo apt-get install software-properties-common
|
||||||
|
sudo add-apt-repository ppa:deadsnakes/ppa
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install python${{ matrix.python-version }}-venv python${{ matrix.python-version }}-distutils git -y
|
||||||
|
|
||||||
|
- name: Run install script
|
||||||
|
env:
|
||||||
|
INSTALL_PYTHON_VERSION: ${{ matrix.python-version }}
|
||||||
|
run: |
|
||||||
|
sh install.sh
|
||||||
|
|
||||||
|
- name: Install timelord
|
||||||
|
run: |
|
||||||
|
. ./activate
|
||||||
|
sh install-timelord.sh
|
||||||
|
./vdf_bench square_asm 400000
|
||||||
|
|
||||||
|
- name: Install developer requirements
|
||||||
|
run: |
|
||||||
|
. ./activate
|
||||||
|
venv/bin/python -m pip install -r requirements-dev.txt
|
||||||
|
|
||||||
|
- name: Test blockchain code with pytest
|
||||||
|
run: |
|
||||||
|
. ./activate
|
||||||
|
./venv/bin/py.test tests/simulation -s -v --durations 0
|
2
setup.py
2
setup.py
|
@ -4,7 +4,7 @@ from setuptools import setup
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"aiter==0.13.20191203", # Used for async generator tools
|
"aiter==0.13.20191203", # Used for async generator tools
|
||||||
"blspy==0.3.5", # Signature library
|
"blspy==0.3.5", # Signature library
|
||||||
"chiavdf==0.15.0", # timelord and vdf verification
|
"chiavdf==0.15.1", # timelord and vdf verification
|
||||||
"chiabip158==0.19", # bip158-style wallet filters
|
"chiabip158==0.19", # bip158-style wallet filters
|
||||||
"chiapos==0.12.45", # proof of space
|
"chiapos==0.12.45", # proof of space
|
||||||
"clvm==0.9.0",
|
"clvm==0.9.0",
|
||||||
|
|
|
@ -197,10 +197,23 @@ def validate_unfinished_header_block(
|
||||||
number_of_iterations=icc_iters_committed,
|
number_of_iterations=icc_iters_committed,
|
||||||
):
|
):
|
||||||
return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
|
return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
|
||||||
if not skip_vdf_is_valid and not sub_slot.proofs.infused_challenge_chain_slot_proof.is_valid(
|
if not skip_vdf_is_valid:
|
||||||
constants, icc_vdf_input, target_vdf_info, None
|
if (
|
||||||
):
|
not sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
|
||||||
return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
|
and not sub_slot.proofs.infused_challenge_chain_slot_proof.is_valid(
|
||||||
|
constants, icc_vdf_input, target_vdf_info, None
|
||||||
|
)
|
||||||
|
):
|
||||||
|
return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
|
||||||
|
if (
|
||||||
|
sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
|
||||||
|
and not sub_slot.proofs.infused_challenge_chain_slot_proof.is_valid(
|
||||||
|
constants,
|
||||||
|
ClassgroupElement.get_default_element(),
|
||||||
|
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
|
||||||
|
)
|
||||||
|
):
|
||||||
|
return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
|
||||||
|
|
||||||
if sub_slot.reward_chain.deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
|
if sub_slot.reward_chain.deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
|
||||||
# 2g. Check infused challenge sub-slot hash in challenge chain, deficit 16
|
# 2g. Check infused challenge sub-slot hash in challenge chain, deficit 16
|
||||||
|
@ -323,12 +336,25 @@ def validate_unfinished_header_block(
|
||||||
):
|
):
|
||||||
return None, ValidationError(Err.INVALID_CC_EOS_VDF, "wrong challenge chain end of slot vdf")
|
return None, ValidationError(Err.INVALID_CC_EOS_VDF, "wrong challenge chain end of slot vdf")
|
||||||
|
|
||||||
# Pass in None for target info since we are only checking the proof from the temporary point,
|
if not skip_vdf_is_valid:
|
||||||
# but the challenge_chain_end_of_slot_vdf actually starts from the start of slot (for light clients)
|
# Pass in None for target info since we are only checking the proof from the temporary point,
|
||||||
if not skip_vdf_is_valid and not sub_slot.proofs.challenge_chain_slot_proof.is_valid(
|
# but the challenge_chain_end_of_slot_vdf actually starts from the start of slot (for light clients)
|
||||||
constants, cc_start_element, partial_cc_vdf_info, None
|
if (
|
||||||
):
|
not sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
|
||||||
return None, ValidationError(Err.INVALID_CC_EOS_VDF)
|
and not sub_slot.proofs.challenge_chain_slot_proof.is_valid(
|
||||||
|
constants, cc_start_element, partial_cc_vdf_info, None
|
||||||
|
)
|
||||||
|
):
|
||||||
|
return None, ValidationError(Err.INVALID_CC_EOS_VDF)
|
||||||
|
if (
|
||||||
|
sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
|
||||||
|
and not sub_slot.proofs.challenge_chain_slot_proof.is_valid(
|
||||||
|
constants,
|
||||||
|
ClassgroupElement.get_default_element(),
|
||||||
|
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf,
|
||||||
|
)
|
||||||
|
):
|
||||||
|
return None, ValidationError(Err.INVALID_CC_EOS_VDF)
|
||||||
|
|
||||||
if genesis_block:
|
if genesis_block:
|
||||||
# 2r. Check deficit (MIN_SUB.. deficit edge case for genesis block)
|
# 2r. Check deficit (MIN_SUB.. deficit edge case for genesis block)
|
||||||
|
@ -553,7 +579,7 @@ def validate_unfinished_header_block(
|
||||||
ChallengeChainSubSlot(dummy_vdf_info, None, None, None, None),
|
ChallengeChainSubSlot(dummy_vdf_info, None, None, None, None),
|
||||||
None,
|
None,
|
||||||
RewardChainSubSlot(dummy_vdf_info, bytes32([0] * 32), None, uint8(0)),
|
RewardChainSubSlot(dummy_vdf_info, bytes32([0] * 32), None, uint8(0)),
|
||||||
SubSlotProofs(VDFProof(uint8(0), b""), None, VDFProof(uint8(0), b"")),
|
SubSlotProofs(VDFProof(uint8(0), b"", False), None, VDFProof(uint8(0), b"", False)),
|
||||||
)
|
)
|
||||||
sub_slots_to_pass_in = header_block.finished_sub_slots + [dummy_sub_slot]
|
sub_slots_to_pass_in = header_block.finished_sub_slots + [dummy_sub_slot]
|
||||||
else:
|
else:
|
||||||
|
@ -635,10 +661,21 @@ def validate_unfinished_header_block(
|
||||||
number_of_iterations=sp_iters,
|
number_of_iterations=sp_iters,
|
||||||
):
|
):
|
||||||
return None, ValidationError(Err.INVALID_CC_SP_VDF)
|
return None, ValidationError(Err.INVALID_CC_SP_VDF)
|
||||||
if not skip_vdf_is_valid and not header_block.challenge_chain_sp_proof.is_valid(
|
if not skip_vdf_is_valid:
|
||||||
constants, cc_vdf_input, target_vdf_info, None
|
if (
|
||||||
):
|
not header_block.challenge_chain_sp_proof.normalized_to_identity
|
||||||
return None, ValidationError(Err.INVALID_CC_SP_VDF)
|
and not header_block.challenge_chain_sp_proof.is_valid(constants, cc_vdf_input, target_vdf_info, None)
|
||||||
|
):
|
||||||
|
return None, ValidationError(Err.INVALID_CC_SP_VDF)
|
||||||
|
if (
|
||||||
|
header_block.challenge_chain_sp_proof.normalized_to_identity
|
||||||
|
and not header_block.challenge_chain_sp_proof.is_valid(
|
||||||
|
constants,
|
||||||
|
ClassgroupElement.get_default_element(),
|
||||||
|
header_block.reward_chain_block.challenge_chain_sp_vdf,
|
||||||
|
)
|
||||||
|
):
|
||||||
|
return None, ValidationError(Err.INVALID_CC_SP_VDF)
|
||||||
else:
|
else:
|
||||||
assert overflow is not None
|
assert overflow is not None
|
||||||
if header_block.reward_chain_block.challenge_chain_sp_vdf is not None:
|
if header_block.reward_chain_block.challenge_chain_sp_vdf is not None:
|
||||||
|
@ -923,15 +960,27 @@ def validate_finished_header_block(
|
||||||
log.error(f"{header_block.reward_chain_block.challenge_chain_ip_vdf }. expected {expected}")
|
log.error(f"{header_block.reward_chain_block.challenge_chain_ip_vdf }. expected {expected}")
|
||||||
log.error(f"Block: {header_block}")
|
log.error(f"Block: {header_block}")
|
||||||
return None, ValidationError(Err.INVALID_CC_IP_VDF)
|
return None, ValidationError(Err.INVALID_CC_IP_VDF)
|
||||||
if not header_block.challenge_chain_ip_proof.is_valid(
|
if (
|
||||||
constants,
|
not header_block.challenge_chain_ip_proof.normalized_to_identity
|
||||||
cc_vdf_output,
|
and not header_block.challenge_chain_ip_proof.is_valid(
|
||||||
cc_target_vdf_info,
|
constants,
|
||||||
None,
|
cc_vdf_output,
|
||||||
|
cc_target_vdf_info,
|
||||||
|
None,
|
||||||
|
)
|
||||||
):
|
):
|
||||||
log.error(f"Did not validate, output {cc_vdf_output}")
|
log.error(f"Did not validate, output {cc_vdf_output}")
|
||||||
log.error(f"Block: {header_block}")
|
log.error(f"Block: {header_block}")
|
||||||
return None, ValidationError(Err.INVALID_CC_IP_VDF)
|
return None, ValidationError(Err.INVALID_CC_IP_VDF)
|
||||||
|
if (
|
||||||
|
header_block.challenge_chain_ip_proof.normalized_to_identity
|
||||||
|
and not header_block.challenge_chain_ip_proof.is_valid(
|
||||||
|
constants,
|
||||||
|
ClassgroupElement.get_default_element(),
|
||||||
|
header_block.reward_chain_block.challenge_chain_ip_vdf,
|
||||||
|
)
|
||||||
|
):
|
||||||
|
return None, ValidationError(Err.INVALID_CC_IP_VDF)
|
||||||
|
|
||||||
# 30. Check reward chain infusion point VDF
|
# 30. Check reward chain infusion point VDF
|
||||||
rc_target_vdf_info = VDFInfo(
|
rc_target_vdf_info = VDFInfo(
|
||||||
|
|
|
@ -34,6 +34,7 @@ from src.consensus.block_header_validation import (
|
||||||
validate_unfinished_header_block,
|
validate_unfinished_header_block,
|
||||||
)
|
)
|
||||||
from src.types.unfinished_header_block import UnfinishedHeaderBlock
|
from src.types.unfinished_header_block import UnfinishedHeaderBlock
|
||||||
|
from src.types.blockchain_format.vdf import VDFInfo
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
@ -73,6 +74,8 @@ class Blockchain(BlockchainInterface):
|
||||||
block_store: BlockStore
|
block_store: BlockStore
|
||||||
# Used to verify blocks in parallel
|
# Used to verify blocks in parallel
|
||||||
pool: ProcessPoolExecutor
|
pool: ProcessPoolExecutor
|
||||||
|
# Set holding seen compact proofs, in order to avoid duplicates.
|
||||||
|
_seen_compact_proofs: Set[Tuple[VDFInfo, uint32]]
|
||||||
|
|
||||||
# Whether blockchain is shut down or not
|
# Whether blockchain is shut down or not
|
||||||
_shut_down: bool
|
_shut_down: bool
|
||||||
|
@ -106,6 +109,7 @@ class Blockchain(BlockchainInterface):
|
||||||
self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants))
|
self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants))
|
||||||
self._shut_down = False
|
self._shut_down = False
|
||||||
await self._load_chain_from_store()
|
await self._load_chain_from_store()
|
||||||
|
self._seen_compact_proofs = set()
|
||||||
return self
|
return self
|
||||||
|
|
||||||
def shut_down(self):
|
def shut_down(self):
|
||||||
|
@ -575,6 +579,14 @@ class Blockchain(BlockchainInterface):
|
||||||
async def get_header_blocks_in_range(self, start: int, stop: int) -> Dict[bytes32, HeaderBlock]:
|
async def get_header_blocks_in_range(self, start: int, stop: int) -> Dict[bytes32, HeaderBlock]:
|
||||||
return await self.block_store.get_header_blocks_in_range(start, stop)
|
return await self.block_store.get_header_blocks_in_range(start, stop)
|
||||||
|
|
||||||
|
async def get_header_block_by_height(self, height: int, header_hash: bytes32) -> Optional[HeaderBlock]:
|
||||||
|
header_dict: Dict[bytes32, HeaderBlock] = await self.get_header_blocks_in_range(height, height)
|
||||||
|
if len(header_dict) == 0:
|
||||||
|
return None
|
||||||
|
if header_hash not in header_dict:
|
||||||
|
return None
|
||||||
|
return header_dict[header_hash]
|
||||||
|
|
||||||
async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]:
|
async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]:
|
||||||
if header_hash in self.__block_records:
|
if header_hash in self.__block_records:
|
||||||
return self.__block_records[header_hash]
|
return self.__block_records[header_hash]
|
||||||
|
@ -616,3 +628,14 @@ class Blockchain(BlockchainInterface):
|
||||||
if segments is None:
|
if segments is None:
|
||||||
return None
|
return None
|
||||||
return segments
|
return segments
|
||||||
|
|
||||||
|
# Returns 'True' if the info is already in the set, otherwise returns 'False' and stores it.
|
||||||
|
def seen_compact_proofs(self, vdf_info: VDFInfo, height: uint32) -> bool:
|
||||||
|
pot_tuple = (vdf_info, height)
|
||||||
|
if pot_tuple in self._seen_compact_proofs:
|
||||||
|
return True
|
||||||
|
# Periodically cleanup to keep size small. TODO: make this smarter, like FIFO.
|
||||||
|
if len(self._seen_compact_proofs) > 10000:
|
||||||
|
self._seen_compact_proofs.clear()
|
||||||
|
self._seen_compact_proofs.add(pot_tuple)
|
||||||
|
return False
|
||||||
|
|
|
@ -6,6 +6,7 @@ from src.types.blockchain_format.sized_bytes import bytes32
|
||||||
from src.types.blockchain_format.sub_epoch_summary import SubEpochSummary
|
from src.types.blockchain_format.sub_epoch_summary import SubEpochSummary
|
||||||
from src.types.weight_proof import SubEpochChallengeSegment
|
from src.types.weight_proof import SubEpochChallengeSegment
|
||||||
from src.util.ints import uint32
|
from src.util.ints import uint32
|
||||||
|
from src.types.blockchain_format.vdf import VDFInfo
|
||||||
|
|
||||||
|
|
||||||
class BlockchainInterface:
|
class BlockchainInterface:
|
||||||
|
@ -51,6 +52,9 @@ class BlockchainInterface:
|
||||||
async def get_header_blocks_in_range(self, start: int, stop: int) -> Dict[bytes32, HeaderBlock]:
|
async def get_header_blocks_in_range(self, start: int, stop: int) -> Dict[bytes32, HeaderBlock]:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
async def get_header_block_by_height(self, height: int, header_hash: bytes32) -> Optional[HeaderBlock]:
|
||||||
|
pass
|
||||||
|
|
||||||
def try_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:
|
def try_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:
|
||||||
if self.contains_block(header_hash):
|
if self.contains_block(header_hash):
|
||||||
return self.block_record(header_hash)
|
return self.block_record(header_hash)
|
||||||
|
@ -66,3 +70,6 @@ class BlockchainInterface:
|
||||||
sub_epoch_summary_height: uint32,
|
sub_epoch_summary_height: uint32,
|
||||||
) -> Optional[List[SubEpochChallengeSegment]]:
|
) -> Optional[List[SubEpochChallengeSegment]]:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
def seen_compact_proofs(self, vdf_info: VDFInfo, height: uint32) -> bool:
|
||||||
|
pass
|
||||||
|
|
|
@ -26,7 +26,7 @@ class BlockStore:
|
||||||
self.db = connection
|
self.db = connection
|
||||||
await self.db.execute(
|
await self.db.execute(
|
||||||
"CREATE TABLE IF NOT EXISTS full_blocks(header_hash text PRIMARY KEY, height bigint,"
|
"CREATE TABLE IF NOT EXISTS full_blocks(header_hash text PRIMARY KEY, height bigint,"
|
||||||
" is_block tinyint, block blob)"
|
" is_block tinyint, is_fully_compactified tinyint, block blob)"
|
||||||
)
|
)
|
||||||
|
|
||||||
# Block records
|
# Block records
|
||||||
|
@ -44,6 +44,7 @@ class BlockStore:
|
||||||
# Height index so we can look up in order of height for sync purposes
|
# Height index so we can look up in order of height for sync purposes
|
||||||
await self.db.execute("CREATE INDEX IF NOT EXISTS full_block_height on full_blocks(height)")
|
await self.db.execute("CREATE INDEX IF NOT EXISTS full_block_height on full_blocks(height)")
|
||||||
await self.db.execute("CREATE INDEX IF NOT EXISTS is_block on full_blocks(is_block)")
|
await self.db.execute("CREATE INDEX IF NOT EXISTS is_block on full_blocks(is_block)")
|
||||||
|
await self.db.execute("CREATE INDEX IF NOT EXISTS is_fully_compactified on full_blocks(is_fully_compactified)")
|
||||||
|
|
||||||
await self.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")
|
await self.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")
|
||||||
|
|
||||||
|
@ -71,11 +72,12 @@ class BlockStore:
|
||||||
async def add_full_block(self, block: FullBlock, block_record: BlockRecord) -> None:
|
async def add_full_block(self, block: FullBlock, block_record: BlockRecord) -> None:
|
||||||
self.block_cache.put(block.header_hash, block)
|
self.block_cache.put(block.header_hash, block)
|
||||||
cursor_1 = await self.db.execute(
|
cursor_1 = await self.db.execute(
|
||||||
"INSERT OR REPLACE INTO full_blocks VALUES(?, ?, ?, ?)",
|
"INSERT OR REPLACE INTO full_blocks VALUES(?, ?, ?, ?, ?)",
|
||||||
(
|
(
|
||||||
block.header_hash.hex(),
|
block.header_hash.hex(),
|
||||||
block.height,
|
block.height,
|
||||||
int(block.is_transaction_block()),
|
int(block.is_transaction_block()),
|
||||||
|
int(block.is_fully_compactified()),
|
||||||
bytes(block),
|
bytes(block),
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
@ -319,3 +321,23 @@ class BlockStore:
|
||||||
(header_hash.hex(),),
|
(header_hash.hex(),),
|
||||||
)
|
)
|
||||||
await cursor_2.close()
|
await cursor_2.close()
|
||||||
|
|
||||||
|
async def is_fully_compactified(self, header_hash: bytes32) -> Optional[bool]:
|
||||||
|
cursor = await self.db.execute(
|
||||||
|
"SELECT is_fully_compactified from full_blocks WHERE header_hash=?", (header_hash.hex(),)
|
||||||
|
)
|
||||||
|
row = await cursor.fetchone()
|
||||||
|
await cursor.close()
|
||||||
|
if row is None:
|
||||||
|
return None
|
||||||
|
return bool(row[0])
|
||||||
|
|
||||||
|
async def get_first_not_compactified(self, min_height: int) -> Optional[int]:
|
||||||
|
cursor = await self.db.execute(
|
||||||
|
"SELECT MIN(height) from full_blocks WHERE is_fully_compactified=0 AND height>=?", (min_height,)
|
||||||
|
)
|
||||||
|
row = await cursor.fetchone()
|
||||||
|
await cursor.close()
|
||||||
|
if row is None:
|
||||||
|
return None
|
||||||
|
return int(row[0])
|
||||||
|
|
|
@ -47,10 +47,12 @@ from src.types.blockchain_format.sub_epoch_summary import SubEpochSummary
|
||||||
from src.types.mempool_inclusion_status import MempoolInclusionStatus
|
from src.types.mempool_inclusion_status import MempoolInclusionStatus
|
||||||
from src.types.spend_bundle import SpendBundle
|
from src.types.spend_bundle import SpendBundle
|
||||||
from src.types.unfinished_block import UnfinishedBlock
|
from src.types.unfinished_block import UnfinishedBlock
|
||||||
|
from src.types.blockchain_format.vdf import VDFInfo, VDFProof, CompressibleVDFField
|
||||||
from src.util.errors import ConsensusError, Err
|
from src.util.errors import ConsensusError, Err
|
||||||
from src.util.ints import uint32, uint128, uint8, uint64
|
from src.util.ints import uint32, uint128, uint8, uint64
|
||||||
from src.util.path import mkdir, path_from_root
|
from src.util.path import mkdir, path_from_root
|
||||||
|
from src.types.header_block import HeaderBlock
|
||||||
|
from src.types.blockchain_format.classgroup import ClassgroupElement
|
||||||
|
|
||||||
|
|
||||||
class FullNode:
|
class FullNode:
|
||||||
|
@ -130,9 +132,18 @@ class FullNode:
|
||||||
self.state_changed_callback = None
|
self.state_changed_callback = None
|
||||||
|
|
||||||
peak: Optional[BlockRecord] = self.blockchain.get_peak()
|
peak: Optional[BlockRecord] = self.blockchain.get_peak()
|
||||||
|
self.uncompact_task = None
|
||||||
if peak is not None:
|
if peak is not None:
|
||||||
full_peak = await self.blockchain.get_full_peak()
|
full_peak = await self.blockchain.get_full_peak()
|
||||||
await self.peak_post_processing(full_peak, peak, max(peak.height - 1, 0), None)
|
await self.peak_post_processing(full_peak, peak, max(peak.height - 1, 0), None)
|
||||||
|
if self.config["send_uncompact_interval"] != 0:
|
||||||
|
assert self.config["target_uncompact_proofs"] != 0
|
||||||
|
self.uncompact_task = asyncio.create_task(
|
||||||
|
self.broadcast_uncompact_blocks(
|
||||||
|
self.config["send_uncompact_interval"],
|
||||||
|
self.config["target_uncompact_proofs"],
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
def set_server(self, server: ChiaServer):
|
def set_server(self, server: ChiaServer):
|
||||||
self.server = server
|
self.server = server
|
||||||
|
@ -474,6 +485,8 @@ class FullNode:
|
||||||
self.mempool_manager.shut_down()
|
self.mempool_manager.shut_down()
|
||||||
if self.full_node_peers is not None:
|
if self.full_node_peers is not None:
|
||||||
asyncio.create_task(self.full_node_peers.close())
|
asyncio.create_task(self.full_node_peers.close())
|
||||||
|
if self.uncompact_task is not None:
|
||||||
|
self.uncompact_task.cancel()
|
||||||
|
|
||||||
async def _await_closed(self):
|
async def _await_closed(self):
|
||||||
try:
|
try:
|
||||||
|
@ -1346,3 +1359,326 @@ class FullNode:
|
||||||
f"Wasn't able to add transaction with id {spend_name}, " f"status {status} error: {error}"
|
f"Wasn't able to add transaction with id {spend_name}, " f"status {status} error: {error}"
|
||||||
)
|
)
|
||||||
return status, error
|
return status, error
|
||||||
|
|
||||||
|
async def _needs_compact_proof(
|
||||||
|
self, vdf_info: VDFInfo, header_block: HeaderBlock, field_vdf: CompressibleVDFField
|
||||||
|
) -> bool:
|
||||||
|
if field_vdf == CompressibleVDFField.CC_EOS_VDF:
|
||||||
|
for sub_slot in header_block.finished_sub_slots:
|
||||||
|
if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == vdf_info:
|
||||||
|
if (
|
||||||
|
sub_slot.proofs.challenge_chain_slot_proof.witness_type == 0
|
||||||
|
and sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
|
||||||
|
):
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
if field_vdf == CompressibleVDFField.ICC_EOS_VDF:
|
||||||
|
for sub_slot in header_block.finished_sub_slots:
|
||||||
|
if (
|
||||||
|
sub_slot.infused_challenge_chain is not None
|
||||||
|
and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == vdf_info
|
||||||
|
):
|
||||||
|
assert sub_slot.proofs.infused_challenge_chain_slot_proof is not None
|
||||||
|
if (
|
||||||
|
sub_slot.proofs.infused_challenge_chain_slot_proof.witness_type == 0
|
||||||
|
and sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
|
||||||
|
):
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
if field_vdf == CompressibleVDFField.CC_SP_VDF:
|
||||||
|
if header_block.reward_chain_block.challenge_chain_sp_vdf is None:
|
||||||
|
return False
|
||||||
|
if vdf_info == header_block.reward_chain_block.challenge_chain_sp_vdf:
|
||||||
|
assert header_block.challenge_chain_sp_proof is not None
|
||||||
|
if (
|
||||||
|
header_block.challenge_chain_sp_proof.witness_type == 0
|
||||||
|
and header_block.challenge_chain_sp_proof.normalized_to_identity
|
||||||
|
):
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
if field_vdf == CompressibleVDFField.CC_IP_VDF:
|
||||||
|
if vdf_info == header_block.reward_chain_block.challenge_chain_ip_vdf:
|
||||||
|
if (
|
||||||
|
header_block.challenge_chain_ip_proof.witness_type == 0
|
||||||
|
and header_block.challenge_chain_ip_proof.normalized_to_identity
|
||||||
|
):
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
async def _can_accept_compact_proof(
|
||||||
|
self,
|
||||||
|
vdf_info: VDFInfo,
|
||||||
|
vdf_proof: VDFProof,
|
||||||
|
height: uint32,
|
||||||
|
header_hash: bytes32,
|
||||||
|
field_vdf: CompressibleVDFField,
|
||||||
|
) -> bool:
|
||||||
|
"""
|
||||||
|
- Checks if the provided proof is indeed compact.
|
||||||
|
- Checks if proof verifies given the vdf_info from the start of sub-slot.
|
||||||
|
- Checks if the provided vdf_info is correct, assuming it refers to the start of sub-slot.
|
||||||
|
- Checks if the existing proof was non-compact. Ignore this proof if we already have a compact proof.
|
||||||
|
"""
|
||||||
|
is_fully_compactified = await self.block_store.is_fully_compactified(header_hash)
|
||||||
|
if is_fully_compactified is None or is_fully_compactified:
|
||||||
|
return False
|
||||||
|
if vdf_proof.witness_type > 0 or not vdf_proof.normalized_to_identity:
|
||||||
|
return False
|
||||||
|
if not vdf_proof.is_valid(self.constants, ClassgroupElement.get_default_element(), vdf_info):
|
||||||
|
return False
|
||||||
|
header_block = await self.blockchain.get_header_block_by_height(height, header_hash)
|
||||||
|
if header_block is None:
|
||||||
|
return False
|
||||||
|
return await self._needs_compact_proof(vdf_info, header_block, field_vdf)
|
||||||
|
|
||||||
|
async def _replace_proof(
|
||||||
|
self,
|
||||||
|
vdf_info: VDFInfo,
|
||||||
|
vdf_proof: VDFProof,
|
||||||
|
height: uint32,
|
||||||
|
field_vdf: CompressibleVDFField,
|
||||||
|
):
|
||||||
|
full_blocks = await self.block_store.get_full_blocks_at([height])
|
||||||
|
assert len(full_blocks) > 0
|
||||||
|
for block in full_blocks:
|
||||||
|
new_block = None
|
||||||
|
block_record = self.blockchain.height_to_block_record(height)
|
||||||
|
|
||||||
|
if field_vdf == CompressibleVDFField.CC_EOS_VDF:
|
||||||
|
for index, sub_slot in enumerate(block.finished_sub_slots):
|
||||||
|
if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == vdf_info:
|
||||||
|
new_proofs = dataclasses.replace(sub_slot.proofs, challenge_chain_slot_proof=vdf_proof)
|
||||||
|
new_subslot = dataclasses.replace(sub_slot, proofs=new_proofs)
|
||||||
|
new_finished_subslots = block.finished_sub_slots
|
||||||
|
new_finished_subslots[index] = new_subslot
|
||||||
|
new_block = dataclasses.replace(block, finished_sub_slots=new_finished_subslots)
|
||||||
|
break
|
||||||
|
if field_vdf == CompressibleVDFField.ICC_EOS_VDF:
|
||||||
|
for index, sub_slot in enumerate(block.finished_sub_slots):
|
||||||
|
if (
|
||||||
|
sub_slot.infused_challenge_chain is not None
|
||||||
|
and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == vdf_info
|
||||||
|
):
|
||||||
|
new_proofs = dataclasses.replace(sub_slot.proofs, infused_challenge_chain_slot_proof=vdf_proof)
|
||||||
|
new_subslot = dataclasses.replace(sub_slot, proofs=new_proofs)
|
||||||
|
new_finished_subslots = block.finished_sub_slots
|
||||||
|
new_finished_subslots[index] = new_subslot
|
||||||
|
new_block = dataclasses.replace(block, finished_sub_slots=new_finished_subslots)
|
||||||
|
break
|
||||||
|
if field_vdf == CompressibleVDFField.CC_SP_VDF:
|
||||||
|
assert block.challenge_chain_sp_proof is not None
|
||||||
|
new_block = dataclasses.replace(block, challenge_chain_sp_proof=vdf_proof)
|
||||||
|
if field_vdf == CompressibleVDFField.CC_IP_VDF:
|
||||||
|
new_block = dataclasses.replace(block, challenge_chain_ip_proof=vdf_proof)
|
||||||
|
assert new_block is not None
|
||||||
|
await self.block_store.add_full_block(new_block, block_record)
|
||||||
|
|
||||||
|
async def respond_compact_vdf_timelord(self, request: timelord_protocol.RespondCompactProofOfTime):
|
||||||
|
field_vdf = CompressibleVDFField(int(request.field_vdf))
|
||||||
|
if not await self._can_accept_compact_proof(
|
||||||
|
request.vdf_info, request.vdf_proof, request.height, request.header_hash, field_vdf
|
||||||
|
):
|
||||||
|
self.log.error(f"Couldn't add compact proof of time from a bluebox: {request}.")
|
||||||
|
return
|
||||||
|
async with self.blockchain.lock:
|
||||||
|
await self._replace_proof(request.vdf_info, request.vdf_proof, request.height, field_vdf)
|
||||||
|
msg = make_msg(
|
||||||
|
ProtocolMessageTypes.new_compact_vdf,
|
||||||
|
full_node_protocol.NewCompactVDF(request.height, request.header_hash, request.field_vdf, request.vdf_info),
|
||||||
|
)
|
||||||
|
if self.server is not None:
|
||||||
|
await self.server.send_to_all([msg], NodeType.FULL_NODE)
|
||||||
|
|
||||||
|
async def new_compact_vdf(self, request: full_node_protocol.NewCompactVDF, peer: ws.WSChiaConnection):
|
||||||
|
is_fully_compactified = await self.block_store.is_fully_compactified(request.header_hash)
|
||||||
|
if is_fully_compactified is None or is_fully_compactified:
|
||||||
|
return False
|
||||||
|
header_block = await self.blockchain.get_header_block_by_height(request.height, request.header_hash)
|
||||||
|
if header_block is None:
|
||||||
|
return
|
||||||
|
field_vdf = CompressibleVDFField(int(request.field_vdf))
|
||||||
|
if await self._needs_compact_proof(request.vdf_info, header_block, field_vdf):
|
||||||
|
msg = make_msg(
|
||||||
|
ProtocolMessageTypes.request_compact_vdf,
|
||||||
|
full_node_protocol.RequestCompactVDF(
|
||||||
|
request.height, request.header_hash, request.field_vdf, request.vdf_info
|
||||||
|
),
|
||||||
|
)
|
||||||
|
await peer.send_message(msg)
|
||||||
|
|
||||||
|
async def request_compact_vdf(self, request: full_node_protocol.RequestCompactVDF, peer: ws.WSChiaConnection):
|
||||||
|
header_block = await self.blockchain.get_header_block_by_height(request.height, request.header_hash)
|
||||||
|
if header_block is None:
|
||||||
|
return
|
||||||
|
vdf_proof: Optional[VDFProof] = None
|
||||||
|
field_vdf = CompressibleVDFField(int(request.field_vdf))
|
||||||
|
if field_vdf == CompressibleVDFField.CC_EOS_VDF:
|
||||||
|
for sub_slot in header_block.finished_sub_slots:
|
||||||
|
if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == request.vdf_info:
|
||||||
|
vdf_proof = sub_slot.proofs.challenge_chain_slot_proof
|
||||||
|
break
|
||||||
|
if field_vdf == CompressibleVDFField.ICC_EOS_VDF:
|
||||||
|
for sub_slot in header_block.finished_sub_slots:
|
||||||
|
if (
|
||||||
|
sub_slot.infused_challenge_chain is not None
|
||||||
|
and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == request.vdf_info
|
||||||
|
):
|
||||||
|
vdf_proof = sub_slot.proofs.infused_challenge_chain_slot_proof
|
||||||
|
break
|
||||||
|
if (
|
||||||
|
field_vdf == CompressibleVDFField.CC_SP_VDF
|
||||||
|
and header_block.reward_chain_block.challenge_chain_sp_vdf == request.vdf_info
|
||||||
|
):
|
||||||
|
vdf_proof = header_block.challenge_chain_sp_proof
|
||||||
|
if (
|
||||||
|
field_vdf == CompressibleVDFField.CC_IP_VDF
|
||||||
|
and header_block.reward_chain_block.challenge_chain_ip_vdf == request.vdf_info
|
||||||
|
):
|
||||||
|
vdf_proof = header_block.challenge_chain_ip_proof
|
||||||
|
if vdf_proof is None or vdf_proof.witness_type > 0 or not vdf_proof.normalized_to_identity:
|
||||||
|
self.log.error(f"{peer} requested compact vdf we don't have, height: {request.height}.")
|
||||||
|
return
|
||||||
|
compact_vdf = full_node_protocol.RespondCompactVDF(
|
||||||
|
request.height,
|
||||||
|
request.header_hash,
|
||||||
|
request.field_vdf,
|
||||||
|
request.vdf_info,
|
||||||
|
vdf_proof,
|
||||||
|
)
|
||||||
|
msg = make_msg(ProtocolMessageTypes.respond_compact_vdf, compact_vdf)
|
||||||
|
await peer.send_message(msg)
|
||||||
|
|
||||||
|
async def respond_compact_vdf(self, request: full_node_protocol.RespondCompactVDF, peer: ws.WSChiaConnection):
|
||||||
|
field_vdf = CompressibleVDFField(int(request.field_vdf))
|
||||||
|
if not await self._can_accept_compact_proof(
|
||||||
|
request.vdf_info, request.vdf_proof, request.height, request.header_hash, field_vdf
|
||||||
|
):
|
||||||
|
self.log.error(f"Couldn't add compact proof of time from a full_node peer: {peer}.")
|
||||||
|
return
|
||||||
|
async with self.blockchain.lock:
|
||||||
|
if self.blockchain.seen_compact_proofs(request.vdf_info, request.height):
|
||||||
|
return
|
||||||
|
await self._replace_proof(request.vdf_info, request.vdf_proof, request.height, field_vdf)
|
||||||
|
msg = make_msg(
|
||||||
|
ProtocolMessageTypes.new_compact_vdf,
|
||||||
|
full_node_protocol.NewCompactVDF(request.height, request.header_hash, request.field_vdf, request.vdf_info),
|
||||||
|
)
|
||||||
|
if self.server is not None:
|
||||||
|
await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
|
||||||
|
|
||||||
|
async def broadcast_uncompact_blocks(self, uncompact_interval_scan: int, target_uncompact_proofs: int):
|
||||||
|
min_height: Optional[int] = 0
|
||||||
|
try:
|
||||||
|
while not self._shut_down:
|
||||||
|
while self.sync_store.get_sync_mode():
|
||||||
|
if self._shut_down:
|
||||||
|
return
|
||||||
|
await asyncio.sleep(30)
|
||||||
|
|
||||||
|
broadcast_list: List[timelord_protocol.RequestCompactProofOfTime] = []
|
||||||
|
new_min_height = None
|
||||||
|
max_height = self.blockchain.get_peak_height()
|
||||||
|
if max_height is None:
|
||||||
|
await asyncio.sleep(30)
|
||||||
|
continue
|
||||||
|
# Calculate 'min_height' correctly the first time this task is launched, using the db.
|
||||||
|
assert min_height is not None
|
||||||
|
min_height = await self.block_store.get_first_not_compactified(min_height)
|
||||||
|
if min_height is None or min_height > max(0, max_height - 1000):
|
||||||
|
min_height = max(0, max_height - 1000)
|
||||||
|
batches_finished = 0
|
||||||
|
self.log.info("Scanning the blockchain for uncompact blocks.")
|
||||||
|
for h in range(min_height, max_height, 100):
|
||||||
|
# Got 10 times the target header count, sampling the target headers should contain
|
||||||
|
# enough randomness to split the work between blueboxes.
|
||||||
|
if len(broadcast_list) > target_uncompact_proofs * 10:
|
||||||
|
break
|
||||||
|
stop_height = min(h + 99, max_height)
|
||||||
|
headers = await self.blockchain.get_header_blocks_in_range(min_height, stop_height)
|
||||||
|
for header in headers.values():
|
||||||
|
prev_broadcast_list_len = len(broadcast_list)
|
||||||
|
expected_header_hash = self.blockchain.height_to_hash(header.height)
|
||||||
|
if header.header_hash != expected_header_hash:
|
||||||
|
continue
|
||||||
|
for sub_slot in header.finished_sub_slots:
|
||||||
|
if (
|
||||||
|
sub_slot.proofs.challenge_chain_slot_proof.witness_type > 0
|
||||||
|
or not sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
|
||||||
|
):
|
||||||
|
broadcast_list.append(
|
||||||
|
timelord_protocol.RequestCompactProofOfTime(
|
||||||
|
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf,
|
||||||
|
header.header_hash,
|
||||||
|
header.height,
|
||||||
|
uint8(CompressibleVDFField.CC_EOS_VDF),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if sub_slot.proofs.infused_challenge_chain_slot_proof is not None and (
|
||||||
|
sub_slot.proofs.infused_challenge_chain_slot_proof.witness_type > 0
|
||||||
|
or not sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
|
||||||
|
):
|
||||||
|
assert sub_slot.infused_challenge_chain is not None
|
||||||
|
broadcast_list.append(
|
||||||
|
timelord_protocol.RequestCompactProofOfTime(
|
||||||
|
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
|
||||||
|
header.header_hash,
|
||||||
|
header.height,
|
||||||
|
uint8(CompressibleVDFField.ICC_EOS_VDF),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if header.challenge_chain_sp_proof is not None and (
|
||||||
|
header.challenge_chain_sp_proof.witness_type > 0
|
||||||
|
or not header.challenge_chain_sp_proof.normalized_to_identity
|
||||||
|
):
|
||||||
|
assert header.reward_chain_block.challenge_chain_sp_vdf is not None
|
||||||
|
broadcast_list.append(
|
||||||
|
timelord_protocol.RequestCompactProofOfTime(
|
||||||
|
header.reward_chain_block.challenge_chain_sp_vdf,
|
||||||
|
header.header_hash,
|
||||||
|
header.height,
|
||||||
|
uint8(CompressibleVDFField.CC_SP_VDF),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
if (
|
||||||
|
header.challenge_chain_ip_proof.witness_type > 0
|
||||||
|
or not header.challenge_chain_ip_proof.normalized_to_identity
|
||||||
|
):
|
||||||
|
broadcast_list.append(
|
||||||
|
timelord_protocol.RequestCompactProofOfTime(
|
||||||
|
header.reward_chain_block.challenge_chain_ip_vdf,
|
||||||
|
header.header_hash,
|
||||||
|
header.height,
|
||||||
|
uint8(CompressibleVDFField.CC_IP_VDF),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
# This is the first header with uncompact proofs. Store its height so next time we iterate
|
||||||
|
# only from here. Fix header block iteration window to at least 1000, so reorgs will be
|
||||||
|
# handled correctly.
|
||||||
|
if prev_broadcast_list_len == 0 and len(broadcast_list) > 0 and h <= max(0, max_height - 1000):
|
||||||
|
new_min_height = header.height
|
||||||
|
|
||||||
|
# Small sleep between batches.
|
||||||
|
batches_finished += 1
|
||||||
|
if batches_finished % 10 == 0:
|
||||||
|
await asyncio.sleep(1)
|
||||||
|
|
||||||
|
# We have no uncompact blocks, but mentain the block iteration window to at least 1000 blocks.
|
||||||
|
if new_min_height is None:
|
||||||
|
new_min_height = max(0, max_height - 1000)
|
||||||
|
min_height = new_min_height
|
||||||
|
if len(broadcast_list) > target_uncompact_proofs:
|
||||||
|
random.shuffle(broadcast_list)
|
||||||
|
broadcast_list = broadcast_list[:target_uncompact_proofs]
|
||||||
|
if self.sync_store.get_sync_mode():
|
||||||
|
continue
|
||||||
|
if self.server is not None:
|
||||||
|
for new_pot in broadcast_list:
|
||||||
|
msg = make_msg(ProtocolMessageTypes.request_compact_proof_of_time, new_pot)
|
||||||
|
await self.server.send_to_all([msg], NodeType.TIMELORD)
|
||||||
|
await asyncio.sleep(uncompact_interval_scan)
|
||||||
|
except Exception as e:
|
||||||
|
error_stack = traceback.format_exc()
|
||||||
|
self.log.error(f"Exception in broadcast_uncompact_blocks: {e}")
|
||||||
|
self.log.error(f"Exception Stack: {error_stack}")
|
||||||
|
|
|
@ -1085,3 +1085,30 @@ class FullNodeAPI:
|
||||||
wallet_protocol.RespondHeaderBlocks(request.start_height, request.end_height, header_blocks),
|
wallet_protocol.RespondHeaderBlocks(request.start_height, request.end_height, header_blocks),
|
||||||
)
|
)
|
||||||
return msg
|
return msg
|
||||||
|
|
||||||
|
@api_request
|
||||||
|
async def respond_compact_vdf_timelord(self, request: timelord_protocol.RespondCompactProofOfTime):
|
||||||
|
if self.full_node.sync_store.get_sync_mode():
|
||||||
|
return None
|
||||||
|
await self.full_node.respond_compact_vdf_timelord(request)
|
||||||
|
|
||||||
|
@peer_required
|
||||||
|
@api_request
|
||||||
|
async def new_compact_vdf(self, request: full_node_protocol.NewCompactVDF, peer: ws.WSChiaConnection):
|
||||||
|
if self.full_node.sync_store.get_sync_mode():
|
||||||
|
return None
|
||||||
|
await self.full_node.new_compact_vdf(request, peer)
|
||||||
|
|
||||||
|
@peer_required
|
||||||
|
@api_request
|
||||||
|
async def request_compact_vdf(self, request: full_node_protocol.RequestCompactVDF, peer: ws.WSChiaConnection):
|
||||||
|
if self.full_node.sync_store.get_sync_mode():
|
||||||
|
return None
|
||||||
|
await self.full_node.request_compact_vdf(request, peer)
|
||||||
|
|
||||||
|
@peer_required
|
||||||
|
@api_request
|
||||||
|
async def respond_compact_vdf(self, request: full_node_protocol.RespondCompactVDF, peer: ws.WSChiaConnection):
|
||||||
|
if self.full_node.sync_store.get_sync_mode():
|
||||||
|
return None
|
||||||
|
await self.full_node.respond_compact_vdf(request, peer)
|
||||||
|
|
|
@ -258,10 +258,22 @@ class FullNodeStore:
|
||||||
number_of_iterations=sub_slot_iters,
|
number_of_iterations=sub_slot_iters,
|
||||||
):
|
):
|
||||||
return None
|
return None
|
||||||
if not eos.proofs.challenge_chain_slot_proof.is_valid(
|
if (
|
||||||
self.constants,
|
not eos.proofs.challenge_chain_slot_proof.normalized_to_identity
|
||||||
cc_start_element,
|
and not eos.proofs.challenge_chain_slot_proof.is_valid(
|
||||||
partial_cc_vdf_info,
|
self.constants,
|
||||||
|
cc_start_element,
|
||||||
|
partial_cc_vdf_info,
|
||||||
|
)
|
||||||
|
):
|
||||||
|
return None
|
||||||
|
if (
|
||||||
|
eos.proofs.challenge_chain_slot_proof.normalized_to_identity
|
||||||
|
and not eos.proofs.challenge_chain_slot_proof.is_valid(
|
||||||
|
self.constants,
|
||||||
|
ClassgroupElement.get_default_element(),
|
||||||
|
eos.challenge_chain.challenge_chain_end_of_slot_vdf,
|
||||||
|
)
|
||||||
):
|
):
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
@ -292,8 +304,20 @@ class FullNodeStore:
|
||||||
number_of_iterations=icc_iters,
|
number_of_iterations=icc_iters,
|
||||||
):
|
):
|
||||||
return None
|
return None
|
||||||
if not eos.proofs.infused_challenge_chain_slot_proof.is_valid(
|
if (
|
||||||
self.constants, icc_start_element, partial_icc_vdf_info
|
not eos.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
|
||||||
|
and not eos.proofs.infused_challenge_chain_slot_proof.is_valid(
|
||||||
|
self.constants, icc_start_element, partial_icc_vdf_info
|
||||||
|
)
|
||||||
|
):
|
||||||
|
return None
|
||||||
|
if (
|
||||||
|
eos.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
|
||||||
|
and not eos.proofs.infused_challenge_chain_slot_proof.is_valid(
|
||||||
|
self.constants,
|
||||||
|
ClassgroupElement.get_default_element(),
|
||||||
|
eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
|
||||||
|
)
|
||||||
):
|
):
|
||||||
return None
|
return None
|
||||||
else:
|
else:
|
||||||
|
@ -411,12 +435,18 @@ class FullNodeStore:
|
||||||
assert curr is not None
|
assert curr is not None
|
||||||
start_ele = curr.challenge_vdf_output
|
start_ele = curr.challenge_vdf_output
|
||||||
if not skip_vdf_validation:
|
if not skip_vdf_validation:
|
||||||
if not signage_point.cc_proof.is_valid(
|
if not signage_point.cc_proof.normalized_to_identity and not signage_point.cc_proof.is_valid(
|
||||||
self.constants,
|
self.constants,
|
||||||
start_ele,
|
start_ele,
|
||||||
cc_vdf_info_expected,
|
cc_vdf_info_expected,
|
||||||
):
|
):
|
||||||
return False
|
return False
|
||||||
|
if signage_point.cc_proof.normalized_to_identity and not signage_point.cc_proof.is_valid(
|
||||||
|
self.constants,
|
||||||
|
ClassgroupElement.get_default_element(),
|
||||||
|
signage_point.cc_vdf,
|
||||||
|
):
|
||||||
|
return False
|
||||||
|
|
||||||
if rc_vdf_info_expected.challenge != signage_point.rc_vdf.challenge:
|
if rc_vdf_info_expected.challenge != signage_point.rc_vdf.challenge:
|
||||||
# This signage point is probably outdated
|
# This signage point is probably outdated
|
||||||
|
|
|
@ -3,7 +3,6 @@ from typing import List, Optional
|
||||||
|
|
||||||
from src.types.end_of_slot_bundle import EndOfSubSlotBundle
|
from src.types.end_of_slot_bundle import EndOfSubSlotBundle
|
||||||
from src.types.full_block import FullBlock
|
from src.types.full_block import FullBlock
|
||||||
from src.types.blockchain_format.slots import SubSlotProofs
|
|
||||||
from src.types.spend_bundle import SpendBundle
|
from src.types.spend_bundle import SpendBundle
|
||||||
from src.types.unfinished_block import UnfinishedBlock
|
from src.types.unfinished_block import UnfinishedBlock
|
||||||
from src.types.blockchain_format.sized_bytes import bytes32
|
from src.types.blockchain_format.sized_bytes import bytes32
|
||||||
|
@ -163,21 +162,30 @@ class RequestMempoolTransactions(Streamable):
|
||||||
|
|
||||||
@dataclass(frozen=True)
|
@dataclass(frozen=True)
|
||||||
@streamable
|
@streamable
|
||||||
class RequestCompactVDFs(Streamable):
|
class NewCompactVDF(Streamable):
|
||||||
height: uint32
|
height: uint32
|
||||||
|
header_hash: bytes32
|
||||||
|
field_vdf: uint8
|
||||||
|
vdf_info: VDFInfo
|
||||||
|
|
||||||
|
|
||||||
@dataclass(frozen=True)
|
@dataclass(frozen=True)
|
||||||
@streamable
|
@streamable
|
||||||
class RespondCompactVDFs(Streamable):
|
class RequestCompactVDF(Streamable):
|
||||||
height: uint32
|
height: uint32
|
||||||
header_hash: bytes32
|
header_hash: bytes32
|
||||||
end_of_slot_proofs: List[SubSlotProofs] # List of challenge eos vdf and reward eos vdf
|
field_vdf: uint8
|
||||||
cc_sp_proof: Optional[VDFProof] # If not first sp
|
vdf_info: VDFInfo
|
||||||
rc_sp_proof: Optional[VDFProof] # If not first sp
|
|
||||||
cc_ip_proof: VDFProof
|
|
||||||
icc_ip_proof: Optional[VDFProof]
|
@dataclass(frozen=True)
|
||||||
rc_ip_proof: VDFProof
|
@streamable
|
||||||
|
class RespondCompactVDF(Streamable):
|
||||||
|
height: uint32
|
||||||
|
header_hash: bytes32
|
||||||
|
field_vdf: uint8
|
||||||
|
vdf_info: VDFInfo
|
||||||
|
vdf_proof: VDFProof
|
||||||
|
|
||||||
|
|
||||||
@dataclass(frozen=True)
|
@dataclass(frozen=True)
|
||||||
|
|
|
@ -26,56 +26,59 @@ class ProtocolMessageTypes(Enum):
|
||||||
new_infusion_point_vdf = 15
|
new_infusion_point_vdf = 15
|
||||||
new_signage_point_vdf = 16
|
new_signage_point_vdf = 16
|
||||||
new_end_of_sub_slot_vdf = 17
|
new_end_of_sub_slot_vdf = 17
|
||||||
|
request_compact_proof_of_time = 18
|
||||||
|
respond_compact_vdf_timelord = 19
|
||||||
|
|
||||||
# Full node protocol (full_node <-> full_node)
|
# Full node protocol (full_node <-> full_node)
|
||||||
new_peak = 18
|
new_peak = 20
|
||||||
new_transaction = 19
|
new_transaction = 21
|
||||||
request_transaction = 20
|
request_transaction = 22
|
||||||
respond_transaction = 21
|
respond_transaction = 23
|
||||||
request_proof_of_weight = 22
|
request_proof_of_weight = 24
|
||||||
respond_proof_of_weight = 23
|
respond_proof_of_weight = 25
|
||||||
request_block = 24
|
request_block = 26
|
||||||
respond_block = 25
|
respond_block = 27
|
||||||
reject_block = 26
|
reject_block = 28
|
||||||
request_blocks = 27
|
request_blocks = 29
|
||||||
respond_blocks = 28
|
respond_blocks = 30
|
||||||
reject_blocks = 29
|
reject_blocks = 31
|
||||||
new_unfinished_block = 30
|
new_unfinished_block = 32
|
||||||
request_unfinished_block = 31
|
request_unfinished_block = 33
|
||||||
respond_unfinished_block = 32
|
respond_unfinished_block = 34
|
||||||
new_signage_point_or_end_of_sub_slot = 33
|
new_signage_point_or_end_of_sub_slot = 35
|
||||||
request_signage_point_or_end_of_sub_slot = 34
|
request_signage_point_or_end_of_sub_slot = 36
|
||||||
respond_signage_point = 35
|
respond_signage_point = 37
|
||||||
respond_end_of_sub_slot = 36
|
respond_end_of_sub_slot = 38
|
||||||
request_mempool_transactions = 37
|
request_mempool_transactions = 39
|
||||||
request_compact_vdfs = 38
|
request_compact_vdf = 40
|
||||||
respond_compact_vdfs = 39
|
respond_compact_vdf = 41
|
||||||
request_peers = 40
|
new_compact_vdf = 42
|
||||||
respond_peers = 41
|
request_peers = 43
|
||||||
|
respond_peers = 44
|
||||||
|
|
||||||
# Wallet protocol (wallet <-> full_node)
|
# Wallet protocol (wallet <-> full_node)
|
||||||
request_puzzle_solution = 42
|
request_puzzle_solution = 45
|
||||||
respond_puzzle_solution = 43
|
respond_puzzle_solution = 46
|
||||||
reject_puzzle_solution = 44
|
reject_puzzle_solution = 47
|
||||||
send_transaction = 45
|
send_transaction = 48
|
||||||
transaction_ack = 46
|
transaction_ack = 49
|
||||||
new_peak_wallet = 47
|
new_peak_wallet = 50
|
||||||
request_block_header = 48
|
request_block_header = 51
|
||||||
respond_block_header = 49
|
respond_block_header = 52
|
||||||
reject_header_request = 50
|
reject_header_request = 53
|
||||||
request_removals = 51
|
request_removals = 54
|
||||||
respond_removals = 52
|
respond_removals = 55
|
||||||
reject_removals_request = 53
|
reject_removals_request = 56
|
||||||
request_additions = 54
|
request_additions = 57
|
||||||
respond_additions = 55
|
respond_additions = 58
|
||||||
reject_additions_request = 56
|
reject_additions_request = 59
|
||||||
request_header_blocks = 57
|
request_header_blocks = 60
|
||||||
reject_header_blocks = 58
|
reject_header_blocks = 61
|
||||||
respond_header_blocks = 59
|
respond_header_blocks = 62
|
||||||
|
|
||||||
# Introducer protocol (introducer <-> full_node)
|
# Introducer protocol (introducer <-> full_node)
|
||||||
request_peers_introducer = 60
|
request_peers_introducer = 63
|
||||||
respond_peers_introducer = 61
|
respond_peers_introducer = 64
|
||||||
|
|
||||||
# Simulator protocol
|
# Simulator protocol
|
||||||
farm_new_block = 62
|
farm_new_block = 65
|
||||||
|
|
|
@ -4,7 +4,7 @@ from src.types.blockchain_format.sized_bytes import bytes32
|
||||||
from src.util.ints import uint16, uint8
|
from src.util.ints import uint16, uint8
|
||||||
from src.util.streamable import streamable, Streamable
|
from src.util.streamable import streamable, Streamable
|
||||||
|
|
||||||
protocol_version = "0.0.30"
|
protocol_version = "0.0.31"
|
||||||
|
|
||||||
"""
|
"""
|
||||||
Handshake when establishing a connection between two servers.
|
Handshake when establishing a connection between two servers.
|
||||||
|
|
|
@ -10,7 +10,7 @@ from src.types.blockchain_format.reward_chain_block import (
|
||||||
from src.types.blockchain_format.sized_bytes import bytes32
|
from src.types.blockchain_format.sized_bytes import bytes32
|
||||||
from src.types.blockchain_format.sub_epoch_summary import SubEpochSummary
|
from src.types.blockchain_format.sub_epoch_summary import SubEpochSummary
|
||||||
from src.types.blockchain_format.vdf import VDFInfo, VDFProof
|
from src.types.blockchain_format.vdf import VDFInfo, VDFProof
|
||||||
from src.util.ints import uint8, uint64, uint128
|
from src.util.ints import uint8, uint32, uint64, uint128
|
||||||
from src.util.streamable import streamable, Streamable
|
from src.util.streamable import streamable, Streamable
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
@ -73,3 +73,22 @@ class NewSignagePointVDF(Streamable):
|
||||||
@streamable
|
@streamable
|
||||||
class NewEndOfSubSlotVDF(Streamable):
|
class NewEndOfSubSlotVDF(Streamable):
|
||||||
end_of_sub_slot_bundle: EndOfSubSlotBundle
|
end_of_sub_slot_bundle: EndOfSubSlotBundle
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(frozen=True)
|
||||||
|
@streamable
|
||||||
|
class RequestCompactProofOfTime:
|
||||||
|
new_proof_of_time: VDFInfo
|
||||||
|
header_hash: bytes32
|
||||||
|
height: uint32
|
||||||
|
field_vdf: uint8
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(frozen=True)
|
||||||
|
@streamable
|
||||||
|
class RespondCompactProofOfTime:
|
||||||
|
vdf_info: VDFInfo
|
||||||
|
vdf_proof: VDFProof
|
||||||
|
header_hash: bytes32
|
||||||
|
height: uint32
|
||||||
|
field_vdf: uint8
|
||||||
|
|
|
@ -4,6 +4,7 @@ import io
|
||||||
import logging
|
import logging
|
||||||
import time
|
import time
|
||||||
import traceback
|
import traceback
|
||||||
|
import random
|
||||||
from typing import Dict, List, Optional, Tuple, Callable
|
from typing import Dict, List, Optional, Tuple, Callable
|
||||||
|
|
||||||
from chiavdf import create_discriminant
|
from chiavdf import create_discriminant
|
||||||
|
@ -85,11 +86,15 @@ class Timelord:
|
||||||
self.total_unfinished: int = 0
|
self.total_unfinished: int = 0
|
||||||
self.total_infused: int = 0
|
self.total_infused: int = 0
|
||||||
self.state_changed_callback: Optional[Callable] = None
|
self.state_changed_callback: Optional[Callable] = None
|
||||||
|
self.sanitizer_mode = self.config["sanitizer_mode"]
|
||||||
|
self.pending_bluebox_info: List[timelord_protocol.RequestCompactProofOfTime] = []
|
||||||
|
|
||||||
async def _start(self):
|
async def _start(self):
|
||||||
self.lock: asyncio.Lock = asyncio.Lock()
|
self.lock: asyncio.Lock = asyncio.Lock()
|
||||||
self.main_loop = asyncio.create_task(self._manage_chains())
|
if not self.sanitizer_mode:
|
||||||
|
self.main_loop = asyncio.create_task(self._manage_chains())
|
||||||
|
else:
|
||||||
|
self.main_loop = asyncio.create_task(self._manage_discriminant_queue_sanitizer())
|
||||||
self.vdf_server = await asyncio.start_server(
|
self.vdf_server = await asyncio.start_server(
|
||||||
self._handle_client,
|
self._handle_client,
|
||||||
self.config["vdf_server"]["host"],
|
self.config["vdf_server"]["host"],
|
||||||
|
@ -213,7 +218,7 @@ class Timelord:
|
||||||
# Adjust all unfinished blocks iterations to the peak.
|
# Adjust all unfinished blocks iterations to the peak.
|
||||||
new_unfinished_blocks = []
|
new_unfinished_blocks = []
|
||||||
self.proofs_finished = []
|
self.proofs_finished = []
|
||||||
for chain in Chain:
|
for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN, Chain.INFUSED_CHALLENGE_CHAIN]:
|
||||||
self.iters_to_submit[chain] = []
|
self.iters_to_submit[chain] = []
|
||||||
self.iters_submitted[chain] = []
|
self.iters_submitted[chain] = []
|
||||||
self.iteration_to_proof_type = {}
|
self.iteration_to_proof_type = {}
|
||||||
|
@ -290,7 +295,7 @@ class Timelord:
|
||||||
)
|
)
|
||||||
|
|
||||||
async def _submit_iterations(self):
|
async def _submit_iterations(self):
|
||||||
for chain in Chain:
|
for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN, Chain.INFUSED_CHALLENGE_CHAIN]:
|
||||||
if chain in self.allows_iters:
|
if chain in self.allows_iters:
|
||||||
# log.info(f"Trying to submit for chain {chain}.")
|
# log.info(f"Trying to submit for chain {chain}.")
|
||||||
_, _, writer = self.chain_type_to_stream[chain]
|
_, _, writer = self.chain_type_to_stream[chain]
|
||||||
|
@ -744,6 +749,11 @@ class Timelord:
|
||||||
ip: str,
|
ip: str,
|
||||||
reader: asyncio.StreamReader,
|
reader: asyncio.StreamReader,
|
||||||
writer: asyncio.StreamWriter,
|
writer: asyncio.StreamWriter,
|
||||||
|
# Data specific only when running in bluebox mode.
|
||||||
|
bluebox_iteration: Optional[uint64] = None,
|
||||||
|
header_hash: Optional[bytes32] = None,
|
||||||
|
height: Optional[uint32] = None,
|
||||||
|
field_vdf: Optional[uint8] = None,
|
||||||
):
|
):
|
||||||
disc: int = create_discriminant(challenge, self.constants.DISCRIMINANT_SIZE_BITS)
|
disc: int = create_discriminant(challenge, self.constants.DISCRIMINANT_SIZE_BITS)
|
||||||
|
|
||||||
|
@ -751,12 +761,15 @@ class Timelord:
|
||||||
# Depending on the flags 'fast_algorithm' and 'sanitizer_mode',
|
# Depending on the flags 'fast_algorithm' and 'sanitizer_mode',
|
||||||
# the timelord tells the vdf_client what to execute.
|
# the timelord tells the vdf_client what to execute.
|
||||||
async with self.lock:
|
async with self.lock:
|
||||||
if self.config["fast_algorithm"]:
|
if self.sanitizer_mode:
|
||||||
# Run n-wesolowski (fast) algorithm.
|
writer.write(b"S")
|
||||||
writer.write(b"N")
|
|
||||||
else:
|
else:
|
||||||
# Run two-wesolowski (slow) algorithm.
|
if self.config["fast_algorithm"]:
|
||||||
writer.write(b"T")
|
# Run n-wesolowski (fast) algorithm.
|
||||||
|
writer.write(b"N")
|
||||||
|
else:
|
||||||
|
# Run two-wesolowski (slow) algorithm.
|
||||||
|
writer.write(b"T")
|
||||||
await writer.drain()
|
await writer.drain()
|
||||||
|
|
||||||
prefix = str(len(str(disc)))
|
prefix = str(len(str(disc)))
|
||||||
|
@ -785,8 +798,20 @@ class Timelord:
|
||||||
return
|
return
|
||||||
|
|
||||||
log.info("Got handshake with VDF client.")
|
log.info("Got handshake with VDF client.")
|
||||||
async with self.lock:
|
if not self.sanitizer_mode:
|
||||||
self.allows_iters.append(chain)
|
async with self.lock:
|
||||||
|
self.allows_iters.append(chain)
|
||||||
|
else:
|
||||||
|
async with self.lock:
|
||||||
|
assert chain is Chain.BLUEBOX
|
||||||
|
assert bluebox_iteration is not None
|
||||||
|
prefix = str(len(str(bluebox_iteration)))
|
||||||
|
if len(str(bluebox_iteration)) < 10:
|
||||||
|
prefix = "0" + prefix
|
||||||
|
iter_str = prefix + str(bluebox_iteration)
|
||||||
|
writer.write(iter_str.encode())
|
||||||
|
await writer.drain()
|
||||||
|
|
||||||
# Listen to the client until "STOP" is received.
|
# Listen to the client until "STOP" is received.
|
||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
|
@ -842,13 +867,14 @@ class Timelord:
|
||||||
# Verifies our own proof just in case
|
# Verifies our own proof just in case
|
||||||
form_size = ClassgroupElement.get_size(self.constants)
|
form_size = ClassgroupElement.get_size(self.constants)
|
||||||
output = ClassgroupElement.from_bytes(y_bytes[:form_size])
|
output = ClassgroupElement.from_bytes(y_bytes[:form_size])
|
||||||
time_taken = time.time() - self.chain_start_time[chain]
|
if not self.sanitizer_mode:
|
||||||
ips = int(iterations_needed / time_taken * 10) / 10
|
time_taken = time.time() - self.chain_start_time[chain]
|
||||||
log.info(
|
ips = int(iterations_needed / time_taken * 10) / 10
|
||||||
f"Finished PoT chall:{challenge[:10].hex()}.. {iterations_needed}"
|
log.info(
|
||||||
f" iters, "
|
f"Finished PoT chall:{challenge[:10].hex()}.. {iterations_needed}"
|
||||||
f"Estimated IPS: {ips}, Chain: {chain}"
|
f" iters, "
|
||||||
)
|
f"Estimated IPS: {ips}, Chain: {chain}"
|
||||||
|
)
|
||||||
|
|
||||||
vdf_info: VDFInfo = VDFInfo(
|
vdf_info: VDFInfo = VDFInfo(
|
||||||
challenge,
|
challenge,
|
||||||
|
@ -858,11 +884,65 @@ class Timelord:
|
||||||
vdf_proof: VDFProof = VDFProof(
|
vdf_proof: VDFProof = VDFProof(
|
||||||
witness_type,
|
witness_type,
|
||||||
proof_bytes,
|
proof_bytes,
|
||||||
|
self.sanitizer_mode,
|
||||||
)
|
)
|
||||||
|
|
||||||
if not vdf_proof.is_valid(self.constants, initial_form, vdf_info):
|
if not vdf_proof.is_valid(self.constants, initial_form, vdf_info):
|
||||||
log.error("Invalid proof of time!")
|
log.error("Invalid proof of time!")
|
||||||
async with self.lock:
|
if not self.sanitizer_mode:
|
||||||
self.proofs_finished.append((chain, vdf_info, vdf_proof))
|
async with self.lock:
|
||||||
|
self.proofs_finished.append((chain, vdf_info, vdf_proof))
|
||||||
|
else:
|
||||||
|
async with self.lock:
|
||||||
|
writer.write(b"010")
|
||||||
|
await writer.drain()
|
||||||
|
assert header_hash is not None
|
||||||
|
assert field_vdf is not None
|
||||||
|
assert height is not None
|
||||||
|
response = timelord_protocol.RespondCompactProofOfTime(
|
||||||
|
vdf_info, vdf_proof, header_hash, height, field_vdf
|
||||||
|
)
|
||||||
|
if self.server is not None:
|
||||||
|
message = make_msg(ProtocolMessageTypes.respond_compact_vdf_timelord, response)
|
||||||
|
await self.server.send_to_all([message], NodeType.FULL_NODE)
|
||||||
except ConnectionResetError as e:
|
except ConnectionResetError as e:
|
||||||
log.info(f"Connection reset with VDF client {e}")
|
log.info(f"Connection reset with VDF client {e}")
|
||||||
|
|
||||||
|
async def _manage_discriminant_queue_sanitizer(self):
|
||||||
|
while not self._shut_down:
|
||||||
|
async with self.lock:
|
||||||
|
try:
|
||||||
|
while len(self.pending_bluebox_info) > 0 and len(self.free_clients) > 0:
|
||||||
|
# Select randomly the field_vdf we're creating a compact vdf for.
|
||||||
|
# This is done because CC_SP and CC_IP are more frequent than
|
||||||
|
# CC_EOS and ICC_EOS. This guarantees everything is picked uniformly.
|
||||||
|
target_field_vdf = random.randint(1, 4)
|
||||||
|
info = next(
|
||||||
|
(info for info in self.pending_bluebox_info if info.field_vdf == target_field_vdf),
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
if info is None:
|
||||||
|
# Nothing found with target_field_vdf, just pick the first VDFInfo.
|
||||||
|
info = self.pending_bluebox_info[0]
|
||||||
|
ip, reader, writer = self.free_clients[0]
|
||||||
|
self.process_communication_tasks.append(
|
||||||
|
asyncio.create_task(
|
||||||
|
self._do_process_communication(
|
||||||
|
Chain.BLUEBOX,
|
||||||
|
info.new_proof_of_time.challenge,
|
||||||
|
ClassgroupElement.get_default_element(),
|
||||||
|
ip,
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
info.new_proof_of_time.number_of_iterations,
|
||||||
|
info.header_hash,
|
||||||
|
info.height,
|
||||||
|
info.field_vdf,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
)
|
||||||
|
self.pending_bluebox_info.remove(info)
|
||||||
|
self.free_clients = self.free_clients[1:]
|
||||||
|
except Exception as e:
|
||||||
|
log.error(f"Exception manage discriminant queue: {e}")
|
||||||
|
await asyncio.sleep(0.1)
|
||||||
|
|
|
@ -21,6 +21,8 @@ class TimelordAPI:
|
||||||
@api_request
|
@api_request
|
||||||
async def new_peak_timelord(self, new_peak: timelord_protocol.NewPeakTimelord):
|
async def new_peak_timelord(self, new_peak: timelord_protocol.NewPeakTimelord):
|
||||||
async with self.timelord.lock:
|
async with self.timelord.lock:
|
||||||
|
if self.timelord.sanitizer_mode:
|
||||||
|
return
|
||||||
if new_peak.reward_chain_block.weight > self.timelord.last_state.get_weight():
|
if new_peak.reward_chain_block.weight > self.timelord.last_state.get_weight():
|
||||||
log.info("Not skipping peak, don't have. Maybe we are not the fastest timelord")
|
log.info("Not skipping peak, don't have. Maybe we are not the fastest timelord")
|
||||||
log.info(
|
log.info(
|
||||||
|
@ -42,6 +44,8 @@ class TimelordAPI:
|
||||||
@api_request
|
@api_request
|
||||||
async def new_unfinished_block(self, new_unfinished_block: timelord_protocol.NewUnfinishedBlock):
|
async def new_unfinished_block(self, new_unfinished_block: timelord_protocol.NewUnfinishedBlock):
|
||||||
async with self.timelord.lock:
|
async with self.timelord.lock:
|
||||||
|
if self.timelord.sanitizer_mode:
|
||||||
|
return
|
||||||
try:
|
try:
|
||||||
sp_iters, ip_iters = iters_from_block(
|
sp_iters, ip_iters = iters_from_block(
|
||||||
self.timelord.constants,
|
self.timelord.constants,
|
||||||
|
@ -64,3 +68,10 @@ class TimelordAPI:
|
||||||
self.timelord.iters_to_submit[Chain.INFUSED_CHALLENGE_CHAIN].append(new_block_iters)
|
self.timelord.iters_to_submit[Chain.INFUSED_CHALLENGE_CHAIN].append(new_block_iters)
|
||||||
self.timelord.iteration_to_proof_type[new_block_iters] = IterationType.INFUSION_POINT
|
self.timelord.iteration_to_proof_type[new_block_iters] = IterationType.INFUSION_POINT
|
||||||
self.timelord.total_unfinished += 1
|
self.timelord.total_unfinished += 1
|
||||||
|
|
||||||
|
@api_request
|
||||||
|
async def request_compact_proof_of_time(self, vdf_info: timelord_protocol.RequestCompactProofOfTime):
|
||||||
|
async with self.timelord.lock:
|
||||||
|
if not self.timelord.sanitizer_mode:
|
||||||
|
return
|
||||||
|
self.timelord.pending_bluebox_info.append(vdf_info)
|
||||||
|
|
|
@ -5,6 +5,7 @@ class Chain(Enum):
|
||||||
CHALLENGE_CHAIN = 1
|
CHALLENGE_CHAIN = 1
|
||||||
REWARD_CHAIN = 2
|
REWARD_CHAIN = 2
|
||||||
INFUSED_CHALLENGE_CHAIN = 3
|
INFUSED_CHALLENGE_CHAIN = 3
|
||||||
|
BLUEBOX = 4
|
||||||
|
|
||||||
|
|
||||||
class IterationType(Enum):
|
class IterationType(Enum):
|
||||||
|
|
|
@ -10,6 +10,7 @@ from chiavdf import verify_n_wesolowski
|
||||||
from src.util.ints import uint8, uint64
|
from src.util.ints import uint8, uint64
|
||||||
from src.util.streamable import Streamable, streamable
|
from src.util.streamable import Streamable, streamable
|
||||||
from src.consensus.constants import ConsensusConstants
|
from src.consensus.constants import ConsensusConstants
|
||||||
|
from enum import IntEnum
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
@ -49,6 +50,7 @@ class VDFInfo(Streamable):
|
||||||
class VDFProof(Streamable):
|
class VDFProof(Streamable):
|
||||||
witness_type: uint8
|
witness_type: uint8
|
||||||
witness: bytes
|
witness: bytes
|
||||||
|
normalized_to_identity: bool
|
||||||
|
|
||||||
def is_valid(
|
def is_valid(
|
||||||
self,
|
self,
|
||||||
|
@ -79,3 +81,11 @@ class VDFProof(Streamable):
|
||||||
)
|
)
|
||||||
except Exception:
|
except Exception:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
# Stores, for a given VDF, the field that uses it.
|
||||||
|
class CompressibleVDFField(IntEnum):
|
||||||
|
CC_EOS_VDF = 1
|
||||||
|
ICC_EOS_VDF = 2
|
||||||
|
CC_SP_VDF = 3
|
||||||
|
CC_IP_VDF = 4
|
||||||
|
|
|
@ -136,6 +136,26 @@ class FullBlock(Streamable):
|
||||||
|
|
||||||
return removals, additions
|
return removals, additions
|
||||||
|
|
||||||
|
def is_fully_compactified(self) -> bool:
|
||||||
|
for sub_slot in self.finished_sub_slots:
|
||||||
|
if (
|
||||||
|
sub_slot.proofs.challenge_chain_slot_proof.witness_type != 0
|
||||||
|
or not sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
|
||||||
|
):
|
||||||
|
return False
|
||||||
|
if sub_slot.proofs.infused_challenge_chain_slot_proof is not None and (
|
||||||
|
sub_slot.proofs.infused_challenge_chain_slot_proof.witness_type != 0
|
||||||
|
or not sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
|
||||||
|
):
|
||||||
|
return False
|
||||||
|
if self.challenge_chain_sp_proof is not None and (
|
||||||
|
self.challenge_chain_sp_proof.witness_type != 0 or not self.challenge_chain_sp_proof.normalized_to_identity
|
||||||
|
):
|
||||||
|
return False
|
||||||
|
if self.challenge_chain_ip_proof.witness_type != 0 or not self.challenge_chain_ip_proof.normalized_to_identity:
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
def additions_for_npc(npc_list: List[NPC]) -> List[Coin]:
|
def additions_for_npc(npc_list: List[NPC]) -> List[Coin]:
|
||||||
additions: List[Coin] = []
|
additions: List[Coin] = []
|
||||||
|
|
|
@ -267,6 +267,7 @@ class BlockTools:
|
||||||
force_overflow: bool = False,
|
force_overflow: bool = False,
|
||||||
skip_slots: int = 0, # Force at least this number of empty slots before the first SB
|
skip_slots: int = 0, # Force at least this number of empty slots before the first SB
|
||||||
guarantee_transaction_block: bool = False, # Force that this block must be a tx block
|
guarantee_transaction_block: bool = False, # Force that this block must be a tx block
|
||||||
|
normalized_to_identity: bool = False, # CC_EOS, ICC_EOS, CC_SP, CC_IP vdf proofs are normalized to identity.
|
||||||
) -> List[FullBlock]:
|
) -> List[FullBlock]:
|
||||||
assert num_blocks > 0
|
assert num_blocks > 0
|
||||||
if block_list_input is not None:
|
if block_list_input is not None:
|
||||||
|
@ -364,6 +365,7 @@ class BlockTools:
|
||||||
uint8(signage_point_index),
|
uint8(signage_point_index),
|
||||||
finished_sub_slots_at_sp,
|
finished_sub_slots_at_sp,
|
||||||
sub_slot_iters,
|
sub_slot_iters,
|
||||||
|
normalized_to_identity,
|
||||||
)
|
)
|
||||||
if signage_point_index == 0:
|
if signage_point_index == 0:
|
||||||
cc_sp_output_hash: bytes32 = slot_cc_challenge
|
cc_sp_output_hash: bytes32 = slot_cc_challenge
|
||||||
|
@ -435,6 +437,7 @@ class BlockTools:
|
||||||
signage_point,
|
signage_point,
|
||||||
latest_block,
|
latest_block,
|
||||||
seed,
|
seed,
|
||||||
|
normalized_to_identity=normalized_to_identity,
|
||||||
)
|
)
|
||||||
if block_record.is_transaction_block:
|
if block_record.is_transaction_block:
|
||||||
transaction_data_included = True
|
transaction_data_included = True
|
||||||
|
@ -495,7 +498,14 @@ class BlockTools:
|
||||||
# End of slot vdf info for icc and cc have to be from challenge block or start of slot, respectively,
|
# End of slot vdf info for icc and cc have to be from challenge block or start of slot, respectively,
|
||||||
# in order for light clients to validate.
|
# in order for light clients to validate.
|
||||||
cc_vdf = VDFInfo(cc_vdf.challenge, sub_slot_iters, cc_vdf.output)
|
cc_vdf = VDFInfo(cc_vdf.challenge, sub_slot_iters, cc_vdf.output)
|
||||||
|
if normalized_to_identity:
|
||||||
|
_, cc_proof = get_vdf_info_and_proof(
|
||||||
|
constants,
|
||||||
|
ClassgroupElement.get_default_element(),
|
||||||
|
cc_vdf.challenge,
|
||||||
|
sub_slot_iters,
|
||||||
|
True,
|
||||||
|
)
|
||||||
if pending_ses:
|
if pending_ses:
|
||||||
sub_epoch_summary: Optional[SubEpochSummary] = None
|
sub_epoch_summary: Optional[SubEpochSummary] = None
|
||||||
else:
|
else:
|
||||||
|
@ -538,6 +548,14 @@ class BlockTools:
|
||||||
icc_eos_iters,
|
icc_eos_iters,
|
||||||
icc_ip_vdf.output,
|
icc_ip_vdf.output,
|
||||||
)
|
)
|
||||||
|
if normalized_to_identity:
|
||||||
|
_, icc_ip_proof = get_vdf_info_and_proof(
|
||||||
|
constants,
|
||||||
|
ClassgroupElement.get_default_element(),
|
||||||
|
icc_ip_vdf.challenge,
|
||||||
|
icc_eos_iters,
|
||||||
|
True,
|
||||||
|
)
|
||||||
icc_sub_slot: Optional[InfusedChallengeChainSubSlot] = InfusedChallengeChainSubSlot(icc_ip_vdf)
|
icc_sub_slot: Optional[InfusedChallengeChainSubSlot] = InfusedChallengeChainSubSlot(icc_ip_vdf)
|
||||||
assert icc_sub_slot is not None
|
assert icc_sub_slot is not None
|
||||||
icc_sub_slot_hash = icc_sub_slot.get_hash() if latest_block.deficit == 0 else None
|
icc_sub_slot_hash = icc_sub_slot.get_hash() if latest_block.deficit == 0 else None
|
||||||
|
@ -600,6 +618,7 @@ class BlockTools:
|
||||||
uint8(signage_point_index),
|
uint8(signage_point_index),
|
||||||
finished_sub_slots_eos,
|
finished_sub_slots_eos,
|
||||||
sub_slot_iters,
|
sub_slot_iters,
|
||||||
|
normalized_to_identity,
|
||||||
)
|
)
|
||||||
if signage_point_index == 0:
|
if signage_point_index == 0:
|
||||||
cc_sp_output_hash = slot_cc_challenge
|
cc_sp_output_hash = slot_cc_challenge
|
||||||
|
@ -661,6 +680,7 @@ class BlockTools:
|
||||||
seed,
|
seed,
|
||||||
overflow_cc_challenge=overflow_cc_challenge,
|
overflow_cc_challenge=overflow_cc_challenge,
|
||||||
overflow_rc_challenge=overflow_rc_challenge,
|
overflow_rc_challenge=overflow_rc_challenge,
|
||||||
|
normalized_to_identity=normalized_to_identity,
|
||||||
)
|
)
|
||||||
|
|
||||||
if block_record.is_transaction_block:
|
if block_record.is_transaction_block:
|
||||||
|
@ -942,6 +962,7 @@ def get_signage_point(
|
||||||
signage_point_index: uint8,
|
signage_point_index: uint8,
|
||||||
finished_sub_slots: List[EndOfSubSlotBundle],
|
finished_sub_slots: List[EndOfSubSlotBundle],
|
||||||
sub_slot_iters: uint64,
|
sub_slot_iters: uint64,
|
||||||
|
normalized_to_identity: bool = False,
|
||||||
) -> SignagePoint:
|
) -> SignagePoint:
|
||||||
if signage_point_index == 0:
|
if signage_point_index == 0:
|
||||||
return SignagePoint(None, None, None, None)
|
return SignagePoint(None, None, None, None)
|
||||||
|
@ -981,6 +1002,14 @@ def get_signage_point(
|
||||||
rc_vdf_iters,
|
rc_vdf_iters,
|
||||||
)
|
)
|
||||||
cc_sp_vdf = replace(cc_sp_vdf, number_of_iterations=sp_iters)
|
cc_sp_vdf = replace(cc_sp_vdf, number_of_iterations=sp_iters)
|
||||||
|
if normalized_to_identity:
|
||||||
|
_, cc_sp_proof = get_vdf_info_and_proof(
|
||||||
|
constants,
|
||||||
|
ClassgroupElement.get_default_element(),
|
||||||
|
cc_sp_vdf.challenge,
|
||||||
|
sp_iters,
|
||||||
|
True,
|
||||||
|
)
|
||||||
return SignagePoint(cc_sp_vdf, cc_sp_proof, rc_sp_vdf, rc_sp_proof)
|
return SignagePoint(cc_sp_vdf, cc_sp_proof, rc_sp_vdf, rc_sp_proof)
|
||||||
|
|
||||||
|
|
||||||
|
@ -999,6 +1028,7 @@ def finish_block(
|
||||||
latest_block: BlockRecord,
|
latest_block: BlockRecord,
|
||||||
sub_slot_iters: uint64,
|
sub_slot_iters: uint64,
|
||||||
difficulty: uint64,
|
difficulty: uint64,
|
||||||
|
normalized_to_identity: bool = False,
|
||||||
):
|
):
|
||||||
is_overflow = is_overflow_block(constants, signage_point_index)
|
is_overflow = is_overflow_block(constants, signage_point_index)
|
||||||
cc_vdf_challenge = slot_cc_challenge
|
cc_vdf_challenge = slot_cc_challenge
|
||||||
|
@ -1017,6 +1047,14 @@ def finish_block(
|
||||||
new_ip_iters,
|
new_ip_iters,
|
||||||
)
|
)
|
||||||
cc_ip_vdf = replace(cc_ip_vdf, number_of_iterations=ip_iters)
|
cc_ip_vdf = replace(cc_ip_vdf, number_of_iterations=ip_iters)
|
||||||
|
if normalized_to_identity:
|
||||||
|
_, cc_ip_proof = get_vdf_info_and_proof(
|
||||||
|
constants,
|
||||||
|
ClassgroupElement.get_default_element(),
|
||||||
|
cc_ip_vdf.challenge,
|
||||||
|
ip_iters,
|
||||||
|
True,
|
||||||
|
)
|
||||||
deficit = calculate_deficit(
|
deficit = calculate_deficit(
|
||||||
constants,
|
constants,
|
||||||
uint32(latest_block.height + 1),
|
uint32(latest_block.height + 1),
|
||||||
|
@ -1215,6 +1253,7 @@ def get_full_block_and_block_record(
|
||||||
seed: bytes = b"",
|
seed: bytes = b"",
|
||||||
overflow_cc_challenge: bytes32 = None,
|
overflow_cc_challenge: bytes32 = None,
|
||||||
overflow_rc_challenge: bytes32 = None,
|
overflow_rc_challenge: bytes32 = None,
|
||||||
|
normalized_to_identity: bool = False,
|
||||||
) -> Tuple[FullBlock, BlockRecord]:
|
) -> Tuple[FullBlock, BlockRecord]:
|
||||||
sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
|
sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
|
||||||
ip_iters = calculate_ip_iters(constants, sub_slot_iters, signage_point_index, required_iters)
|
ip_iters = calculate_ip_iters(constants, sub_slot_iters, signage_point_index, required_iters)
|
||||||
|
@ -1261,6 +1300,7 @@ def get_full_block_and_block_record(
|
||||||
prev_block,
|
prev_block,
|
||||||
sub_slot_iters,
|
sub_slot_iters,
|
||||||
difficulty,
|
difficulty,
|
||||||
|
normalized_to_identity,
|
||||||
)
|
)
|
||||||
|
|
||||||
return full_block, block_record
|
return full_block, block_record
|
||||||
|
|
|
@ -213,10 +213,10 @@ full_node:
|
||||||
recent_peer_threshold: 6000
|
recent_peer_threshold: 6000
|
||||||
|
|
||||||
# Send to a Bluebox (sanatizing timelord) uncompact blocks once every
|
# Send to a Bluebox (sanatizing timelord) uncompact blocks once every
|
||||||
# 'send_uncompact_interval' seconds. The recommended value is
|
# 'send_uncompact_interval' seconds. Set to 0 if you don't use this feature.
|
||||||
# send_uncompact_interval=1800. This sends 50 proofs every 30 minutes.
|
|
||||||
# Set to 0 if you don't use this feature.
|
|
||||||
send_uncompact_interval: 0
|
send_uncompact_interval: 0
|
||||||
|
# At every 'send_uncompact_interval' seconds, send blueboxes 'target_uncompact_proofs' proofs to be normalized.
|
||||||
|
target_uncompact_proofs: 100
|
||||||
|
|
||||||
farmer_peer:
|
farmer_peer:
|
||||||
host: *self_hostname
|
host: *self_hostname
|
||||||
|
|
|
@ -14,6 +14,7 @@ def get_vdf_info_and_proof(
|
||||||
vdf_input: ClassgroupElement,
|
vdf_input: ClassgroupElement,
|
||||||
challenge_hash: bytes32,
|
challenge_hash: bytes32,
|
||||||
number_iters: uint64,
|
number_iters: uint64,
|
||||||
|
normalized_to_identity: bool = False,
|
||||||
) -> Tuple[VDFInfo, VDFProof]:
|
) -> Tuple[VDFInfo, VDFProof]:
|
||||||
form_size = ClassgroupElement.get_size(constants)
|
form_size = ClassgroupElement.get_size(constants)
|
||||||
result: bytes = prove(
|
result: bytes = prove(
|
||||||
|
@ -25,4 +26,4 @@ def get_vdf_info_and_proof(
|
||||||
|
|
||||||
output = ClassgroupElement.from_bytes(result[:form_size])
|
output = ClassgroupElement.from_bytes(result[:form_size])
|
||||||
proof_bytes = result[form_size : 2 * form_size]
|
proof_bytes = result[form_size : 2 * form_size]
|
||||||
return VDFInfo(challenge_hash, number_iters, output), VDFProof(uint8(0), proof_bytes)
|
return VDFInfo(challenge_hash, number_iters, output), VDFProof(uint8(0), proof_bytes, normalized_to_identity)
|
||||||
|
|
|
@ -28,6 +28,7 @@ from tests.core.fixtures import empty_blockchain # noqa: F401
|
||||||
from tests.core.fixtures import default_1000_blocks # noqa: F401
|
from tests.core.fixtures import default_1000_blocks # noqa: F401
|
||||||
from tests.core.fixtures import default_400_blocks # noqa: F401
|
from tests.core.fixtures import default_400_blocks # noqa: F401
|
||||||
from tests.core.fixtures import default_10000_blocks # noqa: F401
|
from tests.core.fixtures import default_10000_blocks # noqa: F401
|
||||||
|
from tests.core.fixtures import default_10000_blocks_compact # noqa: F401
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
bad_element = ClassgroupElement.from_bytes(b"\x00")
|
bad_element = ClassgroupElement.from_bytes(b"\x00")
|
||||||
|
@ -507,7 +508,7 @@ class TestBlockHeaderValidation:
|
||||||
new_finished_ss_5 = recursive_replace(
|
new_finished_ss_5 = recursive_replace(
|
||||||
block.finished_sub_slots[-1],
|
block.finished_sub_slots[-1],
|
||||||
"proofs.infused_challenge_chain_slot_proof",
|
"proofs.infused_challenge_chain_slot_proof",
|
||||||
VDFProof(uint8(0), b"1239819023890"),
|
VDFProof(uint8(0), b"1239819023890", False),
|
||||||
)
|
)
|
||||||
block_bad_5 = recursive_replace(
|
block_bad_5 = recursive_replace(
|
||||||
block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_5]
|
block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_5]
|
||||||
|
@ -738,7 +739,7 @@ class TestBlockHeaderValidation:
|
||||||
new_finished_ss_5 = recursive_replace(
|
new_finished_ss_5 = recursive_replace(
|
||||||
block.finished_sub_slots[-1],
|
block.finished_sub_slots[-1],
|
||||||
"proofs.challenge_chain_slot_proof",
|
"proofs.challenge_chain_slot_proof",
|
||||||
VDFProof(uint8(0), b"1239819023890"),
|
VDFProof(uint8(0), b"1239819023890", False),
|
||||||
)
|
)
|
||||||
block_bad_5 = recursive_replace(
|
block_bad_5 = recursive_replace(
|
||||||
block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_5]
|
block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_5]
|
||||||
|
@ -808,7 +809,7 @@ class TestBlockHeaderValidation:
|
||||||
new_finished_ss_5 = recursive_replace(
|
new_finished_ss_5 = recursive_replace(
|
||||||
block.finished_sub_slots[-1],
|
block.finished_sub_slots[-1],
|
||||||
"proofs.reward_chain_slot_proof",
|
"proofs.reward_chain_slot_proof",
|
||||||
VDFProof(uint8(0), b"1239819023890"),
|
VDFProof(uint8(0), b"1239819023890", False),
|
||||||
)
|
)
|
||||||
block_bad_5 = recursive_replace(
|
block_bad_5 = recursive_replace(
|
||||||
block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_5]
|
block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_5]
|
||||||
|
@ -1053,7 +1054,7 @@ class TestBlockHeaderValidation:
|
||||||
block_bad = recursive_replace(
|
block_bad = recursive_replace(
|
||||||
blocks[-1],
|
blocks[-1],
|
||||||
"reward_chain_sp_proof",
|
"reward_chain_sp_proof",
|
||||||
VDFProof(uint8(0), std_hash(b"")),
|
VDFProof(uint8(0), std_hash(b""), False),
|
||||||
)
|
)
|
||||||
assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_RC_SP_VDF
|
assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_RC_SP_VDF
|
||||||
return
|
return
|
||||||
|
@ -1095,7 +1096,7 @@ class TestBlockHeaderValidation:
|
||||||
block_bad = recursive_replace(
|
block_bad = recursive_replace(
|
||||||
blocks[-1],
|
blocks[-1],
|
||||||
"challenge_chain_sp_proof",
|
"challenge_chain_sp_proof",
|
||||||
VDFProof(uint8(0), std_hash(b"")),
|
VDFProof(uint8(0), std_hash(b""), False),
|
||||||
)
|
)
|
||||||
assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_CC_SP_VDF
|
assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_CC_SP_VDF
|
||||||
return
|
return
|
||||||
|
@ -1412,7 +1413,7 @@ class TestBlockHeaderValidation:
|
||||||
block_bad = recursive_replace(
|
block_bad = recursive_replace(
|
||||||
blocks[-1],
|
blocks[-1],
|
||||||
"challenge_chain_ip_proof",
|
"challenge_chain_ip_proof",
|
||||||
VDFProof(uint8(0), std_hash(b"")),
|
VDFProof(uint8(0), std_hash(b""), False),
|
||||||
)
|
)
|
||||||
assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_CC_IP_VDF
|
assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_CC_IP_VDF
|
||||||
|
|
||||||
|
@ -1440,7 +1441,7 @@ class TestBlockHeaderValidation:
|
||||||
block_bad = recursive_replace(
|
block_bad = recursive_replace(
|
||||||
blocks[-1],
|
blocks[-1],
|
||||||
"reward_chain_ip_proof",
|
"reward_chain_ip_proof",
|
||||||
VDFProof(uint8(0), std_hash(b"")),
|
VDFProof(uint8(0), std_hash(b""), False),
|
||||||
)
|
)
|
||||||
assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_RC_IP_VDF
|
assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_RC_IP_VDF
|
||||||
|
|
||||||
|
@ -1471,7 +1472,7 @@ class TestBlockHeaderValidation:
|
||||||
block_bad = recursive_replace(
|
block_bad = recursive_replace(
|
||||||
blocks[-1],
|
blocks[-1],
|
||||||
"infused_challenge_chain_ip_proof",
|
"infused_challenge_chain_ip_proof",
|
||||||
VDFProof(uint8(0), std_hash(b"")),
|
VDFProof(uint8(0), std_hash(b""), False),
|
||||||
)
|
)
|
||||||
assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_ICC_VDF
|
assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_ICC_VDF
|
||||||
|
|
||||||
|
@ -1581,6 +1582,13 @@ class TestReorgs:
|
||||||
assert b.get_peak().weight > chain_1_weight
|
assert b.get_peak().weight > chain_1_weight
|
||||||
assert b.get_peak().height < chain_1_height
|
assert b.get_peak().height < chain_1_height
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_long_compact_blockchain(self, empty_blockchain, default_10000_blocks_compact):
|
||||||
|
b = empty_blockchain
|
||||||
|
for block in default_10000_blocks_compact:
|
||||||
|
assert (await b.receive_block(block))[0] == ReceiveBlockResult.NEW_PEAK
|
||||||
|
assert b.get_peak().height == len(default_10000_blocks_compact) - 1
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
@pytest.mark.asyncio
|
||||||
async def test_reorg_from_genesis(self, empty_blockchain):
|
async def test_reorg_from_genesis(self, empty_blockchain):
|
||||||
b = empty_blockchain
|
b = empty_blockchain
|
|
@ -33,7 +33,7 @@ async def empty_blockchain():
|
||||||
db_path.unlink()
|
db_path.unlink()
|
||||||
|
|
||||||
|
|
||||||
block_format_version = "b28_1"
|
block_format_version = "rc4_nofixtureyet"
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
@pytest.fixture(scope="session")
|
||||||
|
@ -63,7 +63,18 @@ async def default_20000_blocks():
|
||||||
return persistent_blocks(20000, f"test_blocks_20000_{block_format_version}.db")
|
return persistent_blocks(20000, f"test_blocks_20000_{block_format_version}.db")
|
||||||
|
|
||||||
|
|
||||||
def persistent_blocks(num_of_blocks: int, db_name: str, seed: bytes = b"", empty_sub_slots=0):
|
@pytest.fixture(scope="session")
|
||||||
|
async def default_10000_blocks_compact():
|
||||||
|
return persistent_blocks(10000, f"test_blocks_10000_compact_{block_format_version}.db", normalized_to_identity=True)
|
||||||
|
|
||||||
|
|
||||||
|
def persistent_blocks(
|
||||||
|
num_of_blocks: int,
|
||||||
|
db_name: str,
|
||||||
|
seed: bytes = b"",
|
||||||
|
empty_sub_slots=0,
|
||||||
|
normalized_to_identity: bool = False,
|
||||||
|
):
|
||||||
# try loading from disc, if not create new blocks.db file
|
# try loading from disc, if not create new blocks.db file
|
||||||
# TODO hash fixtures.py and blocktool.py, add to path, delete if the files changed
|
# TODO hash fixtures.py and blocktool.py, add to path, delete if the files changed
|
||||||
block_path_dir = Path("~/.chia/blocks").expanduser()
|
block_path_dir = Path("~/.chia/blocks").expanduser()
|
||||||
|
@ -85,12 +96,19 @@ def persistent_blocks(num_of_blocks: int, db_name: str, seed: bytes = b"", empty
|
||||||
except EOFError:
|
except EOFError:
|
||||||
print("\n error reading db file")
|
print("\n error reading db file")
|
||||||
|
|
||||||
return new_test_db(file_path, num_of_blocks, seed, empty_sub_slots)
|
return new_test_db(file_path, num_of_blocks, seed, empty_sub_slots, normalized_to_identity)
|
||||||
|
|
||||||
|
|
||||||
def new_test_db(path: Path, num_of_blocks: int, seed: bytes, empty_sub_slots: int):
|
def new_test_db(
|
||||||
|
path: Path, num_of_blocks: int, seed: bytes, empty_sub_slots: int, normalized_to_identity: bool = False
|
||||||
|
):
|
||||||
print(f"create {path} with {num_of_blocks} blocks with ")
|
print(f"create {path} with {num_of_blocks} blocks with ")
|
||||||
blocks: List[FullBlock] = bt.get_consecutive_blocks(num_of_blocks, seed=seed, skip_slots=empty_sub_slots)
|
blocks: List[FullBlock] = bt.get_consecutive_blocks(
|
||||||
|
num_of_blocks,
|
||||||
|
seed=seed,
|
||||||
|
skip_slots=empty_sub_slots,
|
||||||
|
normalized_to_identity=normalized_to_identity,
|
||||||
|
)
|
||||||
block_bytes_list: List[bytes] = []
|
block_bytes_list: List[bytes] = []
|
||||||
for block in blocks:
|
for block in blocks:
|
||||||
block_bytes_list.append(bytes(block))
|
block_bytes_list.append(bytes(block))
|
||||||
|
|
|
@ -18,6 +18,7 @@ from src.util.ints import uint32, uint8, uint128, uint64
|
||||||
from tests.setup_nodes import test_constants, bt
|
from tests.setup_nodes import test_constants, bt
|
||||||
from src.util.block_tools import get_signage_point
|
from src.util.block_tools import get_signage_point
|
||||||
from tests.core.fixtures import empty_blockchain # noqa: F401
|
from tests.core.fixtures import empty_blockchain # noqa: F401
|
||||||
|
from src.util.block_cache import BlockCache
|
||||||
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
@ -31,9 +32,9 @@ def event_loop():
|
||||||
|
|
||||||
class TestFullNodeStore:
|
class TestFullNodeStore:
|
||||||
@pytest.mark.asyncio
|
@pytest.mark.asyncio
|
||||||
async def test_basic_store(self, empty_blockchain):
|
async def test_basic_store(self, empty_blockchain, normalized_to_identity: bool = False):
|
||||||
blockchain = empty_blockchain
|
blockchain = empty_blockchain
|
||||||
blocks = bt.get_consecutive_blocks(10, seed=b"1234")
|
blocks = bt.get_consecutive_blocks(10, seed=b"1234", normalized_to_identity=normalized_to_identity)
|
||||||
|
|
||||||
store = await FullNodeStore.create(test_constants)
|
store = await FullNodeStore.create(test_constants)
|
||||||
|
|
||||||
|
@ -77,7 +78,7 @@ class TestFullNodeStore:
|
||||||
store.remove_unfinished_block(unf_block.partial_hash)
|
store.remove_unfinished_block(unf_block.partial_hash)
|
||||||
assert store.get_unfinished_block(unf_block.partial_hash) is None
|
assert store.get_unfinished_block(unf_block.partial_hash) is None
|
||||||
|
|
||||||
blocks = bt.get_consecutive_blocks(1, skip_slots=5)
|
blocks = bt.get_consecutive_blocks(1, skip_slots=5, normalized_to_identity=normalized_to_identity)
|
||||||
sub_slots = blocks[0].finished_sub_slots
|
sub_slots = blocks[0].finished_sub_slots
|
||||||
assert len(sub_slots) == 5
|
assert len(sub_slots) == 5
|
||||||
|
|
||||||
|
@ -140,8 +141,8 @@ class TestFullNodeStore:
|
||||||
)
|
)
|
||||||
|
|
||||||
# Test adding non genesis peak directly
|
# Test adding non genesis peak directly
|
||||||
blocks = bt.get_consecutive_blocks(2, skip_slots=2)
|
blocks = bt.get_consecutive_blocks(2, skip_slots=2, normalized_to_identity=normalized_to_identity)
|
||||||
blocks = bt.get_consecutive_blocks(3, block_list_input=blocks)
|
blocks = bt.get_consecutive_blocks(3, block_list_input=blocks, normalized_to_identity=normalized_to_identity)
|
||||||
for block in blocks:
|
for block in blocks:
|
||||||
await blockchain.receive_block(block)
|
await blockchain.receive_block(block)
|
||||||
sb = blockchain.block_record(block.header_hash)
|
sb = blockchain.block_record(block.header_hash)
|
||||||
|
@ -150,7 +151,7 @@ class TestFullNodeStore:
|
||||||
assert res[0] is None
|
assert res[0] is None
|
||||||
|
|
||||||
# Add reorg blocks
|
# Add reorg blocks
|
||||||
blocks_reorg = bt.get_consecutive_blocks(20)
|
blocks_reorg = bt.get_consecutive_blocks(20, normalized_to_identity=normalized_to_identity)
|
||||||
for block in blocks_reorg:
|
for block in blocks_reorg:
|
||||||
res, _, _ = await blockchain.receive_block(block)
|
res, _, _ = await blockchain.receive_block(block)
|
||||||
if res == ReceiveBlockResult.NEW_PEAK:
|
if res == ReceiveBlockResult.NEW_PEAK:
|
||||||
|
@ -160,7 +161,9 @@ class TestFullNodeStore:
|
||||||
assert res[0] is None
|
assert res[0] is None
|
||||||
|
|
||||||
# Add slots to the end
|
# Add slots to the end
|
||||||
blocks_2 = bt.get_consecutive_blocks(1, block_list_input=blocks_reorg, skip_slots=2)
|
blocks_2 = bt.get_consecutive_blocks(
|
||||||
|
1, block_list_input=blocks_reorg, skip_slots=2, normalized_to_identity=normalized_to_identity
|
||||||
|
)
|
||||||
for slot in blocks_2[-1].finished_sub_slots:
|
for slot in blocks_2[-1].finished_sub_slots:
|
||||||
store.new_finished_sub_slot(slot, blockchain, blockchain.get_peak())
|
store.new_finished_sub_slot(slot, blockchain, blockchain.get_peak())
|
||||||
|
|
||||||
|
@ -184,7 +187,9 @@ class TestFullNodeStore:
|
||||||
|
|
||||||
blocks = blocks_reorg
|
blocks = blocks_reorg
|
||||||
while True:
|
while True:
|
||||||
blocks = bt.get_consecutive_blocks(1, block_list_input=blocks)
|
blocks = bt.get_consecutive_blocks(
|
||||||
|
1, block_list_input=blocks, normalized_to_identity=normalized_to_identity
|
||||||
|
)
|
||||||
res, _, _ = await blockchain.receive_block(blocks[-1])
|
res, _, _ = await blockchain.receive_block(blocks[-1])
|
||||||
if res == ReceiveBlockResult.NEW_PEAK:
|
if res == ReceiveBlockResult.NEW_PEAK:
|
||||||
sb = blockchain.block_record(blocks[-1].header_hash)
|
sb = blockchain.block_record(blocks[-1].header_hash)
|
||||||
|
@ -202,7 +207,9 @@ class TestFullNodeStore:
|
||||||
assert len(store.finished_sub_slots) == 2
|
assert len(store.finished_sub_slots) == 2
|
||||||
|
|
||||||
# Add slots to the end, except for the last one, which we will use to test invalid SP
|
# Add slots to the end, except for the last one, which we will use to test invalid SP
|
||||||
blocks_2 = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=3)
|
blocks_2 = bt.get_consecutive_blocks(
|
||||||
|
1, block_list_input=blocks, skip_slots=3, normalized_to_identity=normalized_to_identity
|
||||||
|
)
|
||||||
for slot in blocks_2[-1].finished_sub_slots[:-1]:
|
for slot in blocks_2[-1].finished_sub_slots[:-1]:
|
||||||
store.new_finished_sub_slot(slot, blockchain, blockchain.get_peak())
|
store.new_finished_sub_slot(slot, blockchain, blockchain.get_peak())
|
||||||
finished_sub_slots = blocks_2[-1].finished_sub_slots
|
finished_sub_slots = blocks_2[-1].finished_sub_slots
|
||||||
|
@ -256,6 +263,7 @@ class TestFullNodeStore:
|
||||||
finished_sub_slots[:slot_offset],
|
finished_sub_slots[:slot_offset],
|
||||||
peak.sub_slot_iters,
|
peak.sub_slot_iters,
|
||||||
)
|
)
|
||||||
|
assert sp.cc_vdf is not None
|
||||||
saved_sp_hash = sp.cc_vdf.output.get_hash()
|
saved_sp_hash = sp.cc_vdf.output.get_hash()
|
||||||
assert store.new_signage_point(i, blockchain, peak, peak.sub_slot_iters, sp)
|
assert store.new_signage_point(i, blockchain, peak, peak.sub_slot_iters, sp)
|
||||||
|
|
||||||
|
@ -312,7 +320,7 @@ class TestFullNodeStore:
|
||||||
for i in range(1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA):
|
for i in range(1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA):
|
||||||
sp = get_signage_point(
|
sp = get_signage_point(
|
||||||
test_constants,
|
test_constants,
|
||||||
{},
|
BlockCache({}, {}),
|
||||||
None,
|
None,
|
||||||
uint128(0),
|
uint128(0),
|
||||||
uint8(i),
|
uint8(i),
|
||||||
|
@ -321,7 +329,7 @@ class TestFullNodeStore:
|
||||||
)
|
)
|
||||||
assert store.new_signage_point(i, {}, None, peak.sub_slot_iters, sp)
|
assert store.new_signage_point(i, {}, None, peak.sub_slot_iters, sp)
|
||||||
|
|
||||||
blocks_3 = bt.get_consecutive_blocks(1, skip_slots=2)
|
blocks_3 = bt.get_consecutive_blocks(1, skip_slots=2, normalized_to_identity=normalized_to_identity)
|
||||||
for slot in blocks_3[-1].finished_sub_slots:
|
for slot in blocks_3[-1].finished_sub_slots:
|
||||||
store.new_finished_sub_slot(slot, {}, None)
|
store.new_finished_sub_slot(slot, {}, None)
|
||||||
assert len(store.finished_sub_slots) == 3
|
assert len(store.finished_sub_slots) == 3
|
||||||
|
@ -334,7 +342,7 @@ class TestFullNodeStore:
|
||||||
):
|
):
|
||||||
sp = get_signage_point(
|
sp = get_signage_point(
|
||||||
test_constants,
|
test_constants,
|
||||||
{},
|
BlockCache({}, {}),
|
||||||
None,
|
None,
|
||||||
slot_offset * peak.sub_slot_iters,
|
slot_offset * peak.sub_slot_iters,
|
||||||
uint8(i),
|
uint8(i),
|
||||||
|
@ -344,8 +352,10 @@ class TestFullNodeStore:
|
||||||
assert store.new_signage_point(i, {}, None, peak.sub_slot_iters, sp)
|
assert store.new_signage_point(i, {}, None, peak.sub_slot_iters, sp)
|
||||||
|
|
||||||
# Test adding signage points after genesis
|
# Test adding signage points after genesis
|
||||||
blocks_4 = bt.get_consecutive_blocks(1)
|
blocks_4 = bt.get_consecutive_blocks(1, normalized_to_identity=normalized_to_identity)
|
||||||
blocks_5 = bt.get_consecutive_blocks(1, block_list_input=blocks_4, skip_slots=1)
|
blocks_5 = bt.get_consecutive_blocks(
|
||||||
|
1, block_list_input=blocks_4, skip_slots=1, normalized_to_identity=normalized_to_identity
|
||||||
|
)
|
||||||
|
|
||||||
# If this is not the case, fix test to find a block that is
|
# If this is not the case, fix test to find a block that is
|
||||||
assert (
|
assert (
|
||||||
|
@ -359,7 +369,7 @@ class TestFullNodeStore:
|
||||||
sb.signage_point_index + test_constants.NUM_SP_INTERVALS_EXTRA,
|
sb.signage_point_index + test_constants.NUM_SP_INTERVALS_EXTRA,
|
||||||
test_constants.NUM_SPS_SUB_SLOT,
|
test_constants.NUM_SPS_SUB_SLOT,
|
||||||
):
|
):
|
||||||
if is_overflow_block(test_constants, i):
|
if is_overflow_block(test_constants, uint8(i)):
|
||||||
finished_sub_slots = blocks_5[-1].finished_sub_slots
|
finished_sub_slots = blocks_5[-1].finished_sub_slots
|
||||||
else:
|
else:
|
||||||
finished_sub_slots = []
|
finished_sub_slots = []
|
||||||
|
@ -377,10 +387,12 @@ class TestFullNodeStore:
|
||||||
|
|
||||||
# Test future EOS cache
|
# Test future EOS cache
|
||||||
store.initialize_genesis_sub_slot()
|
store.initialize_genesis_sub_slot()
|
||||||
blocks = bt.get_consecutive_blocks(1)
|
blocks = bt.get_consecutive_blocks(1, normalized_to_identity=normalized_to_identity)
|
||||||
await blockchain.receive_block(blocks[-1])
|
await blockchain.receive_block(blocks[-1])
|
||||||
while True:
|
while True:
|
||||||
blocks = bt.get_consecutive_blocks(1, block_list_input=blocks)
|
blocks = bt.get_consecutive_blocks(
|
||||||
|
1, block_list_input=blocks, normalized_to_identity=normalized_to_identity
|
||||||
|
)
|
||||||
await blockchain.receive_block(blocks[-1])
|
await blockchain.receive_block(blocks[-1])
|
||||||
sb = blockchain.block_record(blocks[-1].header_hash)
|
sb = blockchain.block_record(blocks[-1].header_hash)
|
||||||
if sb.first_in_sub_slot:
|
if sb.first_in_sub_slot:
|
||||||
|
@ -404,7 +416,7 @@ class TestFullNodeStore:
|
||||||
|
|
||||||
# Test future IP cache
|
# Test future IP cache
|
||||||
store.initialize_genesis_sub_slot()
|
store.initialize_genesis_sub_slot()
|
||||||
blocks = bt.get_consecutive_blocks(60)
|
blocks = bt.get_consecutive_blocks(60, normalized_to_identity=normalized_to_identity)
|
||||||
|
|
||||||
for block in blocks[:5]:
|
for block in blocks[:5]:
|
||||||
await blockchain.receive_block(block)
|
await blockchain.receive_block(block)
|
||||||
|
@ -446,3 +458,7 @@ class TestFullNodeStore:
|
||||||
|
|
||||||
# If flaky, increase the number of blocks created
|
# If flaky, increase the number of blocks created
|
||||||
assert case_0 and case_1
|
assert case_0 and case_1
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_basic_store_compact_blockchain(self, empty_blockchain):
|
||||||
|
await self.test_basic_store(empty_blockchain, True)
|
||||||
|
|
|
@ -103,12 +103,13 @@ class TestFullSync:
|
||||||
PeerInfo(self_hostname, uint16(server_2._port)), on_connect=full_node_3.full_node.on_connect
|
PeerInfo(self_hostname, uint16(server_2._port)), on_connect=full_node_3.full_node.on_connect
|
||||||
)
|
)
|
||||||
|
|
||||||
|
timeout_seconds = 120
|
||||||
# Node 3 and Node 2 sync up to node 1
|
# Node 3 and Node 2 sync up to node 1
|
||||||
await time_out_assert(
|
await time_out_assert(
|
||||||
90, node_height_exactly, True, full_node_2, test_constants.WEIGHT_PROOF_RECENT_BLOCKS + 5 - 1
|
timeout_seconds, node_height_exactly, True, full_node_2, test_constants.WEIGHT_PROOF_RECENT_BLOCKS + 5 - 1
|
||||||
)
|
)
|
||||||
await time_out_assert(
|
await time_out_assert(
|
||||||
90, node_height_exactly, True, full_node_3, test_constants.WEIGHT_PROOF_RECENT_BLOCKS + 5 - 1
|
timeout_seconds, node_height_exactly, True, full_node_3, test_constants.WEIGHT_PROOF_RECENT_BLOCKS + 5 - 1
|
||||||
)
|
)
|
||||||
|
|
||||||
cons = list(server_1.all_connections.values())[:]
|
cons = list(server_1.all_connections.values())[:]
|
||||||
|
@ -137,10 +138,10 @@ class TestFullSync:
|
||||||
)
|
)
|
||||||
|
|
||||||
# All four nodes are synced
|
# All four nodes are synced
|
||||||
await time_out_assert(90, node_height_exactly, True, full_node_1, num_blocks - 1)
|
await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_1, num_blocks - 1)
|
||||||
await time_out_assert(90, node_height_exactly, True, full_node_2, num_blocks - 1)
|
await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_2, num_blocks - 1)
|
||||||
await time_out_assert(90, node_height_exactly, True, full_node_3, num_blocks - 1)
|
await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_3, num_blocks - 1)
|
||||||
await time_out_assert(90, node_height_exactly, True, full_node_4, num_blocks - 1)
|
await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_4, num_blocks - 1)
|
||||||
|
|
||||||
# Deep reorg, fall back from batch sync to long sync
|
# Deep reorg, fall back from batch sync to long sync
|
||||||
blocks_node_5 = bt.get_consecutive_blocks(60, block_list_input=blocks[:350], seed=b"node5")
|
blocks_node_5 = bt.get_consecutive_blocks(60, block_list_input=blocks[:350], seed=b"node5")
|
||||||
|
@ -149,8 +150,8 @@ class TestFullSync:
|
||||||
await server_5.start_client(
|
await server_5.start_client(
|
||||||
PeerInfo(self_hostname, uint16(server_1._port)), on_connect=full_node_5.full_node.on_connect
|
PeerInfo(self_hostname, uint16(server_1._port)), on_connect=full_node_5.full_node.on_connect
|
||||||
)
|
)
|
||||||
await time_out_assert(90, node_height_exactly, True, full_node_5, 409)
|
await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_5, 409)
|
||||||
await time_out_assert(90, node_height_exactly, True, full_node_1, 409)
|
await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_1, 409)
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
@pytest.mark.asyncio
|
||||||
async def test_sync_from_fork_point_and_weight_proof(self, three_nodes, default_1000_blocks, default_400_blocks):
|
async def test_sync_from_fork_point_and_weight_proof(self, three_nodes, default_1000_blocks, default_400_blocks):
|
||||||
|
|
|
@ -1,59 +0,0 @@
|
||||||
import pytest
|
|
||||||
from tests.core.full_node.test_full_sync import node_height_at_least
|
|
||||||
from tests.setup_nodes import setup_full_system, test_constants, self_hostname
|
|
||||||
from src.util.ints import uint16
|
|
||||||
from tests.time_out_assert import time_out_assert
|
|
||||||
from src.types.peer_info import PeerInfo
|
|
||||||
|
|
||||||
test_constants_modified = test_constants.replace(
|
|
||||||
**{
|
|
||||||
"MIN_PLOT_SIZE": 18,
|
|
||||||
"DIFFICULTY_STARTING": 2 ** 8,
|
|
||||||
"DISCRIMINANT_SIZE_BITS": 1024,
|
|
||||||
"SUB_EPOCH_BLOCKS": 140,
|
|
||||||
"WEIGHT_PROOF_THRESHOLD": 2,
|
|
||||||
"WEIGHT_PROOF_RECENT_BLOCKS": 350,
|
|
||||||
"MAX_SUB_SLOT_BLOCKS": 50,
|
|
||||||
"NUM_SPS_SUB_SLOT": 32, # Must be a power of 2
|
|
||||||
"EPOCH_BLOCKS": 280,
|
|
||||||
"SUB_SLOT_ITERS_STARTING": 2 ** 20,
|
|
||||||
"NUMBER_ZERO_BITS_PLOT_FILTER": 5,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class TestSimulation:
|
|
||||||
@pytest.fixture(scope="function")
|
|
||||||
async def simulation(self):
|
|
||||||
async for _ in setup_full_system(test_constants_modified):
|
|
||||||
yield _
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
|
||||||
async def test_simulation_1(self, simulation):
|
|
||||||
node1, node2, _, _, _, _, _, server1 = simulation
|
|
||||||
await server1.start_client(PeerInfo(self_hostname, uint16(21238)))
|
|
||||||
# Use node2 to test node communication, since only node1 extends the chain.
|
|
||||||
await time_out_assert(1000, node_height_at_least, True, node2, 7)
|
|
||||||
|
|
||||||
|
|
||||||
# # async def has_compact(node1, node2, max_height):
|
|
||||||
# # for h in range(1, max_height):
|
|
||||||
# # blocks_1: List[FullBlock] = await node1.full_node.block_store.get_full_blocks_at([uint32(h)])
|
|
||||||
# # blocks_2: List[FullBlock] = await node2.full_node.block_store.get_full_blocks_at([uint32(h)])
|
|
||||||
# # has_compact_1 = False
|
|
||||||
# # has_compact_2 = False
|
|
||||||
# # for block in blocks_1:
|
|
||||||
# # assert block.proof_of_time is not None
|
|
||||||
# # if block.proof_of_time.witness_type == 0:
|
|
||||||
# # has_compact_1 = True
|
|
||||||
# # break
|
|
||||||
# # for block in blocks_2:
|
|
||||||
# # assert block.proof_of_time is not None
|
|
||||||
# # if block.proof_of_time.witness_type == 0:
|
|
||||||
# # has_compact_2 = True
|
|
||||||
# # break
|
|
||||||
# # if has_compact_1 and has_compact_2:
|
|
||||||
# # return True
|
|
||||||
# # return True
|
|
||||||
# #
|
|
||||||
# # await time_out_assert_custom_interval(120, 2, has_compact, True, node1, node2, max_height)
|
|
|
@ -57,7 +57,8 @@ async def setup_full_node(
|
||||||
config = bt.config["full_node"]
|
config = bt.config["full_node"]
|
||||||
config["database_path"] = db_name
|
config["database_path"] = db_name
|
||||||
config["send_uncompact_interval"] = send_uncompact_interval
|
config["send_uncompact_interval"] = send_uncompact_interval
|
||||||
config["peer_connect_interval"] = 3
|
config["target_uncompact_proofs"] = 30
|
||||||
|
config["peer_connect_interval"] = 50
|
||||||
if introducer_port is not None:
|
if introducer_port is not None:
|
||||||
config["introducer_peer"]["host"] = self_hostname
|
config["introducer_peer"]["host"] = self_hostname
|
||||||
config["introducer_peer"]["port"] = introducer_port
|
config["introducer_peer"]["port"] = introducer_port
|
||||||
|
@ -232,6 +233,19 @@ async def setup_introducer(port):
|
||||||
await service.wait_closed()
|
await service.wait_closed()
|
||||||
|
|
||||||
|
|
||||||
|
async def setup_vdf_client(port):
|
||||||
|
vdf_task_1 = asyncio.create_task(spawn_process(self_hostname, port, 1))
|
||||||
|
|
||||||
|
def stop():
|
||||||
|
asyncio.create_task(kill_processes())
|
||||||
|
|
||||||
|
asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, stop)
|
||||||
|
asyncio.get_running_loop().add_signal_handler(signal.SIGINT, stop)
|
||||||
|
|
||||||
|
yield vdf_task_1
|
||||||
|
await kill_processes()
|
||||||
|
|
||||||
|
|
||||||
async def setup_vdf_clients(port):
|
async def setup_vdf_clients(port):
|
||||||
vdf_task_1 = asyncio.create_task(spawn_process(self_hostname, port, 1))
|
vdf_task_1 = asyncio.create_task(spawn_process(self_hostname, port, 1))
|
||||||
vdf_task_2 = asyncio.create_task(spawn_process(self_hostname, port, 2))
|
vdf_task_2 = asyncio.create_task(spawn_process(self_hostname, port, 2))
|
||||||
|
@ -253,6 +267,7 @@ async def setup_timelord(port, full_node_port, sanitizer, consensus_constants: C
|
||||||
config["port"] = port
|
config["port"] = port
|
||||||
config["full_node_peer"]["port"] = full_node_port
|
config["full_node_peer"]["port"] = full_node_port
|
||||||
config["sanitizer_mode"] = sanitizer
|
config["sanitizer_mode"] = sanitizer
|
||||||
|
config["fast_algorithm"] = False
|
||||||
if sanitizer:
|
if sanitizer:
|
||||||
config["vdf_server"]["port"] = 7999
|
config["vdf_server"]["port"] = 7999
|
||||||
|
|
||||||
|
@ -403,8 +418,8 @@ async def setup_full_system(consensus_constants: ConsensusConstants):
|
||||||
setup_timelord(21236, 21237, False, consensus_constants, b_tools),
|
setup_timelord(21236, 21237, False, consensus_constants, b_tools),
|
||||||
setup_full_node(consensus_constants, "blockchain_test.db", 21237, b_tools, 21233, False, 10),
|
setup_full_node(consensus_constants, "blockchain_test.db", 21237, b_tools, 21233, False, 10),
|
||||||
setup_full_node(consensus_constants, "blockchain_test_2.db", 21238, b_tools_1, 21233, False, 10),
|
setup_full_node(consensus_constants, "blockchain_test_2.db", 21238, b_tools_1, 21233, False, 10),
|
||||||
# setup_vdf_clients(7999),
|
setup_vdf_client(7999),
|
||||||
# setup_timelord(21239, 21238, True, consensus_constants),
|
setup_timelord(21239, 21238, True, consensus_constants, b_tools),
|
||||||
]
|
]
|
||||||
|
|
||||||
introducer, introducer_server = await node_iters[0].__anext__()
|
introducer, introducer_server = await node_iters[0].__anext__()
|
||||||
|
@ -421,8 +436,8 @@ async def setup_full_system(consensus_constants: ConsensusConstants):
|
||||||
timelord, timelord_server = await node_iters[4].__anext__()
|
timelord, timelord_server = await node_iters[4].__anext__()
|
||||||
node_api_1 = await node_iters[5].__anext__()
|
node_api_1 = await node_iters[5].__anext__()
|
||||||
node_api_2 = await node_iters[6].__anext__()
|
node_api_2 = await node_iters[6].__anext__()
|
||||||
# vdf_sanitizer = await node_iters[7].__anext__()
|
vdf_sanitizer = await node_iters[7].__anext__()
|
||||||
# sanitizer, sanitizer_server = await node_iters[8].__anext__()
|
sanitizer, sanitizer_server = await node_iters[8].__anext__()
|
||||||
|
|
||||||
yield (
|
yield (
|
||||||
node_api_1,
|
node_api_1,
|
||||||
|
@ -432,6 +447,8 @@ async def setup_full_system(consensus_constants: ConsensusConstants):
|
||||||
introducer,
|
introducer,
|
||||||
timelord,
|
timelord,
|
||||||
vdf_clients,
|
vdf_clients,
|
||||||
|
vdf_sanitizer,
|
||||||
|
sanitizer,
|
||||||
node_api_1.full_node.server,
|
node_api_1.full_node.server,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
88
tests/simulation/test_simulation.py
Normal file
88
tests/simulation/test_simulation.py
Normal file
|
@ -0,0 +1,88 @@
|
||||||
|
import pytest
|
||||||
|
from tests.core.full_node.test_full_sync import node_height_at_least
|
||||||
|
from tests.setup_nodes import setup_full_system, setup_full_node, test_constants, self_hostname
|
||||||
|
from src.util.ints import uint16
|
||||||
|
from tests.time_out_assert import time_out_assert
|
||||||
|
from src.types.peer_info import PeerInfo
|
||||||
|
from src.util.block_tools import BlockTools
|
||||||
|
|
||||||
|
test_constants_modified = test_constants.replace(
|
||||||
|
**{
|
||||||
|
"DIFFICULTY_STARTING": 2 ** 8,
|
||||||
|
"DISCRIMINANT_SIZE_BITS": 1024,
|
||||||
|
"SUB_EPOCH_BLOCKS": 140,
|
||||||
|
"WEIGHT_PROOF_THRESHOLD": 2,
|
||||||
|
"WEIGHT_PROOF_RECENT_BLOCKS": 350,
|
||||||
|
"MAX_SUB_SLOT_BLOCKS": 50,
|
||||||
|
"NUM_SPS_SUB_SLOT": 32, # Must be a power of 2
|
||||||
|
"EPOCH_BLOCKS": 280,
|
||||||
|
"SUB_SLOT_ITERS_STARTING": 2 ** 20,
|
||||||
|
"NUMBER_ZERO_BITS_PLOT_FILTER": 5,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class TestSimulation:
|
||||||
|
@pytest.fixture(scope="function")
|
||||||
|
async def extra_node(self):
|
||||||
|
b_tools = BlockTools(constants=test_constants_modified)
|
||||||
|
async for _ in setup_full_node(test_constants_modified, "blockchain_test_3.db", 21240, b_tools):
|
||||||
|
yield _
|
||||||
|
|
||||||
|
@pytest.fixture(scope="function")
|
||||||
|
async def simulation(self):
|
||||||
|
async for _ in setup_full_system(test_constants_modified):
|
||||||
|
yield _
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_simulation_1(self, simulation, extra_node):
|
||||||
|
node1, node2, _, _, _, _, _, _, _, server1 = simulation
|
||||||
|
await server1.start_client(PeerInfo(self_hostname, uint16(21238)))
|
||||||
|
# Use node2 to test node communication, since only node1 extends the chain.
|
||||||
|
await time_out_assert(1500, node_height_at_least, True, node2, 7)
|
||||||
|
|
||||||
|
async def has_compact(node1, node2):
|
||||||
|
peak_height_1 = node1.full_node.blockchain.get_peak_height()
|
||||||
|
headers_1 = await node1.full_node.blockchain.get_header_blocks_in_range(0, peak_height_1)
|
||||||
|
peak_height_2 = node2.full_node.blockchain.get_peak_height()
|
||||||
|
headers_2 = await node2.full_node.blockchain.get_header_blocks_in_range(0, peak_height_2)
|
||||||
|
# Commented to speed up.
|
||||||
|
# cc_eos = [False, False]
|
||||||
|
# icc_eos = [False, False]
|
||||||
|
# cc_sp = [False, False]
|
||||||
|
# cc_ip = [False, False]
|
||||||
|
has_compact = [False, False]
|
||||||
|
for index, headers in enumerate([headers_1, headers_2]):
|
||||||
|
for header in headers.values():
|
||||||
|
for sub_slot in header.finished_sub_slots:
|
||||||
|
if sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity:
|
||||||
|
# cc_eos[index] = True
|
||||||
|
has_compact[index] = True
|
||||||
|
if (
|
||||||
|
sub_slot.proofs.infused_challenge_chain_slot_proof is not None
|
||||||
|
and sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
|
||||||
|
):
|
||||||
|
# icc_eos[index] = True
|
||||||
|
has_compact[index] = True
|
||||||
|
if (
|
||||||
|
header.challenge_chain_sp_proof is not None
|
||||||
|
and header.challenge_chain_sp_proof.normalized_to_identity
|
||||||
|
):
|
||||||
|
# cc_sp[index] = True
|
||||||
|
has_compact[index] = True
|
||||||
|
if header.challenge_chain_ip_proof.normalized_to_identity:
|
||||||
|
# cc_ip[index] = True
|
||||||
|
has_compact[index] = True
|
||||||
|
|
||||||
|
# return (
|
||||||
|
# cc_eos == [True, True] and icc_eos == [True, True] and cc_sp == [True, True] and cc_ip == [True, True]
|
||||||
|
# )
|
||||||
|
return has_compact == [True, True]
|
||||||
|
|
||||||
|
await time_out_assert(1500, has_compact, True, node1, node2)
|
||||||
|
node3 = extra_node
|
||||||
|
server3 = node3.full_node.server
|
||||||
|
peak_height = max(node1.full_node.blockchain.get_peak_height(), node2.full_node.blockchain.get_peak_height())
|
||||||
|
await server3.start_client(PeerInfo(self_hostname, uint16(21237)))
|
||||||
|
await server3.start_client(PeerInfo(self_hostname, uint16(21238)))
|
||||||
|
await time_out_assert(600, node_height_at_least, True, node3, peak_height)
|
Loading…
Reference in a new issue