ln: store HTLCStateMachine in lnworker.channels
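
The change wraps each OpenChannel namedtuple that lnworker loads or creates in an HTLCStateMachine, so lnworker.channels now maps channel_id to state-machine objects instead of bare namedtuples; call sites read namedtuple fields through .state and update them by replacing the namedtuple held on the wrapper. A minimal sketch of that access pattern, assuming only what the diff below shows (that HTLCStateMachine keeps the namedtuple it was constructed with on a .state attribute); the real class lives in lnhtlc.py and is not part of this diff, and the names below are simplified stand-ins:

    from collections import namedtuple

    # Illustrative stand-in: only a few fields of the real OpenChannel namedtuple.
    OpenChannel = namedtuple("OpenChannel", ["channel_id", "node_id", "short_channel_id"])

    class HTLCStateMachine:
        # Stand-in: the real implementation also builds commitment transactions
        # and tracks HTLCs from this state.
        def __init__(self, state):
            self.state = state  # the underlying OpenChannel namedtuple

    chan = HTLCStateMachine(OpenChannel(b"\x00" * 32, b"\x02" * 33, None))

    # before this commit: chan.channel_id -- after: read fields through .state
    assert chan.state.channel_id == b"\x00" * 32

    # namedtuples are immutable, so updates swap in a replaced copy
    chan.state = chan.state._replace(short_channel_id=b"\x01" * 8)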
@@ -14,6 +14,7 @@ from .lnbase import Peer, Outpoint, ChannelConfig, LocalState, RemoteState, Keyp
 from .lightning_payencode.lnaddr import lnencode, LnAddr, lndecode
 from .ecc import ECPrivkey, CURVE_ORDER, der_sig_from_sig_string
 from .transaction import Transaction
+from .lnhtlc import HTLCStateMachine
 
 is_key = lambda k: k.endswith("_basepoint") or k.endswith("_key")
 
@@ -53,7 +54,7 @@ def serialize_channels(channels_dict):
     serialized_channels = []
     for chan in channels_dict.values():
         namedtuples_to_dict = lambda v: {i: j._asdict() if isinstance(j, tuple) else j for i, j in v._asdict().items()}
-        serialized_channels.append({k: namedtuples_to_dict(v) if isinstance(v, tuple) else v for k, v in chan._asdict().items()})
+        serialized_channels.append({k: namedtuples_to_dict(v) if isinstance(v, tuple) else v for k, v in chan.state._asdict().items()})
     class MyJsonEncoder(json.JSONEncoder):
         def default(self, o):
             if isinstance(o, bytes):
@@ -64,8 +65,8 @@ def serialize_channels(channels_dict):
     dumped = MyJsonEncoder().encode(serialized_channels)
     roundtripped = json.loads(dumped)
     reconstructed = set(reconstruct_namedtuples(x) for x in roundtripped)
-    if reconstructed != set(channels_dict.values()):
-        raise Exception("Channels did not roundtrip serialization without changes:\n" + repr(reconstructed) + "\n" + repr(channels))
+    if reconstructed != set(x.state for x in channels_dict.values()):
+        raise Exception("Channels did not roundtrip serialization without changes:\n" + repr(reconstructed) + "\n" + repr(channels_dict))
     return roundtripped
 
 
@@ -90,10 +91,10 @@ class LNWorker(PrintError):
         self.pubkey = ECPrivkey(self.privkey).get_public_key_bytes()
         self.config = network.config
         self.peers = {}
-        self.channels = {x.channel_id: x for x in map(reconstruct_namedtuples, wallet.storage.get("channels", []))}
+        self.channels = {x.channel_id: HTLCStateMachine(x) for x in map(reconstruct_namedtuples, wallet.storage.get("channels", []))}
         self.invoices = wallet.storage.get('lightning_invoices', {})
         peer_list = network.config.get('lightning_peers', node_list)
-        self.channel_state = {chan.channel_id: "DISCONNECTED" for chan in self.channels.values()}
+        self.channel_state = {chan.state.channel_id: "DISCONNECTED" for chan in self.channels.values()}
         for chan_id, chan in self.channels.items():
             self.network.lnwatcher.watch_channel(chan, self.on_channel_utxos)
         for host, port, pubkey in peer_list:
@@ -104,7 +105,7 @@ class LNWorker(PrintError):
 
     def channels_for_peer(self, node_id):
         assert type(node_id) is bytes
-        return {x: y for (x, y) in self.channels.items() if y.node_id == node_id}
+        return {x: y for (x, y) in self.channels.items() if y.state.node_id == node_id}
 
     def add_peer(self, host, port, node_id):
         peer = Peer(self, host, int(port), node_id, request_initial_sync=self.config.get("request_initial_sync", True))
@@ -113,17 +114,18 @@ class LNWorker(PrintError):
         self.lock = threading.Lock()
 
     def save_channel(self, openchannel):
-        if openchannel.channel_id not in self.channel_state:
-            self.channel_state[openchannel.channel_id] = "OPENING"
-        self.channels[openchannel.channel_id] = openchannel
+        assert type(openchannel) is HTLCStateMachine
+        if openchannel.state.channel_id not in self.channel_state:
+            self.channel_state[openchannel.state.channel_id] = "OPENING"
+        self.channels[openchannel.state.channel_id] = openchannel
         for node_id, peer in self.peers.items():
             peer.channels = self.channels_for_peer(node_id)
-        if openchannel.remote_state.next_per_commitment_point == openchannel.remote_state.current_per_commitment_point:
+        if openchannel.state.remote_state.next_per_commitment_point == openchannel.state.remote_state.current_per_commitment_point:
             raise Exception("Tried to save channel with next_point == current_point, this should not happen")
         dumped = serialize_channels(self.channels)
         self.wallet.storage.put("channels", dumped)
         self.wallet.storage.write()
-        self.network.trigger_callback('channel', openchannel)
+        self.network.trigger_callback('channel', openchannel.state)
 
     def save_short_chan_id(self, chan):
         """
@@ -131,48 +133,48 @@ class LNWorker(PrintError):
 
         If the Funding TX has not been mined, return None
         """
-        assert self.channel_state[chan.channel_id] in ["OPEN", "OPENING"]
-        peer = self.peers[chan.node_id]
-        conf = self.wallet.get_tx_height(chan.funding_outpoint.txid)[1]
-        if conf >= chan.constraints.funding_txn_minimum_depth:
-            block_height, tx_pos = self.wallet.get_txpos(chan.funding_outpoint.txid)
+        assert self.channel_state[chan.state.channel_id] in ["OPEN", "OPENING"]
+        peer = self.peers[chan.state.node_id]
+        conf = self.wallet.get_tx_height(chan.state.funding_outpoint.txid)[1]
+        if conf >= chan.state.constraints.funding_txn_minimum_depth:
+            block_height, tx_pos = self.wallet.get_txpos(chan.state.funding_outpoint.txid)
             if tx_pos == -1:
                 self.print_error('funding tx is not yet SPV verified.. but there are '
                                  'already enough confirmations (currently {})'.format(conf))
-                return None
-            chan = chan._replace(short_channel_id = calc_short_channel_id(block_height, tx_pos, chan.funding_outpoint.output_index))
+                return False
+            chan.state = chan.state._replace(short_channel_id = calc_short_channel_id(block_height, tx_pos, chan.state.funding_outpoint.output_index))
             self.save_channel(chan)
-            return chan
-        return None
+            return True
+        return False
 
     def on_channel_utxos(self, chan, utxos):
         outpoints = [Outpoint(x["tx_hash"], x["tx_pos"]) for x in utxos]
-        if chan.funding_outpoint not in outpoints:
-            self.channel_state[chan.channel_id] = "CLOSED"
-        elif self.channel_state[chan.channel_id] == 'DISCONNECTED':
-            peer = self.peers[chan.node_id]
+        if chan.state.funding_outpoint not in outpoints:
+            self.channel_state[chan.state.channel_id] = "CLOSED"
+        elif self.channel_state[chan.state.channel_id] == 'DISCONNECTED':
+            peer = self.peers[chan.state.node_id]
             coro = peer.reestablish_channel(chan)
             asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
 
     def on_network_update(self, event, *args):
         for chan in self.channels.values():
-            peer = self.peers[chan.node_id]
-            if self.channel_state[chan.channel_id] == "OPENING":
-                chan = self.save_short_chan_id(chan)
-                if not chan:
+            peer = self.peers[chan.state.node_id]
+            if self.channel_state[chan.state.channel_id] == "OPENING":
+                res = self.save_short_chan_id(chan)
+                if not res:
                     self.print_error("network update but funding tx is still not at sufficient depth")
                     continue
                 # this results in the channel being marked OPEN
                 peer.funding_locked(chan)
-            elif self.channel_state[chan.channel_id] == "OPEN":
-                conf = self.wallet.get_tx_height(chan.funding_outpoint.txid)[1]
+            elif self.channel_state[chan.state.channel_id] == "OPEN":
+                conf = self.wallet.get_tx_height(chan.state.funding_outpoint.txid)[1]
                 peer.on_network_update(chan, conf)
 
     async def _open_channel_coroutine(self, node_id, amount_sat, push_sat, password):
         if node_id not in self.peers:
             node = self.network.lightning_nodes.get(node_id)
             if node is None:
-                return False
+                return "node not found, peers available are: " + str(self.network.lightning_nodes.keys())
             host, port = node['addresses'][0]
             self.add_peer(host, port, node_id)
         peer = self.peers[node_id]
@@ -198,7 +200,7 @@ class LNWorker(PrintError):
         node_id, short_channel_id = path[0]
         peer = self.peers[node_id]
         for chan in self.channels.values():
-            if chan.short_channel_id == short_channel_id:
+            if chan.state.short_channel_id == short_channel_id:
                 break
         else:
             raise Exception("ChannelDB returned path with short_channel_id that is not in channel list")
@@ -206,7 +208,6 @@ class LNWorker(PrintError):
         return asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
 
     def add_invoice(self, amount_sat, message):
-        is_open = lambda chan: self.channel_state[chan.channel_id] == "OPEN"
         payment_preimage = os.urandom(32)
         RHASH = sha256(payment_preimage)
         amount_btc = amount_sat/Decimal(COIN) if amount_sat else None
@@ -230,13 +231,15 @@ class LNWorker(PrintError):
         return serialize_channels(self.channels)
 
     def close_channel(self, chan_id):
-        from .lnhtlc import HTLCStateMachine
         chan = self.channels[chan_id]
         # local_commitment always gives back the next expected local_commitment,
         # but in this case, we want the current one. So substract one ctn number
-        tx = HTLCStateMachine(chan._replace(local_state=chan.local_state._replace(ctn=chan.local_state.ctn - 1))).local_commitment
-        tx.sign({bh2u(chan.local_config.multisig_key.pubkey): (chan.local_config.multisig_key.privkey, True)})
-        remote_sig = chan.local_state.current_commitment_signature
+        old_state = chan.state
+        chan.state = chan.state._replace(local_state=chan.state.local_state._replace(ctn=chan.state.local_state.ctn - 1))
+        tx = chan.local_commitment
+        chan.state = old_state
+        tx.sign({bh2u(chan.state.local_config.multisig_key.pubkey): (chan.state.local_config.multisig_key.privkey, True)})
+        remote_sig = chan.state.local_state.current_commitment_signature
         remote_sig = der_sig_from_sig_string(remote_sig) + b"\x01"
         none_idx = tx._inputs[0]["signatures"].index(None)
         tx.add_signature_to_txin(0, none_idx, bh2u(remote_sig))
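
Note on the close_channel hunk: local_commitment on the state machine yields the next expected local commitment, so to sign the current one the code saves the wrapped state, swaps in a copy with ctn decremented by one, reads local_commitment, and then restores the original state. A rough, self-contained illustration of that save/swap/restore pattern (placeholder names; the real commitment construction in lnhtlc.py is not shown in this diff):

    from collections import namedtuple

    LocalState = namedtuple("LocalState", ["ctn"])
    ChannelState = namedtuple("ChannelState", ["local_state"])

    class Wrapper:
        """Placeholder for HTLCStateMachine: derives a value from self.state."""
        def __init__(self, state):
            self.state = state

        @property
        def local_commitment(self):
            # stands in for building the commitment tx for the state's ctn
            return "commitment@ctn={}".format(self.state.local_state.ctn)

    chan = Wrapper(ChannelState(LocalState(ctn=5)))

    # save / swap / restore, mirroring close_channel above
    old_state = chan.state
    chan.state = chan.state._replace(
        local_state=chan.state.local_state._replace(ctn=chan.state.local_state.ctn - 1))
    tx = chan.local_commitment  # commitment for the previous ctn
    chan.state = old_state      # put the original state back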