mirror of
https://github.com/lightning/bolts.git
synced 2025-02-22 14:22:46 +01:00
Script for extracting structured protocol messages
This changes extract-formats.py so that other scripts can import and reuse it, while retaining its normal command-line behaviour. The new script (structured.py) parses the CSV output into an ordered map of messages and prints it as JSON. This could be used to write parsers.
This commit is contained in:
parent
a11d234e18
commit
22544d7789
2 changed files with 115 additions and 63 deletions
|
@ -51,33 +51,24 @@ def guess_alignment(message,name,sizestr):
|
|||
|
||||
return 1
|
||||
|
||||
parser = OptionParser()
|
||||
parser.add_option("--message-types",
|
||||
action="store_true", dest="output_types", default=False,
|
||||
help="Output MESSAGENAME,VALUE for every message")
|
||||
parser.add_option("--check-alignment",
|
||||
action="store_true", dest="check_alignment", default=False,
|
||||
help="Check alignment for every member of each message")
|
||||
parser.add_option("--message-fields",
|
||||
action="store_true", dest="output_fields", default=False,
|
||||
help="Output MESSAGENAME,OFFSET,FIELDNAME,SIZE for every message")
|
||||
def main(options, args=None, output=sys.stdout, lines=None):
|
||||
# Example inputs:
|
||||
# 1. type: 17 (`error`)
|
||||
# 2. data:
|
||||
# * [`8`:`channel_id`]
|
||||
# * [`4`:`len`]
|
||||
# * [`len`:`data`]
|
||||
#
|
||||
# 1. type: PERM|NODE|3 (`required_node_feature_missing`)
|
||||
message = None
|
||||
havedata = None
|
||||
typeline = re.compile('1\. type: (?P<value>[-0-9A-Za-z_|]+) \(`(?P<name>[A-Za-z_]+)`\)')
|
||||
dataline = re.compile('\s+\* \[`(?P<size>[_a-z0-9*+]+)`:`(?P<name>[_a-z0-9]+)`\]')
|
||||
|
||||
(options, args) = parser.parse_args()
|
||||
if lines is None:
|
||||
lines = fileinput.input(args)
|
||||
|
||||
# Example inputs:
|
||||
# 1. type: 17 (`error`)
|
||||
# 2. data:
|
||||
# * [`8`:`channel_id`]
|
||||
# * [`4`:`len`]
|
||||
# * [`len`:`data`]
|
||||
#
|
||||
# 1. type: PERM|NODE|3 (`required_node_feature_missing`)
|
||||
message = None
|
||||
havedata = None
|
||||
typeline = re.compile('1\. type: (?P<value>[-0-9A-Za-z_|]+) \(`(?P<name>[A-Za-z_]+)`\)')
|
||||
dataline = re.compile('\s+\* \[`(?P<size>[_a-z0-9*+]+)`:`(?P<name>[_a-z0-9]+)`\]')
|
||||
|
||||
for i,line in enumerate(fileinput.input(args)):
|
||||
for i,line in enumerate(lines):
|
||||
line = line.rstrip()
|
||||
linenum = i+1
|
||||
|
||||
|
@ -87,7 +78,7 @@ for i,line in enumerate(fileinput.input(args)):
|
|||
raise ValueError('{}:Found a message while I was already in a message'.format(linenum))
|
||||
message = match.group('name')
|
||||
if options.output_types:
|
||||
print("{},{}".format(match.group('name'), match.group('value')))
|
||||
print("{},{}".format(match.group('name'), match.group('value')), file=output)
|
||||
havedata = None
|
||||
alignoff = False
|
||||
elif message is not None and havedata is None:
|
||||
|
@ -111,7 +102,7 @@ for i,line in enumerate(fileinput.input(args)):
|
|||
raise ValueError('{}:message {} field {} Offset {} not aligned on {} boundary:'.format(linenum, message, match.group('name'), dataoff, align))
|
||||
|
||||
if options.output_fields:
|
||||
print("{},{}{},{},{}".format(message,dataoff,off_extraterms,match.group('name'),match.group('size')))
|
||||
print("{},{}{},{},{}".format(message,dataoff,off_extraterms,match.group('name'),match.group('size')), file=output)
|
||||
|
||||
# Size can be variable.
|
||||
try:
|
||||
|
@ -121,3 +112,19 @@ for i,line in enumerate(fileinput.input(args)):
|
|||
off_extraterms = off_extraterms + "+" + match.group('size')
|
||||
else:
|
||||
message = None
|
||||
|
||||
if __name__ == "__main__":
    # Command-line entry point: build the option parser here (rather than at
    # module level) so other scripts can import this module and call main()
    # with their own options object.
    parser = OptionParser()
    for flag, dest, helptext in (
        ("--message-types", "output_types",
         "Output MESSAGENAME,VALUE for every message"),
        ("--check-alignment", "check_alignment",
         "Check alignment for every member of each message"),
        ("--message-fields", "output_fields",
         "Output MESSAGENAME,OFFSET,FIELDNAME,SIZE for every message"),
    ):
        parser.add_option(flag, action="store_true", dest=dest,
                          default=False, help=helptext)

    (options, args) = parser.parse_args()

    main(options, args)
|
||||
|
|
45
tools/structured.py
Normal file
45
tools/structured.py
Normal file
|
@ -0,0 +1,45 @@
|
|||
formats = __import__("extract-formats")
|
||||
from io import StringIO
|
||||
import glob
|
||||
import collections
|
||||
import json
|
||||
|
||||
class Options(object):
    """Stand-in for the optparse options object that extract-formats'
    main() expects: emit both message types and message fields, and
    skip the alignment check."""
    check_alignment = False
    output_fields = True
    output_types = True
|
||||
|
||||
options = Options()
# Accumulated CSV rows ("name,type" and "name,offset,field,size") from
# every spec document, in document order.
csv = []

for path in sorted(glob.glob("../??-*.md")):
    with open(path) as f:
        # Fresh buffer per file: reusing one StringIO across iterations
        # would accumulate earlier files' output and re-append duplicate
        # rows into csv on every pass.
        output = StringIO()
        formats.main(options, output=output, lines=f.readlines())
    csvstr = output.getvalue().strip()
    if csvstr == "":
        continue
    csv += csvstr.split("\n")
|
||||
|
||||
def _build_resmap(rows):
    """Fold extract-formats CSV rows into an ordered message map.

    rows alternate between two shapes: a 2-field header "MSGNAME,TYPE"
    opening a message, and 4-field rows "MSGNAME,POSITION,FIELDNAME,SIZE"
    describing that message's fields.  Returns an OrderedDict mapping
    message name -> {"type": ..., "payload": {field: {"position", "length"}}}.
    """
    result = collections.OrderedDict()
    msgname = None
    typenum = None
    fields = collections.OrderedDict()
    for line in rows:
        parts = line.split(",")
        if len(parts) == 2:
            # New message header: flush the message we were building first.
            if msgname is not None:
                result[msgname] = collections.OrderedDict(
                    [("type", typenum), ("payload", fields)])
            fields = collections.OrderedDict()
            msgname, typenum = parts
            continue
        assert msgname == parts[0], line
        assert len(parts) == 4, line
        fields[parts[2]] = {"position": parts[1], "length": parts[3]}
    # Bug fix: the original only flushed a message when the NEXT header
    # appeared, so the final message of the input was silently dropped.
    if msgname is not None:
        result[msgname] = collections.OrderedDict(
            [("type", typenum), ("payload", fields)])
    return result


resmap = _build_resmap(csv)

if __name__ == "__main__":
    print(json.dumps(resmap, indent=True))
|
Loading…
Add table
Reference in a new issue