#+options: ':t *:t -:t ::t <:t H:4 \n:nil ^:t arch:headline author:t
#+options: broken-links:nil c:nil creator:nil d:(not "LOGBOOK") date:t e:t
#+options: email:nil f:t inline:t num:t p:nil pri:nil prop:nil stat:t tags:t
#+options: tasks:t tex:t timestamp:t title:t toc:t todo:t |:t
#+title: Semantics of an embedded vector architecture for formal verification of software
#+date: \today
#+author: Greg Brown
#+email: greg.brown@cl.cam.ac.uk
#+language: en-GB
#+select_tags: export
#+exclude_tags: noexport
#+creator: Emacs 27.2 (Org mode 9.6)
#+cite_export: biblatex
#+bibliography: ./thesis.bib
#+latex_class: thesis
#+latex_class_options: [twoside,a4paper,11pt]
#+latex_header: \usepackage[hyperref=true,url=true,backend=biber,natbib=true]{biblatex}
#+latex_header: \usepackage[moderate]{savetrees}
#+latex_header: \usepackage[a4paper]{geometry}
#+latex_header: \usepackage{minted}
#+latex_header: \usepackage{newunicodechar}
#+latex_header: \usepackage{mathtools}
#+latex_header: \usepackage{stmaryrd}
#+latex_header: \usepackage[english=british]{csquotes}
#+latex_compiler: pdflatex

#+latex_header: \newunicodechar{Γ}{\ensuremath{\Gamma}}
#+latex_header: \newunicodechar{Δ}{\ensuremath{\Delta}}
#+latex_header: \newunicodechar{Κ}{\ensuremath{K}}
#+latex_header: \newunicodechar{Σ}{\ensuremath{\Sigma}}
#+latex_header: \newunicodechar{γ}{\ensuremath{\gamma}}
#+latex_header: \newunicodechar{δ}{\ensuremath{\delta}}
#+latex_header: \newunicodechar{λ}{\ensuremath{\lambda}}
#+latex_header: \newunicodechar{σ}{\ensuremath{\sigma}}
#+latex_header: \newunicodechar{ᵗ}{\ensuremath{^\texttt{t}}}
#+latex_header: \newunicodechar{′}{\ensuremath{'}}
#+latex_header: \newunicodechar{₁}{\ensuremath{_1}}
#+latex_header: \newunicodechar{₂}{\ensuremath{_2}}
#+latex_header: \newunicodechar{ₛ}{\ensuremath{_\texttt{s}}}
#+latex_header: \newunicodechar{ₜ}{\ensuremath{_\texttt{t}}}
#+latex_header: \newunicodechar{ℓ}{l}
#+latex_header: \newunicodechar{ℕ}{\ensuremath{\mathbb{N}}}
#+latex_header: \newunicodechar{ℚ}{\ensuremath{\mathbb{Q}}}
#+latex_header: \newunicodechar{ℝ}{\ensuremath{\mathbb{R}}}
#+latex_header: \newunicodechar{ℤ}{\ensuremath{\mathbb{Z}}}
#+latex_header: \newunicodechar{⇒}{\ensuremath{\Rightarrow}}
#+latex_header: \newunicodechar{∀}{\ensuremath{\forall}}
#+latex_header: \newunicodechar{∃}{\ensuremath{\exists}}
#+latex_header: \newunicodechar{∘}{\ensuremath{\circ}}
#+latex_header: \newunicodechar{∙}{\ensuremath{\cdot}}
#+latex_header: \newunicodechar{∧}{\ensuremath{\wedge}}
#+latex_header: \newunicodechar{∨}{\ensuremath{\vee}}
#+latex_header: \newunicodechar{∷}{\texttt{::}}
#+latex_header: \newunicodechar{≈}{\ensuremath{\approx}}
#+latex_header: \newunicodechar{≉}{\ensuremath{\not\approx}}
#+latex_header: \newunicodechar{≔}{\ensuremath{\coloneqq}}
#+latex_header: \newunicodechar{≟}{\ensuremath{\buildrel ?\over =}}
#+latex_header: \newunicodechar{≡}{\ensuremath{\equiv}}
#+latex_header: \newunicodechar{⊆}{\ensuremath{\subseteq}}
#+latex_header: \newunicodechar{⊎}{\ensuremath{\uplus}}
#+latex_header: \newunicodechar{⊤}{\ensuremath{\top}}
#+latex_header: \newunicodechar{⊥}{\ensuremath{\bot}}
#+latex_header: \newunicodechar{⌊}{\ensuremath{\lfloor}}
#+latex_header: \newunicodechar{⌋}{\ensuremath{\rfloor}}
#+latex_header: \newunicodechar{⟦}{\ensuremath{\llbracket}}
#+latex_header: \newunicodechar{⟧}{\ensuremath{\rrbracket}}
#+latex_header: \newunicodechar{⟶}{\ensuremath{\rightarrow}}
#+latex_header: \newunicodechar{⦃}{\{\{}
#+latex_header: \newunicodechar{⦄}{\}\}}
#+latex_header: \newunicodechar{𝕀}{\ensuremath{\mathbb{I}}}

#+latex_header: %TC:envir minted 1 other

#+begin_abstract
A good implementation of an algorithm should be both correct and fast. To
maximise performance, some algorithms are written in hand-tuned assembly. This
can introduce subtle bugs that invalidate correctness or other safety
properties. Whilst tools exist to help formally verify such algorithms, none
are designed to target the recent M-profile Vector Extension of the Armv8.1-M
architecture. This work describes a denotational semantics and a Hoare logic
semantics for the language used to specify the instructions, and attempts to
use them to formally verify the correctness of hand-written assembly for
cryptographic applications.
#+end_abstract

# Flip this all around. What am I doing? What is the stuff? Why is it hard? (Why
# am I smart?)
* Introduction

# Merge these two paras
In nearly all cases, the best implementation of an algorithm for a particular
purpose is both correct and fast. If the implementation is not correct, then it
does not implement the algorithm. If the implementation is slow, then resources
are wasted executing it that could instead be spent on other useful work.
Ensuring implementations are both performant and correct is typically
difficult.

One case that proves particularly tricky is writing assembly code.
# dubious claim
Most modern hand-written assembly is for cryptographic algorithms, where it is
vital that compilers do not introduce timing attacks or other side channels.
Hand-written assembly can also take advantage of microarchitectural differences
to gain a slight performance benefit.

Another domain that frequently deals with assembly code is the output of
compilers. Optimising compilers take the semantic operations described by
high-level code and attempt to produce the optimal machine code to perform those
actions.

In both of these cases, either humans or machines shuffle instructions around
in an attempt to eke out the best performance from the hardware. The resulting
code warps the original algorithm in ways that make it difficult to determine
whether the actions performed actually have the same result. The correctness of
the assembly code comes into question.

It is only possible to prove the assembly code is correct if there is a model
describing how it should behave. Suitable mathematical models of the behaviour
allow for the construction of formal proofs certifying the assembly code has the
correct behaviour. Due to the size and complexity of different instruction set
architectures (ISAs), formal verification of software in this manner makes use
of a number of software tools.

Formal verification software is well-developed for the x86-64 and Arm A-profile
ISAs. For instance, Jasmin [cite:@10.1145/3133956.3134078] is a programming
language which verifies that compiling to x86-64 preserves a set of user-defined
safety properties. A similar tool is CompCert [cite:@hal/01238879], a verified
C compiler, which ensures that program semantics are preserved during
compilation.

Unfortunately, little work has been done to formally verify software written for
the Arm M-profile architecture. This architecture is designed for low-power and
low-latency microcontrollers, which operate in resource-constrained
environments.

The goal of this project is to formally verify an assembly function for the Arm
M-profile architecture. The specific algorithm
[cite:@10.46586/tches.v2022.i1.482-505] is a hand-written, highly-optimised
assembly implementation of the number-theoretic transform (NTT), an important
procedure in post-quantum cryptography.

This work has made progress on two fronts. Most effort has been spent developing
an embedding and semantic model of the Armv8.1-M pseudocode specification
language within the Agda programming language
[cite:@10.1007/978-3-642-03359-9_6]. All instructions in the Armv8.1-M
architecture are given a "precise description" [cite:@arm/DDI0553B.s
§\(\texttt{I}_\texttt{RTDZ}\)] using the pseudocode, and by embedding the
pseudocode within Agda, it is possible to use Agda's powerful type system to
construct formal proofs of correctness.

Progress has also been made on the formal algebraic verification of Barrett
reduction, a subroutine of the NTT. Whilst a paper proof of the correctness of
Barrett reduction exists, a proof within Agda is necessary to prove the
correctness of the given implementation of the NTT.

# Focus on contributions
# Structure of the paper:
# - [[Armv8.1-M Pseudocode Specification Language]] describes the Armv8.1-M pseudocode
#   specification language. This is an imperative programming language used by the
#   Armv8.1-M manual to describe the operation of the instructions.
# - A simple recap of Hoare logic is given in [[Hoare Logic]]. This is the backbone of
#   one of the formal verification techniques used in this work.
# - [[Formal Definition of MSL]] contains a description of MSL. This is a language
#   similar to the Armv8.1-M pseudocode embedded within the Agda programming
#   language.
# - The denotational semantics and a Hoare logic semantics of MSL are detailed in
#   [[Semantics of MSL]]. Due to Agda's nature of being a dependently-typed language,
#   these are both given using Agda code.
# - [[Application to Proofs]] describes the experience of using the Hoare logic and
#   denotational semantics of MSL to prove the correctness of some simple
#   routines, given in [[Proofs using Hoare Semantics]] and [[Proofs using Denotational
#   Semantics]] respectively.
# - Formal verification of Barrett reduction, an important subroutine in the NTT,
#   is given in [[Proofs of Barrett Reduction]]. In particular, it proves that Barrett
#   reduction performs a modulo reduction and gives bounds on the size of the
#   result.
# - Finally, [[Future Work]] describes the steps necessary to complete the formal
#   verification of the NTT, as well as listing some other directions this work
#   can be taken.

* Background
The Armv8.1-M pseudocode specification language is used to describe the
operation of instructions in the Armv8-M instruction set [cite:@arm/DDI0553B.s §E1.1]. If the semantics of this pseudocode can be formalised, then it is
possible to formulate proofs about the action of all Armv8-M instructions, and
hence construct proofs about the algorithms written with them. The language is
rather simple, being pseudocode used as a descriptive aid, but has some
interesting design choices atypical of regular imperative programming languages.

As the pseudocode is an imperative language, one useful proof system for it is
Hoare logic. Hoare logic is a proof system driven by the syntax of a program,
with most of the hard work of proofs being in the form of choosing suitable
program invariants and solving simple logical implications
[cite:@10.1145/363235.363259]. As the logic is syntax driven, proofs using Hoare
logic are less impacted than other proof systems by the large number of loops
used in Armv8-M instruction descriptions.  Further, solving simple logical
implications is a task well-suited to Agda and other proof assistants, making
proofs even simpler to construct.

** Armv8.1-M Pseudocode Specification Language
The Armv8.1-M pseudocode specification language is a strongly-typed imperative
programming language [cite:@arm/DDI0553B.s §E1.2.1]. It has a first-order type
system, a small set of operators and basic control flow, as found in most
imperative languages. There are, however, some design choices atypical of
regular imperative languages, made to better fulfil the role of a descriptive
aid rather than an executable language.

Something common to nearly all imperative languages is the presence of a
primitive type for Booleans. Other typical type constructors are tuples,
structs, enumerations and fixed-length arrays. The first interesting choice made
by the pseudocode is to use mathematical integers as a primitive type. Most imperative
languages use fixed-width integers for primitive types, with exact integers
available through some library. This is because the performance benefits of
using fixed-width integers in code far outweigh the risk of overflow. However,
as the pseudocode is a descriptive aid, with no intention of being executed, it
can use exact mathematical integers and eliminate overflow errors without any
performance cost [cite:@arm/DDI0553B.s §E1.3.4].
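
To illustrate the difference, consider a simple example (not taken from the
manual): adding one to the largest 32-bit unsigned value wraps around in a
fixed-width representation, but not as a mathematical integer:

\[
\mathtt{0xFFFFFFFF} + 1 = 2^{32} \text{ (exact integer)}, \qquad
(\mathtt{0xFFFFFFFF} + 1) \bmod 2^{32} = 0 \text{ (32-bit arithmetic)}.
\]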

Another such type present in the pseudocode is mathematical real numbers. As
most real numbers cannot be represented in finite storage, any executable
programming language must compromise on the precision of real numbers.
Because the pseudocode does not concern itself with being executable, it is free
to use real numbers and have exact precision in real-number arithmetic
[cite:@arm/DDI0553B.s §E1.2.4].

The final primitive type used by the pseudocode is the bitstring: a fixed-length
sequence of 0s and 1s. Some readers may wonder what the difference is between
this type and arrays of Booleans. The justification given by
[cite/t:@arm/DDI0553B.s §E1.2.2] is more philosophical than practical:
"bitstrings are the only concrete data type in pseudocode". In some places,
bitstrings can be used instead of integers in arithmetic operations.

Most of the operators used by the pseudocode are unsurprising. For instance,
Booleans have the standard set of short-circuiting operations; integers and
reals have addition, subtraction and multiplication; reals have division; and
integers have integer division (division rounding to \(-\infty\)) and modulus
(the remainder of division).
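
Assuming the modulus is the remainder matching this division, so that
\(x = y \times (x\ \mathrm{DIV}\ y) + (x\ \mathrm{MOD}\ y)\), its result is
never negative for a positive divisor. For example:

\[
-7\ \mathrm{DIV}\ 2 = -4, \qquad -7\ \mathrm{MOD}\ 2 = -7 - (-4 \times 2) = 1.
\]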

By far the two most interesting operations in the pseudocode are bitstring
concatenation and slicing. Bitstring concatenation is much like appending two
arrays together, or regular string concatenation. Bitstring slicing is a more
nuanced process. Slicing a bitstring by a single index is no different from a
regular array access. If instead a bitstring is sliced by a range of integers,
the result is the concatenation of each single-bit access. Finally, when
integers are sliced instead of bitstrings, the pseudocode "treats an integer as
equivalent to a sufficiently long \textelp{} bitstring" [cite:@arm/DDI0553B.s
§E1.3.3].

The final interesting difference between the pseudocode and most imperative
languages is the variety of top-level items. The pseudocode has three forms of
items: procedures, functions and array-like functions. Procedures and functions
behave like procedures and functions of other imperative languages. The
arguments to them are passed by value, and the only difference between the two
is that procedures do not return values whilst functions do
[cite:@arm/DDI0553B.s §E1.4.2].

Array-like functions act as getters and setters for machine state. Every
array-like function has a reader form, and most have a writer form. This
distinction exists because "reading from and writing to an array element require
different functions" [cite:@arm/DDI0553B.s §E1.4.2], likely due to the nature
of some machine registers being read-only instead of read-writeable. The writer
form acts as one of the targets of assignment expressions, along with variables
and the result of bitstring concatenation and slicing [cite:@arm/DDI0553B.s
§E1.3.5].

** Hoare Logic
Hoare logic is a proof system for programs written in imperative programming
languages. At its core, the logic describes how to build partial correctness
triples, which describe how program statements affect assertions about machine
state. The bulk of a Hoare logic derivation is dependent only on the syntax of
the program the proof targets.

A partial correctness triple is a relation between a precondition \(P\), a
program statement \(s\) and a postcondition \(Q\). If \((P , s , Q)\) is a
partial correctness triple, then whenever \(P\) holds for some machine state and
\(s\) is executed from that state, \(Q\) holds for the state reached after \(s\)
terminates [cite:@10.1145/363235.363259]. The qualification "after it
terminates" is what makes the relation a /partial/ correctness triple. If all
statements are guaranteed to terminate, as we will see later is the case here,
then the relation is called a (total) correctness triple.
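
For example, writing a triple \((P, s, Q)\) in the usual \(\{P\}\ s\ \{Q\}\)
notation, two of the syntax-directed rules are the axiom for the do-nothing
statement skip and the rule for sequential composition:

\[
\{P\}\ \mathtt{skip}\ \{P\}
\qquad\qquad
\frac{\{P\}\ s_1\ \{R\} \qquad \{R\}\ s_2\ \{Q\}}{\{P\}\ s_1 ; s_2\ \{Q\}}
\]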

Along with the syntactic rules for derivations, Hoare logic typically also
features a number of adaptation rules. The most widely known of these is the
rule of consequence, which can strengthen the precondition and weaken the
postcondition. This requires an additional logic for assertions. Typically, this
is first-order or higher-order logic
[cite:@10.1007/s00165-019-00501-3;@10.1007/s001650050057].
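
In this notation, the rule of consequence takes the following form, where the
side conditions \(P \implies P'\) and \(Q' \implies Q\) are exactly the logical
implications discharged in the assertion logic:

\[
\frac{P \implies P' \qquad \{P'\}\ s\ \{Q'\} \qquad Q' \implies Q}{\{P\}\ s\ \{Q\}}
\]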

One vital feature of Hoare logic with regard to specification is auxiliary
variables. These are variables that cannot be used by programs, and hence remain
constant between the precondition and postcondition
[cite:@10.1007/s001650050057].

* The MIPSL Language
The ultimate goal of this project is to formally verify an implementation of the
number theoretic transform using the M-profile vector extension of the Armv8.1-M
architecture. Work has proceeded on two fronts. This chapter discusses the
first: a formal definition of a language similar to the Armv8-M pseudocode
specification language, which I will call MIPSL (MIPSL imitates pseudocode
specification language). This definition exists within Agda, allowing for the
use of Agda's dependent-typing features to construct proofs about programs
written in MIPSL.

To construct proofs about how MIPSL behaves, it is necessary to describe its
semantics. This is done through providing a denotational semantics. Denotational
semantics interpret program expressions and statements as mathematical
functions, something which Agda is well-suited to do.

One downside of a denotational semantics is that the control flow of looping
constructs is fully evaluated. This is inefficient for loops that undergo many
iterations. This can be resolved by a syntax-directed Hoare logic for MIPSL.
Hoare logic derivations assign a precondition and a postcondition assertion to
each statement, and these are chained together through a number of simple
logical implications.

** Definition of MIPSL
MIPSL is a language similar to the Armv8-M pseudocode specification language
written entirely in Agda. Unfortunately, the pseudocode has a number of small
features that make it difficult to work with in Agda directly. MIPSL makes a
number of small changes to the pseudocode to better facilitate this embedding,
typically generalising existing features of the pseudocode.

*** MIPSL Types

#+name: MIPSL-types
#+caption: The Agda datatype representing the types present in MIPSL. Most have
#+caption: a direct analogue in the Armv8-M pseudocode specification language
#+attr_latex: :float t
#+begin_src agda2
data Type : Set where
  bool  : Type
  int   : Type
  fin   : (n : ℕ) → Type
  real  : Type
  tuple : Vec Type n → Type
  array : Type → (n : ℕ) → Type
#+end_src

[[MIPSL-types]] gives the Agda datatype representing the types of MIPSL. Most of
these have a direct analogue among the pseudocode types. For example, ~bool~ is
for Booleans, ~int~ for mathematical integers, ~real~ for real numbers, and
~array~ constructs array types. Instead of an enumeration construct, MIPSL uses
the ~fin n~ type, representing a finite set of ~n~ elements. Similarly, structs
are represented by ~tuple~ types.

The most significant difference between the pseudocode and MIPSL is the
representation of bitstrings. Whilst the pseudocode has the ~bits~ datatype,
MIPSL instead treats bitstrings as an array of Booleans, demonstrated by this
code fragment:

#+begin_src agda2
bit : Type
bit = bool

bits : ℕ → Type
bits = array bit
#+end_src

This removes the distinction between arrays and bitstrings, and allows a number
of operations to be generalised to work on both types. This makes MIPSL more
expressive than the pseudocode, in the sense that there are more, and more
concise, ways to write functionally equivalent programs.

#+name: MIPSL-type-properties
#+caption: Prototypes for the three predicates on types.
#+attr_latex: :float t
#+begin_src agda2
data HasEquality : Type → Set
data Ordered     : Type → Set
data IsNumeric   : Type → Set
#+end_src

The pseudocode implicitly specifies three different properties of types: equality
comparisons, order comparisons and arithmetic operations. Whilst the types
satisfying these properties need to be listed explicitly in Agda, using instance
arguments allows for these proofs to be elided whenever they are required.
[[MIPSL-type-properties]] gives the type signatures of the three predicates.

Compared to the pseudocode, MIPSL differs in only two places regarding which
types satisfy these properties. First, an array type has equality as long as its
element type also has equality. This is a natural generalisation, and allows the
MIPSL formulation of bitstrings as arrays of Booleans to have equality.
Secondly, finite sets also have an ordering. This change is primarily a
convenience for comparing finite sets that represent a subset of the integers.
As the pseudocode has no ordering comparisons between enumerations, this causes
no problems for converting pseudocode programs into MIPSL.

#+name: MIPSL-type-addition
#+caption: Definition of the "type addition" feature of MIPSL.
#+attr_latex: :float t
#+begin_src agda2
_+ᵗ_ : IsNumeric t₁ → IsNumeric t₂ → Type
int  +ᵗ int  = int
int  +ᵗ real = real
real +ᵗ t₂   = real
#+end_src

The final interesting feature of the types in MIPSL is "type addition". As
pseudocode arithmetic is polymorphic for integers and reals, MIPSL needs a
function to decide the type of the result. [[MIPSL-type-addition]] gives the
definition of this function.

*** MIPSL Expressions

#+name: MIPSL-literalType
#+caption: Mappings from MIPSL types into Agda types which can be used as
#+caption: literal values. ~literalTypes~ is a function that returns a product
#+caption: of the types given in the argument.
#+begin_src agda2
literalType : Type → Set
literalType bool        = Bool
literalType int         = ℤ
literalType (fin n)     = Fin n
literalType real        = ℤ
literalType (tuple ts)  = literalTypes ts
literalType (array t n) = Vec (literalType t) n
#+end_src

Unlike the pseudocode, where only a few types have literal expressions, every
type in MIPSL has a literal form. This mapping is given by the ~literalType~
function in [[MIPSL-literalType]]. Most MIPSL literals accept the
corresponding Agda type as a value. For instance, ~bool~ literals are Agda
Booleans, and ~array~ literals are fixed-length Agda vectors of the
corresponding underlying type. The only exception to this rule is for ~real~
values. As Agda does not have a type representing mathematical reals, integers
are used instead.

# TODO: why is this sufficient?

#+name: MIPSL-expr-prototypes
#+caption: Prototypes of the numerous MIPSL program elements. Each one takes two
#+caption: variable contexts: ~Σ~ for global variables and ~Γ~ for local variables.
#+attr_latex: :float t
#+begin_src agda2
data Expression     (Σ : Vec Type o) (Γ : Vec Type n) : Type → Set
data Reference      (Σ : Vec Type o) (Γ : Vec Type n) : Type → Set
data LocalReference (Σ : Vec Type o) (Γ : Vec Type n) : Type → Set
data Statement      (Σ : Vec Type o) (Γ : Vec Type n) : Set
data LocalStatement (Σ : Vec Type o) (Γ : Vec Type n) : Set
data Function       (Σ : Vec Type o) (Γ : Vec Type n) (ret : Type) : Set
data Procedure      (Σ : Vec Type o) (Γ : Vec Type n) : Set
#+end_src

One important benefit of using Agda to define MIPSL is that the MIPSL types of
variables can be made part of the Agda types of expressions and statements.
This makes checking the type safety of variable usage trivial.

For reasons which will be discussed later, MIPSL uses two variable contexts, ~Σ~
for global variables and ~Γ~ for local variables. [[MIPSL-expr-prototypes]] lists
the prototypes for the various MIPSL program elements. The full definitions of
these types are given in [[*MIPSL Syntax Definition]].

An ~Expression~ in MIPSL corresponds with expressions in the pseudocode. Many
operators are identical to those in the pseudocode (like ~+~, ~*~, ~-~), and
others are simple renamings (like ~≟~ instead of ~==~ for equality comparisons).
Unlike the pseudocode, where literals can appear unqualified, MIPSL literals
are introduced by the ~lit~ constructor.

The most immediate change when programming in MIPSL rather than the pseudocode
is how variables are handled. Because the ~Expression~ type carries fixed-length
vectors listing the MIPSL types of variables, a variable is referred to by its
index into the context. For example, a variable context \(\{x \mapsto
\mathrm{int}, y \mapsto \mathrm{real}\}\) is represented in MIPSL as the
context ~int ∷ real ∷ []~. The variable \(x\) is then represented by ~var 0F~ in
MIPSL. Because the ~Expression~ type keeps the global and local variable
contexts separate, global and local variables are constructed using ~state~ and
~var~ respectively.

Whilst this decision introduces much complexity to programming using MIPSL, it
greatly simplifies the language for use in constructing proofs. For instance, by
referring to variables by their index in the variable context, variable
shadowing, where two variables share a name, is completely eliminated from
MIPSL.
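
As a small illustration of this representation (a sketch only: it assumes the
~_+_~ constructor is typed via the "type addition" rule described earlier), the
pseudocode expression \(x + 1\) in the ~int ∷ real ∷ []~ context from the
example above could be written as follows:

#+begin_src agda2
-- Sketch: Σ is an arbitrary global context; the local context is
-- int ∷ real ∷ [], so var 0F denotes the int-typed variable x.
xPlusOne : Expression Σ (int ∷ real ∷ []) int
xPlusOne = var 0F + lit (ℤ.+ 1)
#+end_src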

MIPSL expressions also add a number of useful constructs to the pseudocode type.
One such pair is ~[_]~ and ~unbox~, which construct and destruct an array of
length one respectively. Another pair are ~fin~, which allows for arbitrary
computations on elements of finite sets, and ~asInt~, which converts a finite
value into an integer.

The three MIPSL operators most likely to cause confusion are ~merge~, ~slice~
and ~cut~. ~slice~ is analogous to bitstring slicing from the pseudocode, but
has some important differences. Instead of operating on bitstrings and integers,
~slice~ acts on ~array~ types. Integer slicing can be expressed using the
other operators of MIPSL, and in MIPSL a bitstring is a form of array. Another
difference is that the number of bits to take from the string is not specified
by a range, but is instead inferred from the output type. This is only possible
due to the powerful type inference features of Agda. Finally, the user only
needs to provide a single index into the array. By giving the index a ~fin~
datatype, and taking care with the length of arrays, MIPSL can guarantee that
the array indices are always in bounds for the entire range that is required.

The ~cut~ operation is the opposite of ~slice~. Whilst ~slice~ takes a contiguous
range of elements from the middle of an array, ~cut~ returns the two ends of the
array outside of that range. Finally, ~merge~ is a generalisation of
concatenation that allows inserting one array into another. These three
operations are related by an equivalence: for any arrays ~x~ and ~y~ and an
appropriate index ~i~, we have the following equalities:
- ~merge (slice x i) (cut x i) i ≟ x~
- ~slice (merge x y i) i ≟ x~
- ~cut (merge x y i) i ≟ y~

The ~Reference~ type is the name MIPSL gives to assignable expressions from the
pseudocode. The ~LocalReference~ type is identical to ~Reference~, except it
does not include global variables. In an earlier form of MIPSL, instead of
separate types for assignable expressions which can and cannot assign to state,
there were two predicates. However, this required carrying around a proof that
the predicate holds with each assignment. Whilst the impacts on performance were
unmeasured, it made proving statements with assignable expressions significantly
more difficult. Thankfully, Agda is able to resolve overloaded data type
constructors without much difficulty, meaning the use of ~Reference~ and
~LocalReference~ in MIPSL programs is transparent.

**** Example MIPSL Expressions
Readers of [[*MIPSL Syntax Definition]] may notice that, whilst the pseudocode
defines a left-shift operator, there is no such operator in MIPSL. Left shift
can instead be defined in the following way:

#+begin_src agda2
_<<_ : Expression Σ Γ int → (n : ℕ) → Expression Σ Γ int
e << n = e * lit (ℤ.+ (2 ℕ.^ n))
#+end_src

This simple-looking definition has a lot of hidden complexity. First, consider
the type of the literal expression. The unary plus operation tells us that the
literal is an Agda integer. However, there are two MIPSL types with Agda
integers for literal values: ~int~ and ~real~. How does Agda correctly infer the
type? Recall that multiplication is polymorphic in MIPSL, with the result type
determined by "type addition" ([[MIPSL-type-addition]]). Agda knows that the
multiplication must return an ~int~, and that the first argument is also an
~int~, so by following the definition of ~_+ᵗ_~, it can infer that the second
multiplicand is an integer literal.
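
As a usage example (a sketch; the numeric value is arbitrary), shifting the
integer literal \(3\) left by two places gives an expression denoting \(12\):

#+begin_src agda2
-- Sketch: the annotation on the result resolves the literal to type int.
twelve : Expression Σ Γ int
twelve = lit (ℤ.+ 3) << 2
#+end_src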

The previous section claimed that it is possible to define integer slicing
entirely using the other operations in MIPSL. Here is an expression that slices
a single bit from an integer, following the procedure by [cite/t:@arm/DDI0553B.s
§E1.3.3]:

#+begin_src agda2
getBit : ℕ → Expression Σ Γ int → Expression Σ Γ bit
getBit i x =
  if x - x >> suc i << suc i <? lit (ℤ.+ (2 ℕ.^ i))
  then lit false
  else lit true
#+end_src

The left-hand side of the inequality computes the residue of ~x~ modulo
\(2^{i+1}\). Note that right-shift is defined to always round down, hence this
residue is always non-negative. If the residue is less than \(2^i\), then bit
\(i\) in the two's complement representation of ~x~ is ~0~, otherwise it is ~1~.
This is reflected in the result values ~lit false~ and ~lit true~ respectively.
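
As a quick sanity check (a worked example, not taken from the source), consider
~x~ denoting \(6 = 110_2\) and ~i = 1~, so both shifts are by two places:

\[
6 - ((6 \gg 2) \ll 2) = 6 - 4 = 2, \qquad 2 \not< 2^1,
\]

so ~getBit 1 x~ evaluates to the ~lit true~ branch, matching bit \(1\) of
\(110_2\).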

The last expression I will present is the ~uint~ function, which converts a
bitstring into an unsigned integer.

#+begin_src agda2
uint : Expression Σ Γ (bits m) → Expression Σ Γ int
uint {m = 0}           x = lit ℤ.0ℤ
uint {m = 1}           x =
  if unbox x
  then lit ℤ.1ℤ
  else lit ℤ.0ℤ
uint {m = suc (suc m)} x =
  lit (ℤ.+ 2) * uint (slice x (lit 1F)) +
  uint (cut x (lit 1F))
#+end_src

This Agda function is inductive on the number of bits in the argument. In the
first case, the unique zero-length bitstring is represented by the constant
\(0\). In the second case, a single bit is converted by a conditional
expression: the integer \(1\) if the bit is true and \(0\) otherwise. If there
are two or more bits, the resulting expression sums two expressions. The left
addend is twice the recursive call on the high bits of the input. The right
addend is the representation of the least-significant bit as an integer.

In more concrete terms, here is the result of ~uint~ on a 4-bit string:

#+begin_src agda2
lit 2 *
(lit 2 *
 (lit 2 *
  (if unbox (slice (slice (slice x (lit 1F)) (lit 1F)) (lit 1F))
   then lit 1
   else lit 0)
  +
  (if unbox (cut (slice (slice x (lit 1F)) (lit 1F)) (lit 1F))
   then lit 1
   else lit 0))
 +
 (if unbox (cut (slice x (lit 1F)) (lit 1F))
  then lit 1
  else lit 0))
+
(if unbox (cut x (lit 1F))
 then lit 1
 else lit 0)
#+end_src

# FIXME: why did I give this example at all? It's ugly, hard to understand, and
# if anything only demonstrates why my project fails to work efficiently. This
# will be cut if no one can find a purpose for it during review.

*** MIPSL Statements
MIPSL statements form a subset of statements allowed in the pseudocode. There
are three omissions: ~while...do~ loops, ~repeat...until~ loops and
~try...catch~ exception handling. Each of these structures is only used in
high-level constructs, such as the top-level execution loop
[cite:@arm/DDI0553B.s §E2.1.397], converting reals into floating-point
bitstrings [cite:@arm/DDI0553B.s §E2.1.166] and inserting instructions into the
instruction queue [cite:@arm/DDI0553B.s §E2.1.366].

The ultimate goal of MIPSL is to accurately represent the operation of a
specific instruction sequence on visible registers and memory, rather than to
simulate the entirety of processor state. Hence these high-level parts of the
processor behaviour, like managing the instruction queue, are not relevant.

Another argument for excluding these constructs is that they would vastly
complicate the semantics of MIPSL. Of particular concern is that
potentially-unending loops, such as ~while...do~ loops, would violate the
termination checking of Agda functions if they were included in a denotational
semantics.

Most of the statements that are present in MIPSL are unsurprising. The ~skip~
and sequencing (~_∙_~) statements should be familiar from the discussion on
Hoare logic, the assignment statement (~_≔_~) assigns a value into a reference,
the ~invoke~ statement calls a procedure and the ~if_then_else_~ statement
starts a conditional block.

Given that MIPSL has a ~skip~ statement and an ~if_then_else_~ control-flow
structure, including the ~if_then_~ statement may appear redundant. Ultimately,
the statement is redundant. It is nevertheless included in MIPSL for two
reasons. The first is ergonomics: ~if_then_~ statements appear so much more
often in the pseudocode than ~if_then_else_~ statements that omitting the former
would only serve to complicate the code. The other reason is that including a
dedicated ~if_then_~ statement makes the behaviour of a number of functions that
manipulate MIPSL code much easier to reason about.

The form of variable declarations in MIPSL is significantly different from that
in the pseudocode. As variables in MIPSL are accessed by index into the variable
context instead of by name, MIPSL variable declarations do not need a name. In
addition, Agda can often infer the type of a declared variable from the context
in which it is used, making type annotations unnecessary. The last difference is
that all variables in MIPSL must be initialised. This simplifies the semantics
of MIPSL greatly, and prevents the use of uninitialised variables.

MIPSL makes a small modification to ~for~ loops that greatly improves the type
safety over what is achieved by the pseudocode. Instead of looping over a range
of dynamic values [cite:@arm/DDI0553B.s §E1.4.4], MIPSL loops perform a static
number of iterations, determined by an Agda natural ~n~. Then, instead of the
loop variable being an assignable integer expression, MIPSL introduces a new
variable with type ~fin n~.

MIPSL has a ~LocalStatement~ type as well as a ~Statement~ type. Whilst
~Statement~ can assign values into any ~Reference~, a ~LocalStatement~ can only
assign values into a ~LocalReference~. This means that ~LocalStatement~ cannot
modify global state, only local state.

**** Example MIPSL Statements
Here is a statement that copies elements from ~y~ into ~x~ if the corresponding
entry in ~mask~ is true:

#+begin_src agda2
copy : Statement Σ (array t n ∷ array t n ∷ array bool n ∷ [])
copy =
  for n (
    let i = var 0F in
    let x = var 1F in
    let y = var 2F in
    let mask = var 3F in

    if index mask i ≟ lit true
    then
        *index x i ≔ index y i
  )
#+end_src

This uses Agda functions ~index~ and ~*index~ to apply the appropriate slices,
casts and unboxing to extract an element from an array expression and reference,
respectively. One thing of note is the use of ~let...in~ to give variables
meaningful names. This is a stylistic choice that works well in this case.
Unfortunately, if the body of the ~if_then_~ statement declared a new variable,
these named aliases would become useless, as the variable context, and hence the
indices, would change. For example, consider the following snippet:

#+begin_src agda2
VPTAdvance : Statement State (beat ∷ [])
VPTAdvance =
  declare (fin div2 (tup (var 0F ∷ []))) (
  declare (elem 4 (! VPR-mask) (var 0F)) (
    let vptState = var 0F in
    let maskId = var 1F in
    let beat = var 2F in

    if ! vptState ≟ lit (true ∷ false ∷ false ∷ false ∷ [])
    then
      vptState ≔ lit (Vec.replicate false)
    else if inv (! vptState ≟ lit (Vec.replicate false))
    then (
      declare (lit false) (
        let i = var 0F in
        let vptState = var 1F in
        -- let mask = var 2F in
        let beat = var 3F in

        cons vptState (cons i nil) ≔ call (LSL-C 0) (! vptState ∷ []) ∙
        if ! i
        then
          *elem 4 VPR-P0 beat ≔ not (elem 4 (! VPR-P0) beat))) ∙
    if getBit 0 (asInt beat)
    then
      *elem 4 VPR-mask maskId ≔ ! vptState))
#+end_src

This corresponds to the ~VPTAdvance~ procedure given by [cite/t:@arm/DDI0553B.s
§E2.1.424]. Notice how every time a new variable is introduced, the variable
names have to be restated. Whilst this is a barrier when writing programs in
MIPSL, the type-safety guarantees and simplified proofs, compared with using
named variables, more than make up for the inconvenience.

*** MIPSL Functions and Procedures
Much like how a procedure in the pseudocode is a wrapper around a block of
statements, ~Procedure~ in MIPSL is a wrapper around ~Statement~. Note that
MIPSL procedures only have one exit point, the end of the statement, unlike the
pseudocode, which has ~return~ statements. Any procedure using a ~return~
statement can be transformed into one that does not by a simple refactoring, so
MIPSL does not lose any expressive power over the pseudocode.

MIPSL functions are more complex than procedures. A function consists of a pair
of an ~Expression~ and ~LocalStatement~. The statement has the function
arguments and the return value as local variables, where the return value is
initialised to the result of the expression. The return value of the function is
then the final value of the return variable.

Functions and procedures are the reason MIPSL has two variable contexts, one
each for the global and local variables. The set of global variables is
unchanged across function and procedure boundaries. However, the set of local
variables is completely different. Having a separate context for each in the
type is the cleanest way to represent this separation in Agda.

**** Example MIPSL Functions and Procedures
As ~Procedure~ is almost an alias for ~Statement~, examples of procedures can be
found in [[*Example MIPSL Statements]]. This is a simple function that converts a
bitstring to an unsigned or signed integer, depending on whether the second
argument is true or false:

#+begin_src agda2
Int : Function State (bits n ∷ bool ∷ []) int
Int =
  init
    if var 1F
    then uint (var 0F)
    else sint (var 0F) ∙
    skip
  end
#+end_src

The function body is the ~skip~ statement, meaning that whatever is initially
assigned to the return variable is the result of calling the function. The
initial value of the return variable is a simple conditional statement, calling
~uint~ or ~sint~ on the first argument as appropriate. Many functions that are
easy to inline have this form.

The ~GetCurInstrBeat~ function given by [cite/t:@arm/DDI0553B.s §E2.1.185] is one
function that benefits from the unusual representation of functions. A
simplified MIPSL version is given below.

#+begin_src agda2
GetCurInstrBeat : Function State [] (tuple (beat ∷ elmtMask ∷ []))
GetCurInstrBeat =
  init
    tup (! BeatId ∷ lit (Vec.replicate true) ∷ []) ∙ (
      let outA = head (var 0F) in
      let outB = head (tail (var 0F)) in
      if call VPTActive (! BeatId ∷ [])
      then
        outB ≔ !! outB and elem 4 (! VPR-P0) outA
    )
  end
#+end_src

The function initialises a default return value, and then modifies it based on
the current state of execution. This is easy to encode in the MIPSL function
syntax. The return variable is initialised to the default value, and the
function body performs the necessary manipulations.

In this way a function is much like a ~declare~ statement. However, instead of
discarding the declared variable when it leaves scope, a function returns it to
the caller.

** MIPSL Semantics
The ultimate goal of this work is to formally verify an assembly algorithm
written for the Armv8.1-M architecture that performs the number theoretic
transform. So far we have discussed the syntactic form of MIPSL, which is a
language similar to the Armv8-M pseudocode specification language, written in
Agda. We have also given a brief high-level semantics of MIPSL. Formal
verification requires a much more detailed description of the semantics than
what has been given so far.

This section starts with a brief discussion of how to model MIPSL types. This
addresses the burning question of how to model real numbers in Agda.  From this,
we discuss the denotational semantics of MIPSL, and how MIPSL program elements
can be converted into a number of different Agda function types. The section
ends with a presentation of a Hoare logic for MIPSL, allowing for efficient
syntax-directed proofs of statements.

*** MIPSL Datatype Models
#+name: MIPSL-type-models
#+caption: The semantic encoding of MIPSL data types. The use of ~Lift~ is to
#+caption: ensure all the encodings occupy the same Agda universe level.
#+begin_src agda2
⟦_⟧ₜ  : Type → Set ℓ
⟦_⟧ₜₛ : Vec Type n → Set ℓ

⟦ bool ⟧ₜ      = Lift ℓ Bool
⟦ int ⟧ₜ       = Lift ℓ ℤ
⟦ fin n ⟧ₜ     = Lift ℓ (Fin n)
⟦ real ⟧ₜ      = Lift ℓ ℝ
⟦ tuple ts ⟧ₜ  = ⟦ ts ⟧ₜₛ
⟦ array t n ⟧ₜ = Vec ⟦ t ⟧ₜ n

⟦ [] ⟧ₜₛ          = Lift ℓ ⊤
⟦ t ∷ [] ⟧ₜₛ      = ⟦ t ⟧ₜ
⟦ t ∷ t₁ ∷ ts ⟧ₜₛ = ⟦ t ⟧ₜ × ⟦ t₁ ∷ ts ⟧ₜₛ
#+end_src

To be able to write a denotational semantics for a language, the first step is
to find a suitable encoding for the data types. In this case, we have to be able
to find encodings of MIPSL types within Agda. [[MIPSL-type-models]] shows the full
encoding function. Most of the choices are fairly trivial: Agda Booleans for
~bool~, Agda vectors for ~array t n~ and the Agda finite set type ~Fin n~ for
the MIPSL type ~fin n~.

# RESOLVED: bit is an alias for bool.
# In this encoding, ~bool~ and ~bit~ are both represented by Agda Booleans. This
# again begs the question of why they are distinct types in MIPSL and are not
# unified into one. The philosophical reasoning used by (FIXME: textcite) is now
# diluted; ~array~ is used for abstract arrays and physical bitstrings. (FIXME:
# why are they distinct?)

Tuples are the next simplest type, being encoded as an n-ary product. This is
the action of the ~⟦_⟧ₜₛ~ function in [[MIPSL-type-models]]. Unfortunately, the
Agda standard library does not have a dependent n-ary product type, and even if
it did, the Agda type checker would not accept its use here due to termination
checking; hence the manual inductive definition.

(ALTERNATIVE 1: ~int~ stays as abstract discrete ordered commutative ring)

The other two MIPSL types are ~int~ and ~real~. Whilst ~int~ could feasibly be
encoded by the Agda integer type, there is no useful Agda encoding for
mathematical real numbers. Because of this, both numeric types are represented
by abstract types with the appropriate properties. ~int~ is represented by a
discrete ordered commutative ring ℤ and ~real~ by a field ℝ. We also require
that there is a split ring monomorphism \(\mathtt{/1} : ℤ \to ℝ\) with
retraction \(\mathtt{⌊\_⌋} : ℝ \to ℤ\). \(\mathtt{⌊\_⌋}\) may not be a ring
homomorphism, but it must preserve \(\le\) ordering and satisfy the floor
property:

\[
\forall x \in ℝ,\, y \in ℤ.\quad x < y\,\mathtt{/1} \implies ⌊ x ⌋ < y
\]

(ALTERNATIVE 2: ~int~ becomes integer and ~real~ stays abstract)

The other two MIPSL types are ~int~ and ~real~. ~int~ is encoded by the Agda
integer type. However, there is no useful Agda encoding for mathematical real
numbers. Because of this, ~real~ is represented by an abstract type ℝ that forms
a field. We also require that there is a split ring monomorphism \(\mathtt{/1} :
ℤ \to ℝ\) with retraction \(\mathtt{⌊\_⌋} : ℝ \to ℤ\). \(\mathtt{⌊\_⌋}\) may not
be a ring homomorphism, but it must preserve \(\le\) ordering and satisfy the
floor property:

\[
\forall x \in ℝ,\, y \in ℤ.\quad x < y\,\mathtt{/1} \implies ⌊ x ⌋ < y
\]

(ALTERNATIVE 3: ~real~ becomes rational.)

The other two MIPSL types are ~int~ and ~real~. ~int~ is encoded by the Agda
integer type. However, there is no useful Agda encoding for mathematical real
numbers. This can be approximated using the Agda rational type ℚ. Whilst this
clearly cannot encode all real numbers, it satisfies nearly all of the
properties required by the pseudocode real-number type. The only missing
operation is square-root, which is unnecessary for the proofs MIPSL is designed
for.

(END ALTERNATIVES)

*** Denotational Semantics

#+name: MIPSL-denotational-prototypes
#+caption: Function prototypes for the denotational semantics of different MIPSL
#+caption: program elements. All of them become functions from the current
#+caption: variable context into some return value.
#+begin_src agda2
expr      : Expression Σ Γ t        → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ t ⟧ₜ
exprs     : All (Expression Σ Γ) ts → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ ts ⟧ₜₛ
ref       : Reference Σ Γ t         → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ t ⟧ₜ
locRef    : LocalReference Σ Γ t    → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ t ⟧ₜ
stmt      : Statement Σ Γ           → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ
locStmt   : LocalStatement Σ Γ      → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ Γ ⟧ₜₛ
fun       : Function Σ Γ t          → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ t ⟧ₜ
proc      : Procedure Σ Γ           → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ Σ ⟧ₜₛ
#+end_src

The denotational semantics has to represent the different MIPSL program elements
as mathematical objects. In this case, due to careful design of MIPSL's syntax,
each of the elements is represented by a total function.
[[MIPSL-denotational-prototypes]] shows the prototypes of the different semantic
interpretation functions, and the full definition is in [[*MIPSL Denotational
Semantics]]. Each function accepts the current variable context as an argument.
Because the variable contexts are an ordered sequence of values of different
types, they can be encoded in the same way as tuples.

**** Expression Semantics

The semantic representation of an expression converts the current variable
context into a value with the same type as the expression. Most cases are
simple. For example, addition is the sum of the values of the two
subexpressions, computed recursively. One of the more interesting cases is
global and local variables, although even this is only a lookup of the current
value in the variable context. The lookup is guaranteed to be safe because
variables are, by construction, indices into the current context. Despite both
being subsets of the ~Expression~ type, ~Reference~ and ~LocalReference~ require
their own functions to satisfy the demands of the termination checker.
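
For instance, the clause for addition has the usual compositional shape, written
here as the intended equation rather than the literal Agda clause (which also
has to mediate the polymorphic numeric types):

\[
\mathrm{expr}\ (e_1 + e_2)\ (\sigma , \gamma) =
\mathrm{expr}\ e_1\ (\sigma , \gamma) + \mathrm{expr}\ e_2\ (\sigma , \gamma)
\]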

One notable simplification in this definition is that evaluation order is never
considered. Due to the design choices that MIPSL functions cannot modify global
state and that no MIPSL expression can modify state, expressions have the same
value regardless of the order in which sub-expressions are evaluated. This is
also reflected in the type of the denotational representation of expressions: it
can only return a value, not a modified version of the state.

**** Assignment Semantics
#+name: MIPSL-denotational-assign-prototypes
#+caption: Function prototypes for the ~assign~ and ~locAssign~ helper
#+caption: functions. The arguments are the reference, new value, original
#+caption: variable context and the context to update. The original context is
#+caption: needed to evaluate expressions within the reference.
#+begin_src agda2
assign    : Reference Σ Γ t      → ⟦ t ⟧ₜ → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ →
            ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ
locAssign : LocalReference Σ Γ t → ⟦ t ⟧ₜ → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ →
            ⟦ Γ ⟧ₜₛ → ⟦ Γ ⟧ₜₛ
#+end_src

Before considering statements as a whole, we start with assignment statements.
If assignments were only into variables, this would be a trivial update to the
relevant part of the context. However, the use of ~Reference~ makes things more
tricky. Broadly speaking, there are four types of ~Reference~: terminal
references like ~state~, ~var~ and ~nil~; isomorphism operations like ~unbox~,
~[_]~ and ~cast~; product operations like ~merge~ and ~cons~; and projection
operations like ~slice~, ~cut~, ~head~ and ~tail~.

We will consider how to update each of the four types of references in turn,
which is the action performed by helper functions ~assign~ and ~locAssign~, the
signatures of which are given in [[MIPSL-denotational-assign-prototypes]].

Terminal references are the base case and are easy to handle. Assigning into
~state~ and ~var~ updates the relevant part of the variable context, whilst
assigning into ~nil~, the unique nullary product, is a no-op. Isomorphism
reference operations are also relatively simple to assign into: transform the
assigned value using the inverse operation, and assign the result into the
sub-reference. For example, the assignment ~[ ref ] ≔ v~ is the same as ~ref ≔
unbox v~.

Assigning into product reference operations is also not too complex: assign into
each component in turn. For example, ~cons r₁ r₂ ≔ v~ is equivalent to ~r₁ ≔
head v ∙ r₂ ≔ tail v~. However, some care must be taken to ensure that any
expressions in the second assignment use the original, not the updated, variable
context. This is why ~assign~ and ~locAssign~ take the extra context argument.

The final type of reference to consider is the projection reference
operations. Assigning into one projection of a reference leaves the other part
unchanged. Consider the assignment ~head r ≔ v~ as an example. It is equivalent
to ~r ≔ cons v (tail r)~, which makes it clear that the second projection
remains constant. As before, the second projection must be computed using the
original variable context.
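
As a small illustration of this equivalence, the following sketch writes both
sides of the ~head~ case as MIPSL statements, assuming a hypothetical single
global variable whose type is a pair of integers:

#+begin_src agda2
-- Hypothetical context: one global of type tuple (int ∷ int ∷ []), no locals.
head-assign expanded : Statement [ tuple (int ∷ int ∷ []) ] []
head-assign = head (state 0F) ≔ lit 1ℤ
expanded    = state 0F ≔ cons (lit 1ℤ) (tail (state 0F))

-- Both statements update the first component of the pair and leave the second
-- untouched; in expanded, the expression tail (state 0F) is evaluated in the
-- original context, matching the extra context argument of assign.
#+end_src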

This interplay between product and projection reference types is a large part of
the reason why MIPSL has ~merge~, ~cut~ and ~slice~ instead of the bitstring
concatenation and slicing present in the Armv8-M pseudocode specification
language.  There is no way to form a product-projection triple with only
bitstring joining and slicing, so any denotational semantics with these
operations would require merge and cut operations on the encoding of values.
MIPSL takes these semantic necessities and makes them available to programmers.

Given a reference, a new value and the original context, ~assign~ and
~locAssign~ return unary operations on the full and local variable contexts
respectively. As ~Reference~ includes both ~state~ and ~var~, assigning into a
reference can modify both global and local variables. In contrast,
~LocalReference~ only features ~var~, so it can only modify local variables.

**** Statement Semantics
Compared to assignment, the semantics of other statements are trivial to
compute. Skip statements map to the identity function and sequencing is function
composition, reflecting that they do nothing and compose statements together
respectively. As expressions cannot modify state, ~if_then_else_~ and ~if_then_~
statements become simple---evaluate the condition and both branches on the input
state, then return the result of whichever branch the condition selects. Local
variable declarations are also quite simple. The initial value is computed and
added to the variable context. After evaluating the subsequent statement, the
final value of the new variable is stripped away from the context.

The only looping construct in MIPSL is the ~for~ loop. Because it performs a
fixed number of iterations, it too has easy-to-implement denotational
semantics: it is effectively a fixed number of ~declare~ statements sequenced
together. This is also one of the primary reasons why the denotational
semantics can have poor computational performance; every iteration of the
~for~ loop must be evaluated individually.
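
The following sketch, again over hypothetical contexts ~Σ = [ bool ]~ and
~Γ = [ int ]~, shows a loop of this form:

#+begin_src agda2
-- A sketch over hypothetical contexts Σ = [ bool ] and Γ = [ int ]:
-- increment the local variable once per iteration of a two-iteration loop.
incr-twice : Statement [ bool ] [ int ]
incr-twice = for 2 (var 1F ≔ lit 1ℤ + var 1F)

-- Under stmt, this denotes the composition of two copies of the loop body,
-- each run in a context extended with the loop variable (var 0F here) and
-- then stripped of it again---the same shape as two sequenced declare
-- statements whose initial values are the successive loop indices.
#+end_src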

~stmt~ and ~locStmt~ return the full context and only the local variables
respectively. This is because only ~Statement~ can include a ~Reference~, which
may refer to global state. On the other hand, the ~LocalReference~ type used by
~LocalStatement~ can only refer to, and hence modify, local state.

**** Function and Procedure Semantics
Finally, there are ~proc~ and ~fun~ for denoting procedures and functions. ~proc~
returns the global state only; ~Procedure~ is a thin wrapper around ~Statement~,
which modifies both local and global state. However, the local state is lost
when leaving a procedure, hence ~proc~ only returns the global part. ~fun~
behaves a lot like a ~declare~ statement. It initialises the return variable to
the given expression, then evaluates the ~LocalStatement~ body. Unlike
~declare~, which discards the added variable upon exiting the statement, ~fun~
instead returns the value of that variable. As ~LocalStatement~ cannot modify
global state, and the other local variables are lost upon exiting the function,
only this one return value is necessary.
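
As a final small illustration, the following sketch gives a procedure over
hypothetical contexts that copies its single argument into the single global
variable:

#+begin_src agda2
-- Hypothetical contexts: one Boolean global and one Boolean argument.
setFlag : Procedure [ bool ] [ bool ]
setFlag = (state 0F ≔ var 0F) ∙end

-- proc setFlag takes the initial global and argument contexts, runs the body
-- with stmt, and returns only the updated global context; the argument is
-- discarded on exit, as described above.
#+end_src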

*** Hoare Logic Semantics
The final form of semantics specified for MIPSL is a form of Hoare logic. Unlike
the denotational semantics, which must perform a full computation, the Hoare
logic is syntax-directed; loops only require a single proof. This section starts
by explaining how a MIPSL ~Expression~ is converted into a ~Term~ for use in
Hoare logic assertions. Then the syntax and semantics of the ~Assertion~ type are
discussed, before finally giving the form of correctness triples for MIPSL.

**** Converting ~Expression~ into ~Term~
As discussed in [[*Hoare Logic]], a simple language such as WHILE can use
expressions as terms in assertions directly. The only modification required is
the addition of auxiliary variables. MIPSL is not as simple a language as WHILE,
thanks to the presence of function calls in expressions. Whilst function calls
do not prevent converting expressions into terms, some care must be taken. In
particular, this conversion is only possible due to the pure nature of MIPSL
functions; it would not be possible if functions modified global variables. The
full definition of ~Term~ and its semantics are given in [[*MIPSL Hoare Logic
Definitions]].

First, a demonstration of why function calls need special care in Hoare logic.
We will work in an environment with a single Boolean-valued global variable.
Consider the following MIPSL function, a unary operator on integers:

#+begin_src agda2
f : Function [ bool ] [ int ] int
f =
  init
    var 0F ∙
    let x = var 1F in
    let ret = var 0F in
    if state 0F
      then ret ≔ lit 1ℤ + x
  end
#+end_src

When the global variable ~state 0F~ is false, this is the identity function.
However, when ~state 0F~ is true, ~f~ performs an increment.

Consider an expression ~e : Expression [ bool ] Γ int~ of ~call f [ x ]~.  There
are three important aspects we need to consider for converting ~e~ into a term:
the initial conversion; substitution of variables; and the semantics. The
simplest conversion is to keep the function call as-is, and simply recursively
convert ~x~ into a term. This would result in a term ~e′ = call f [ x′ ]~, using
~′~ to indicate the term embedding.

What would happen if we tried to substitute ~t~, a term involving local
variables in ~Γ~, for ~state 0F~ in ~e′~? As ~f~ refers to ~state 0F~, it must
be modified in some way. However, ~Γ~ is a different variable context from
~[ int ]~, so there is no way of writing ~t~ inside of ~f~. This embedding is
not sufficient.

A working solution comes from the insight that a ~Function~ in MIPSL can only
read from global variables, and never write to them. Instead of thinking of ~f~
as a function with a set of global variables and a list of arguments, you can
consider ~f~ to be a function with two sets of arguments. In an ~Expression~,
the first set of arguments always corresponds exactly with the global variables,
so is elided. We can then define an embedding function ~↓_~, such that ~↓ e =
call f [ state 0F ] [ ↓ x ]~, and all the other expression forms as expected.
This makes the elided arguments to ~f~ explicit.
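
Concretely, for the function ~f~ above, a hypothetical local context ~[ int ]~
and an empty auxiliary context, the embedded term for ~call f [ var 0F ]~ can
be written as the following sketch, with the argument lists built from the
~All~ constructors:

#+begin_src agda2
-- Sketch of the embedding, with hypothetical contexts Γ = [ int ] and Δ = [].
-- The elided global argument of f becomes an explicit first argument list.
embedded : Term [ bool ] [ int ] [] int
embedded = call f (state 0F ∷ []) (var 0F ∷ [])
#+end_src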

Doing a substitution on ~↓ e~ is now simple: perform the substitution on both
sets of arguments recursively, and leave ~f~ unchanged. As the first set of
arguments corresponds exactly to the global variables in ~f~, the substitution
into those arguments acts like a substitution into ~f~ itself.

The last major consideration for this embedding is how to encode its
semantics. To be able to prove logical implications within Hoare logic, it is
necessary to have a semantic interpretation for assertions and thus for terms.
Going back to ~↓ e~, we already have a denotational semantics for ~f~, so we
only need to supply values for its global and local variables. These come from
evaluating the explicit global argument list and the local argument list
respectively. Thus ~↓_~ is a valid conversion from ~Expression~ to ~Term~.

The only other difference between ~Expression~ and ~Term~ is the use of
auxiliary variables within Hoare logic terms. MIPSL accomplishes this by
providing a ~meta~ constructor much like ~state~ and ~var~. This indexes into a
new auxiliary variable context which forms part of the type definition of
~Term~.

**** Hoare Logic Assertions
An important part of Hoare logic is the assertion language used within the
correctness triples. The Hoare logic for MIPSL uses a first-order logic, which
allows for the easy proof of many logical implications at the expense of not
being complete over the full set of state properties. The full definition and
semantics of the ~Assertion~ type are in [[*MIPSL Hoare Logic Definitions]].

The ~Assertion~ type has the usual set of Boolean connectives: ~true~, ~false~,
~_∧_~, ~_∨_~, ~¬_~ and ~_⟶_~. When compared to the ~fin~ MIPSL expression, which
performs arbitrary manipulations on finite sets, this fixed set of connectives
may appear restrictive. The primary argument in favour of a fixed set of
connectives is that the possible forms of an assertion are well-defined. This
makes it possible to prove properties about the ~Assertion~ type that would not
be possible if assertions could use arbitrary connectives.

Another constructor of ~Assertion~ is ~pred~, which accepts an arbitrary
Boolean-valued ~Term~. This is the only way to test properties of the current
program state within assertions. As nearly all types have equality comparisons,
~pred~ can encode equality and inequality constraints on values. Furthermore, as
~Term~ embeds ~Expression~, many complex computations can be performed within
~pred~. To allow equality between two terms of any type, there is an ~equal~
function to construct an appropriate assertion.

The final two constructors of ~Assertion~ provide first-order quantification
over auxiliary variables. ~all~ provides universal quantification and ~some~
provides existential quantification.
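
For example, the following sketch, over hypothetical contexts ~Σ = [ bool ]~
and ~Γ = [ int ]~ with an empty auxiliary context, asserts that some auxiliary
value equals the single local variable:

#+begin_src agda2
-- A sketch assertion over hypothetical contexts Σ = [ bool ], Γ = [ int ], Δ = [].
-- "There exists an auxiliary variable whose value equals the local variable."
localWitness : Assertion [ bool ] [ int ] []
localWitness = some (equal (meta 0F) (var 0F))
#+end_src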

Semantically, an assertion is a predicate on the current state of execution. For
MIPSL, this state is the current global, local and auxiliary variable contexts.
As is usual in Agda, these predicates are encoded as an indexed family of sets.

The Boolean connectives are represented by their usual type-theoretic
counterparts: the unit type for ~true~, the empty type for ~false~, product
types for ~_∧_~, sum types for ~_∨_~, function types for ~_⟶_~ and the negation
type for ~¬_~.

Quantifier assertions are also easy to give a semantic representation.
Universal quantification becomes a function that takes a value of the
quantified type and returns the encoding of the inner assertion, with the
auxiliary context extended by that value. Existential quantification instead
becomes a dependent pair of a value of the quantified type and the semantic
encoding of the inner assertion.

The final ~Assertion~ form to consider is ~pred~. Its semantics first
evaluates the associated Boolean term: if it is true, the semantics is the unit
type; otherwise, it is the empty type. For comparisons between values of types
with many inhabitants, some readers may feel that reducing those comparisons to
a binary result loses information. Providing this information to the user would
require a way to convert Boolean-valued terms into a normal form with an
inequality operator at the root. This conversion would be highly non-trivial,
especially due to the presence of function calls in terms.

Fortunately, all equalities and inequalities between MIPSL values are decidable,
either by construction of the type for Booleans and finite sets, or by
specification for integers and real numbers. This allows the user to recover
Agda proof terms for these equalities given only the knowledge of whether two
terms are equal.

**** Correctness Triples for MIPSL
In the traditional presentation of Hoare logic ([[*Hoare Logic]]), there are two
types of rule: structural rules based on program syntax, and adaptation rules to
modify preconditions and postconditions. The Hoare logic for MIPSL unifies the
two forms of rules, eliminating the need to choose which type of rule to use
next. This allows for purely syntax-directed proofs for any choice of
precondition and postcondition.

#+name: MIPSL-correctness-triples
#+caption: The Hoare logic correctness triples for MIPSL. This combines the
#+caption: structural and adaptation rules you would find in traditional
#+caption: renderings of Hoare logic into a single set of structural rules.
#+begin_src agda2
data HoareTriple (P : Assertion Σ Γ Δ) (Q : Assertion Σ Γ Δ) :
                 Statement Σ Γ → Set (ℓsuc ℓ) where
  seq     : ∀ R → HoareTriple P R s → HoareTriple R Q s₁ → HoareTriple P Q (s ∙ s₁)
  skip    : P ⊆ Q → HoareTriple P Q skip
  assign  : P ⊆ subst Q ref (↓ val) → HoareTriple P Q (ref ≔ val)
  declare : HoareTriple
              (Var.weaken 0F P ∧ equal (var 0F) (Term.Var.weaken 0F (↓ e)))
              (Var.weaken 0F Q)
              s →
            HoareTriple P Q (declare e s)
  invoke  : let metas = All.map (Term.Meta.inject Δ) (All.tabulate meta) in
            let varsToMetas = λ P → Var.elimAll (Meta.weakenAll [] Γ P) metas in
            let termVarsToMetas =
              λ t → Term.Var.elimAll (Term.Meta.weakenAll [] Γ t) metas in
            HoareTriple
              ( varsToMetas P
              ∧ equal (↓ tup (All.tabulate var)) (termVarsToMetas (↓ tup es))
              )
              (varsToMetas Q)
              s →
            HoareTriple P Q (invoke (s ∙end) es)
  if      : HoareTriple (P ∧ pred (↓ e)) Q s →
            P ∧ pred (↓ inv e) ⊆ Q →
            HoareTriple P Q (if e then s)
  if-else : HoareTriple (P ∧ pred (↓ e)) Q s →
            HoareTriple (P ∧ pred (↓ inv e)) Q s₁ →
            HoareTriple P Q (if e then s else s₁)
  for     : (I : Assertion _ _ (fin _ ∷ _)) →
            P ⊆ Meta.elim 0F I (↓ lit 0F) →
            HoareTriple {Δ = _ ∷ Δ}
              ( Var.weaken 0F
                  (Meta.elim 1F (Meta.weaken 0F I)
                                (fin inject₁ (cons (meta 0F) nil)))
              ∧ equal (meta 0F) (var 0F)
              )
              (Var.weaken 0F
                 (Meta.elim 1F (Meta.weaken 0F I)
                               (fin suc (cons (meta 0F) nil))))
              s →
            Meta.elim 0F I (↓ lit (fromℕ m)) ⊆ Q →
            HoareTriple P Q (for m s)
#+end_src

We will now talk through each of the Hoare logic rules for MIPSL, which are
given in [[MIPSL-correctness-triples]]. The simplest rule to consider is ~skip~.
This immediately demonstrates how MIPSL Hoare logic combines structural and
adaptation rules. A purely structural rule for ~skip~ would be ~HoareTriple P P
skip~; the ~skip~ statement has no effect on the current state. By combining
this with the rule of consequence, the MIPSL ~skip~ rule also permits a logical
implication from precondition to postcondition.

The ~seq~ rule is as anyone familiar with Hoare logic would expect. The
only potential surprise is that the intermediate assertion has to be given
explicitly. This is due to Agda being unable to infer the assertion ~Q~ from the
numerous manipulations applied to it by the other correctness rules.

Another pair of simple rules are ~if~ and ~if-else~. In fact, the ~if-else~ rule
is identical to the corresponding Hoare logic rule from WHILE, and ~if~ only
differs by directly substituting in a ~skip~ statement for the negative branch.

The final trivial rule is ~assign~. Like the ~skip~ rule, the ~assign~ rule
combines the structural and adaptation rules of WHILE into a single Hoare logic
rule for MIPSL. A purely structural rule would have ~subst Q ref (↓ val)~ as the
precondition of the statement. MIPSL combines this with the rule of consequence
to allow for an arbitrary precondition.
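
For instance, a proof for a small two-statement program would have the
following shape (a sketch only; ~P~, ~Q~, ~e~ and the entailment proofs ~⊆₁~
and ~⊆₂~ are hypothetical):

#+begin_src agda2
-- Sketch: proving the triple for (var 0F ≔ e) ∙ skip reduces to two entailments.
-- proof : HoareTriple P Q ((var 0F ≔ e) ∙ skip)
-- proof = seq Q (assign ⊆₁) (skip ⊆₂)
--   where
--     ⊆₁ : P ⊆ subst Q (var 0F) (↓ e)   -- adaptation step for the assignment
--     ⊆₂ : Q ⊆ Q                        -- trivial consequence step for skip
#+end_src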

The other Hoare logic rules for MIPSL are decidedly less simple. Most of the
added complexity is a consequence of MIPSL's type safety. For example, whilst it
is trivial to add a free variable to an assertion on paper, doing so in a
type-safe way for the ~Assertion~ type requires constructing a whole new Agda
term, as the variable context forms part of the type.

The ~declare~ rule is the simplest of the three remaining. The goal is to
describe a necessary triple on ~s~ such that ~HoareTriple P Q (declare e s)~ is
a valid correctness triple. First, note that ~P~ and ~Q~ have type ~Assertion Σ
Γ Δ~, whilst ~s~ has type ~Statement Σ (t ∷ Γ)~ due to the declaration
introducing a new variable. To be able to use ~P~ and ~Q~, they have to be
weakened to the type ~Assertion Σ (t ∷ Γ) Δ~, achieved by calling ~Var.weaken
0F~. We will denote the weakened forms ~P′~ and ~Q′~ for brevity. The new triple
we have is ~HoareTriple P′ Q′ s~. However, this does not constrain the new
variable. Thus we assert that the new variable ~var 0F~ is equal to the initial
value ~e~.  However, ~e~ has type ~Expression Σ Γ~ and we need a ~Term Σ (t ∷ Γ)
Δ~. Hence we must instead use ~Term.Var.weaken 0F (↓ e)~, denoted ~e′~, which
converts ~e~ to a term and introduces the new variable. This finally gives us
the triple we need: ~HoareTriple (P′ ∧ equal (var 0F) e′) Q′ s~.

I will go into less detail when discussing ~invoke~ and ~for~, due to their
even greater complexity. The ~for~ rule is the simpler of the two, so I will
start there. Its form was inspired by the WHILE rule for a ~while~ loop, but
specialised to a loop with a fixed number of iterations.

Given a ~for n s~ statement, we first choose a loop invariant ~I : Assertion Σ Γ
(fin (suc n) ∷ Δ)~. The additional auxiliary variable indicates the number of
complete iterations of the loop, from \(0\) to \(n\). We will use ~I(x)~ to
denote the assertion ~I~ with the additional auxiliary variable replaced with
term ~x~, and make weakening variable contexts implicit. We require that ~P ⊆
I(0)~ and ~I(n) ⊆ Q~ to ensure that the precondition and postcondition are an
adaptation of the loop invariant. The final part to consider is the correctness
triple for ~s~. We add in a new auxiliary variable representing the value of the
loop variable. This is necessary to ensure the current iteration number is
preserved between the precondition and postcondition, as the loop variable
itself can be modified by ~s~. We then require that the following triple holds:
~HoareTriple (I(meta 0F) ∧ equal (meta 0F) (var 0F)) I(1+ meta 0F) s~. This
ensures that ~I~ remains true across the loop iteration, for each possible value
of the loop variable.

Notice that unlike the denotational semantics, which would explicitly execute
each iteration of a loop, the Hoare logic instead requires only a single proof
term for all iterations of the loop. This is one of the primary benefits of
using Hoare logic over the denotational semantics; it has a much lower
computational cost.

The final Hoare logic rule for MIPSL is ~invoke~. Procedure invocation is tricky
in MIPSL's Hoare logic due to the changing local variable scope in the procedure
body. Of particular note, any local variables in the precondition and
postcondition for a procedure invocation cannot be accessed or modified by the
procedure body. This is the inspiration for the form of the ~invoke~ rule.

To construct ~HoareTriple P Q (invoke (s ∙end) es)~, we first consider the form
~P~ and ~Q~ will take in a correctness triple for ~s~. Note that local variables
in ~P~ and ~Q~ are immutable within ~s~, due to the changing local variable
scope. Also note that the local variables cannot be accessed using ~var~; ~P~
and ~Q~ have type ~Assertion Σ Γ Δ~, but ~s~ has type ~Statement Σ Γ′~ for some
context ~Γ′~ independent of ~Γ~. As the original local variables are immutable
during the invocation, we can replace them with auxiliary variables, by
assigning a new auxiliary variable for each one. Within ~P~ and ~Q~, we then
replace all ~var x~ with ~meta x~ to reflect that the local variables have been
moved to auxiliary variables. This is the action performed by the ~varsToMetas~
function. Finally, we have to ensure that the local variables within the
procedure body are initially set to the invocation arguments. Like ~P~ and ~Q~,
the local variables in ~es~ have to be replaced with the corresponding auxiliary
variables. This substitution is done by ~termVarsToMetas~.

Example uses of these rules, particularly ~invoke~ and ~for~, are given in
(FIXME: forward reference).

* Using MIPSL in Proofs
We have now fully defined MIPSL, a programming language that imitates the
Armv8-M pseudocode specification language. Whilst the specification of a new
language is interesting by itself, the purpose of MIPSL was to enable the formal
verification of algorithms written for the Armv8-M instruction set.

This chapter demonstrates the suitability of MIPSL for this purpose by first
using MIPSL to encode the behaviour of vector-based Barrett reduction, as
specified by (FIXME: textcite). Then we will instantiate MIPSL with a concrete
model for its ~int~ and ~real~ types and use it to verify a number of concrete
examples, giving evidence for the correctness of MIPSL's semantics. This is
followed by a discussion of how to abstract away the details of this proof and
show that the Barrett reduction algorithm is correct on any set of inputs. The
chapter concludes with a proof sketch for the correctness of the MIPSL Hoare
logic rules with respect to the denotational semantics, showing that they are a
useful tool.

** Vector Barrett Reduction in MIPSL
Barrett reduction is a useful subroutine in the NTT algorithm. Briefly, it takes
an integer and finds a "small" representative of it modulo some fixed base. Using the
M-profile vector extension for the Armv8.1-M instruction set, the algorithm can
be applied to a vector of four 32-bit values using only two instructions, given
in [[barrett-asm]].

#+name: barrett-asm
#+caption: Assembly instructions for Barrett reduction of a vector of values
#+caption: using the M-profile vector extension (FIXME: cite).
#+begin_src asm
; Logical inputs:
; - n : modulo base
; Register inputs:
; - -n : general register with value -n
; - m  : general register with value ⌊ 2^32 / n ⌉
; - z  : input vector register
; - t  : output vector register
VQRDMULH.s32 t z m
VMLA.s32     z t -n
#+end_src

(FIXME: how much detail is needed? The word limit exists)

** Concrete Examples of Semantics


* Conclusions
** Future Work

#+print_bibliography:

\appendix



#+latex: %TC:ignore
* MIPSL Syntax Definition
#+begin_src agda2
data Expression     (Σ : Vec Type o) (Γ : Vec Type n) : Type → Set
data Reference      (Σ : Vec Type o) (Γ : Vec Type n) : Type → Set
data LocalReference (Σ : Vec Type o) (Γ : Vec Type n) : Type → Set
data Statement      (Σ : Vec Type o) (Γ : Vec Type n) : Set
data LocalStatement (Σ : Vec Type o) (Γ : Vec Type n) : Set
data Function       (Σ : Vec Type o) (Γ : Vec Type n) (ret : Type) : Set
data Procedure      (Σ : Vec Type o) (Γ : Vec Type n) : Set

data Expression Σ Γ where
  lit           : literalType t → Expression Σ Γ t

  state         : ∀ i → Expression Σ Γ (lookup Σ i)

  var           : ∀ i → Expression Σ Γ (lookup Γ i)

  _≟_           : ⦃ HasEquality t ⦄ →
                  Expression Σ Γ t →
                  Expression Σ Γ t →
                  Expression Σ Γ bool

  _<?_          : ⦃ Ordered t ⦄ →
                  Expression Σ Γ t →
                  Expression Σ Γ t →
                  Expression Σ Γ bool

  inv           : Expression Σ Γ bool → Expression Σ Γ bool

  _&&_          : Expression Σ Γ bool →
                  Expression Σ Γ bool →
                  Expression Σ Γ bool

  _||_          : Expression Σ Γ bool →
                  Expression Σ Γ bool →
                  Expression Σ Γ bool

  not           : Expression Σ Γ (bits n) → Expression Σ Γ (bits n)

  _and_         : Expression Σ Γ (bits n) →
                  Expression Σ Γ (bits n) →
                  Expression Σ Γ (bits n)

  _or_          : Expression Σ Γ (bits n) →
                  Expression Σ Γ (bits n) →
                  Expression Σ Γ (bits n)

  [_]           : Expression Σ Γ t → Expression Σ Γ (array t 1)

  unbox         : Expression Σ Γ (array t 1) → Expression Σ Γ t

  merge         : Expression Σ Γ (array t m) →
                  Expression Σ Γ (array t n) →
                  Expression Σ Γ (fin (suc n)) →
                  Expression Σ Γ (array t (n ℕ.+ m))

  slice         : Expression Σ Γ (array t (n ℕ.+ m)) →
                  Expression Σ Γ (fin (suc n)) →
                  Expression Σ Γ (array t m)

  cut           : Expression Σ Γ (array t (n ℕ.+ m)) →
                  Expression Σ Γ (fin (suc n)) →
                  Expression Σ Γ (array t n)

  cast          : .(eq : m ≡ n) →
                  Expression Σ Γ (array t m) →
                  Expression Σ Γ (array t n)

  -_            : ⦃ IsNumeric t ⦄ → Expression Σ Γ t → Expression Σ Γ t

  _+_           : ⦃ isNum₁ : IsNumeric t₁ ⦄ →
                  ⦃ isNum₂ : IsNumeric t₂ ⦄ →
                  Expression Σ Γ t₁ →
                  Expression Σ Γ t₂ →
                  Expression Σ Γ (isNum₁ +ᵗ isNum₂)

  _*_           : ⦃ isNum₁ : IsNumeric t₁ ⦄ →
                  ⦃ isNum₂ : IsNumeric t₂ ⦄ →
                  Expression Σ Γ t₁ →
                  Expression Σ Γ t₂ →
                  Expression Σ Γ (isNum₁ +ᵗ isNum₂)

  _^_           : ⦃ IsNumeric t ⦄ →
                  Expression Σ Γ t →
                  (n : ℕ) →
                  Expression Σ Γ t

  _>>_          : Expression Σ Γ int →
                  (n : ℕ) →
                  Expression Σ Γ int

  rnd           : Expression Σ Γ real → Expression Σ Γ int

  fin           : ∀ {ms} →
                  (f : literalTypes (map fin ms) → Fin n) →
                  Expression Σ Γ (tuple {n = k} (map fin ms)) →
                  Expression Σ Γ (fin n)

  asInt         : Expression Σ Γ (fin n) → Expression Σ Γ int

  nil           : Expression Σ Γ (tuple [])

  cons          : Expression Σ Γ t →
                  Expression Σ Γ (tuple ts) →
                  Expression Σ Γ (tuple (t ∷ ts))

  head          : Expression Σ Γ (tuple (t ∷ ts)) → Expression Σ Γ t

  tail          : Expression Σ Γ (tuple (t ∷ ts)) → Expression Σ Γ (tuple ts)

  call          : (f : Function Σ Δ t) →
                  All (Expression Σ Γ) Δ →
                  Expression Σ Γ t

  if_then_else_ : Expression Σ Γ bool →
                  Expression Σ Γ t →
                  Expression Σ Γ t →
                  Expression Σ Γ t


data Reference Σ Γ where
  state : ∀ i → Reference Σ Γ (lookup Σ i)

  var   : ∀ i → Reference Σ Γ (lookup Γ i)

  [_]   : Reference Σ Γ t → Reference Σ Γ (array t 1)

  unbox : Reference Σ Γ (array t 1) → Reference Σ Γ t

  merge : Reference Σ Γ (array t m) →
          Reference Σ Γ (array t n) →
          Expression Σ Γ (fin (suc n)) →
          Reference Σ Γ (array t (n ℕ.+ m))

  slice : Reference Σ Γ (array t (n ℕ.+ m)) →
          Expression Σ Γ (fin (suc n)) →
          Reference Σ Γ (array t m)

  cut   : Reference Σ Γ (array t (n ℕ.+ m)) →
          Expression Σ Γ (fin (suc n)) →
          Reference Σ Γ (array t n)

  cast  : .(eq : m ≡ n) →
          Reference Σ Γ (array t m) →
          Reference Σ Γ (array t n)

  nil   : Reference Σ Γ (tuple [])

  cons  : Reference Σ Γ t →
          Reference Σ Γ (tuple ts) →
          Reference Σ Γ (tuple (t ∷ ts))

  head  : Reference Σ Γ (tuple (t ∷ ts)) → Reference Σ Γ t

  tail  : Reference Σ Γ (tuple (t ∷ ts)) → Reference Σ Γ (tuple ts)


data LocalReference Σ Γ where
  var   : ∀ i → LocalReference Σ Γ (lookup Γ i)

  [_]   : LocalReference Σ Γ t → LocalReference Σ Γ (array t 1)

  unbox : LocalReference Σ Γ (array t 1) → LocalReference Σ Γ t

  merge : LocalReference Σ Γ (array t m) →
          LocalReference Σ Γ (array t n) →
          Expression Σ Γ (fin (suc n)) →
          LocalReference Σ Γ (array t (n ℕ.+ m))

  slice : LocalReference Σ Γ (array t (n ℕ.+ m)) →
          Expression Σ Γ (fin (suc n)) →
          LocalReference Σ Γ (array t m)

  cut   : LocalReference Σ Γ (array t (n ℕ.+ m)) →
          Expression Σ Γ (fin (suc n)) →
          LocalReference Σ Γ (array t n)

  cast  : .(eq : m ≡ n) →
          LocalReference Σ Γ (array t m) →
          LocalReference Σ Γ (array t n)

  nil   : LocalReference Σ Γ (tuple [])

  cons  : LocalReference Σ Γ t →
          LocalReference Σ Γ (tuple ts) →
          LocalReference Σ Γ (tuple (t ∷ ts))

  head  : LocalReference Σ Γ (tuple (t ∷ ts)) → LocalReference Σ Γ t

  tail  : LocalReference Σ Γ (tuple (t ∷ ts)) → LocalReference Σ Γ (tuple ts)


data Statement Σ Γ where
  _∙_           : Statement Σ Γ → Statement Σ Γ → Statement Σ Γ
  skip          : Statement Σ Γ
  _≔_           : Reference Σ Γ t → Expression Σ Γ t → Statement Σ Γ
  declare       : Expression Σ Γ t → Statement Σ (t ∷ Γ) → Statement Σ Γ
  invoke        : (f : Procedure Σ Δ) → All (Expression Σ Γ) Δ → Statement Σ Γ
  for           : ∀ n → Statement Σ (fin n ∷ Γ) → Statement Σ Γ
  if_then_      : Expression Σ Γ bool → Statement Σ Γ → Statement Σ Γ
  if_then_else_ : Expression Σ Γ bool → Statement Σ Γ → Statement Σ Γ → Statement Σ Γ


data LocalStatement Σ Γ where
  _∙_           : LocalStatement Σ Γ → LocalStatement Σ Γ → LocalStatement Σ Γ
  skip          : LocalStatement Σ Γ
  _≔_           : LocalReference Σ Γ t → Expression Σ Γ t → LocalStatement Σ Γ
  declare       : Expression Σ Γ t → LocalStatement Σ (t ∷ Γ) → LocalStatement Σ Γ
  for           : ∀ n → LocalStatement Σ (fin n ∷ Γ) → LocalStatement Σ Γ
  if_then_      : Expression Σ Γ bool → LocalStatement Σ Γ → LocalStatement Σ Γ
  if_then_else_ : Expression Σ Γ bool →
                  LocalStatement Σ Γ →
                  LocalStatement Σ Γ →
                  LocalStatement Σ Γ


data Function Σ Γ ret where
  init_∙_end : Expression Σ Γ ret →
               LocalStatement Σ (ret ∷ Γ) →
               Function Σ Γ ret

data Procedure Σ Γ where
  _∙end : Statement Σ Γ → Procedure Σ Γ
#+end_src

* MIPSL Denotational Semantics
#+begin_src agda2
expr      : Expression Σ Γ t        → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ t ⟧ₜ
exprs     : All (Expression Σ Γ) ts → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ ts ⟧ₜₛ
ref       : Reference Σ Γ t         → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ t ⟧ₜ
locRef    : LocalReference Σ Γ t    → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ t ⟧ₜ
stmt      : Statement Σ Γ           → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ
locStmt   : LocalStatement Σ Γ      → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ Γ ⟧ₜₛ
fun       : Function Σ Γ t          → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ t ⟧ₜ
proc      : Procedure Σ Γ           → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ Σ ⟧ₜₛ

assign    : Reference Σ Γ t      → ⟦ t ⟧ₜ → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ →
            ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ
locAssign : LocalReference Σ Γ t → ⟦ t ⟧ₜ → ⟦ Σ ⟧ₜₛ × ⟦ Γ ⟧ₜₛ →
            ⟦ Γ ⟧ₜₛ → ⟦ Γ ⟧ₜₛ

expr (lit {t = t} x)        = const (Κ[ t ] x)
expr {Σ = Σ} (state i)      = fetch i Σ ∘ proj₁
expr {Γ = Γ} (var i)        = fetch i Γ ∘ proj₂
expr (e ≟ e₁)               = lift ∘ does ∘ uncurry ≈-dec ∘ < expr e , expr e₁ >
expr (e <? e₁)              = lift ∘ does ∘ uncurry <-dec ∘ < expr e , expr e₁ >
expr (inv e)                = lift ∘ Bool.not ∘ lower ∘ expr e
expr (e && e₁)              = lift ∘ uncurry (Bool._∧_ on lower) ∘
                                < expr e , expr e₁ >
expr (e || e₁)              = lift ∘ uncurry (Bool._∨_ on lower) ∘
                                < expr e , expr e₁ >
expr (not e)                = map (lift ∘ Bool.not ∘ lower) ∘ expr e
expr (e and e₁)             = uncurry (zipWith (lift ∘₂ Bool._∧_ on lower)) ∘
                                < expr e , expr e₁ >
expr (e or e₁)              = uncurry (zipWith (lift ∘₂ Bool._∨_ on lower)) ∘
                                < expr e , expr e₁ >
expr [ e ]                  = (_∷ []) ∘ expr e
expr (unbox e)              = Vec.head ∘ expr e
expr (merge e e₁ e₂)        = uncurry (uncurry mergeVec) ∘
                                < < expr e , expr e₁ > , lower ∘ expr e₂ >
expr (slice e e₁)           = uncurry sliceVec ∘ < expr e , lower ∘ expr e₁ >
expr (cut e e₁)             = uncurry cutVec ∘ < expr e , lower ∘ expr e₁ >
expr (cast eq e)            = castVec eq ∘ expr e
expr (- e)                  = neg ∘ expr e
expr (e + e₁)               = uncurry add ∘ < expr e , expr e₁ >
expr (e * e₁)               = uncurry mul ∘ < expr e , expr e₁ >
expr (e ^ x)                = flip pow x ∘ expr e
expr (e >> n)               = lift ∘ flip shift n ∘ lower ∘ expr e
expr (rnd e)                = lift ∘ ⌊_⌋ ∘ lower ∘ expr e
expr (fin {ms = ms} f e)    = lift ∘ f ∘ lowerFin ms ∘ expr e
expr (asInt e)              = lift ∘ 𝕀⇒ℤ ∘ 𝕀.+_ ∘ Fin.toℕ ∘ lower ∘ expr e
expr nil                    = const _
expr (cons {ts = ts} e e₁)  = uncurry (cons′ ts) ∘ < expr e , expr e₁ >
expr (head {ts = ts} e)     = head′ ts ∘ expr e
expr (tail {ts = ts} e)     = tail′ ts ∘ expr e
expr (call f es)            = fun f ∘ < proj₁ , exprs es >
expr (if e then e₁ else e₂) = uncurry (uncurry Bool.if_then_else_) ∘
                                < < lower ∘ expr e , expr e₁ > , expr e₂ >

exprs []            = const _
exprs (e ∷ [])      = expr e
exprs (e ∷ e₁ ∷ es) = < expr e , exprs (e₁ ∷ es) >

ref {Σ = Σ} (state i)     = fetch i Σ ∘ proj₁
ref {Γ = Γ} (var i)       = fetch i Γ ∘ proj₂
ref [ r ]                 = (_∷ []) ∘ ref r
ref (unbox r)             = Vec.head ∘ ref r
ref (merge r r₁ e)        = uncurry (uncurry mergeVec) ∘
                              < < ref r , ref r₁ > , lower ∘ expr e >
ref (slice r e)           = uncurry sliceVec ∘ < ref r , lower ∘ expr e >
ref (cut r e)             = uncurry cutVec ∘ < ref r , lower ∘ expr e >
ref (cast eq r)           = castVec eq ∘ ref r
ref nil                   = const _
ref (cons {ts = ts} r r₁) = uncurry (cons′ ts) ∘ < ref r , ref r₁ >
ref (head {ts = ts} r)    = head′ ts ∘ ref r
ref (tail {ts = ts} r)    = tail′ ts ∘ ref r

locRef {Γ = Γ} (var i)       = fetch i Γ ∘ proj₂
locRef [ r ]                 = (_∷ []) ∘ locRef r
locRef (unbox r)             = Vec.head ∘ locRef r
locRef (merge r r₁ e)        = uncurry (uncurry mergeVec) ∘
                                 < < locRef r , locRef r₁ > , lower ∘ expr e >
locRef (slice r e)           = uncurry sliceVec ∘ < locRef r , lower ∘ expr e >
locRef (cut r e)             = uncurry cutVec ∘ < locRef r , lower ∘ expr e >
locRef (cast eq r)           = castVec eq ∘ locRef r
locRef nil                   = const _
locRef (cons {ts = ts} r r₁) = uncurry (cons′ ts) ∘ < locRef r , locRef r₁ >
locRef (head {ts = ts} r)    = head′ ts ∘ locRef r
locRef (tail {ts = ts} r)    = tail′ ts ∘ locRef r

assign {Σ = Σ} (state i)     val σ,γ = < updateAt i Σ val ∘ proj₁ , proj₂ >
assign {Γ = Γ} (var i)       val σ,γ = < proj₁ , updateAt i Γ val ∘ proj₂ >
assign [ r ]                 val σ,γ = assign r (Vec.head val) σ,γ
assign (unbox r)             val σ,γ = assign r (val ∷ []) σ,γ
assign (merge r r₁ e)        val σ,γ = let i = expr e σ,γ in
                                       assign r₁ (cutVec val (lower i)) σ,γ ∘
                                       assign r (sliceVec val (lower i)) σ,γ
assign (slice r e)           val σ,γ = let i = expr e σ,γ in
                                       let cut = cutVec (ref r σ,γ) (lower i) in
                                       assign r (mergeVec val cut (lower i)) σ,γ
assign (cut r e)             val σ,γ = let i = expr e σ,γ in
                                       let slice = sliceVec (ref r σ,γ) (lower i) in
                                       assign r (mergeVec slice val (lower i)) σ,γ
assign (cast eq r)           val σ,γ = assign r (castVec (sym eq) val) σ,γ
assign nil                   val σ,γ = id
assign (cons {ts = ts} r r₁) val σ,γ = assign r₁ (tail′ ts val) σ,γ ∘
                                         assign r (head′ ts val) σ,γ
assign (head {ts = ts} r)    val σ,γ = assign r (cons′ ts val (ref (tail r) σ,γ)) σ,γ
assign (tail {ts = ts} r)    val σ,γ = assign r (cons′ ts (ref (head r) σ,γ) val) σ,γ

locAssign {Γ = Γ} (var i)       val σ,γ = updateAt i Γ val
locAssign [ r ]                 val σ,γ = locAssign r (Vec.head val) σ,γ
locAssign (unbox r)             val σ,γ = locAssign r (val ∷ []) σ,γ
locAssign (merge r r₁ e)        val σ,γ = let i = expr e σ,γ in
                                          locAssign r₁ (cutVec val (lower i)) σ,γ ∘
                                          locAssign r (sliceVec val (lower i)) σ,γ
locAssign (slice r e)           val σ,γ = let i = expr e σ,γ in
                                          let cut = cutVec (ref r σ,γ) (lower i) in
                                          locAssign r (mergeVec val cut (lower i)) σ,γ
locAssign (cut r e)             val σ,γ = let i = expr e σ,γ in
                                          let slice = sliceVec (ref r σ,γ) (lower i) in
                                          locAssign r (mergeVec slice val (lower i)) σ,γ
locAssign (cast eq r)           val σ,γ = locAssign r (castVec (sym eq) val) σ,γ
locAssign nil                   val σ,γ = id
locAssign (cons {ts = ts} r r₁) val σ,γ = locAssign r₁ (tail′ ts val) σ,γ ∘
                                            locAssign r (head′ ts val) σ,γ
locAssign (head {ts = ts} r)    val σ,γ = locAssign r (cons′ ts val (locRef (tail r) σ,γ)) σ,γ
locAssign (tail {ts = ts} r)    val σ,γ = locAssign r (cons′ ts (locRef (head r) σ,γ) val) σ,γ

stmt (s ∙ s₁)              = stmt s₁ ∘ stmt s
stmt skip                  = id
stmt (ref ≔ val)           = uncurry (uncurry (assign ref)) ∘
                               < < expr val , id > , id >
stmt {Γ = Γ} (declare e s) = < proj₁ , tail′ Γ ∘ proj₂ > ∘
                               stmt s ∘
                               < proj₁ , uncurry (cons′ Γ) ∘ < expr e , proj₂ > >
stmt (invoke p es)         = < proc p ∘ < proj₁ , exprs es > , proj₂ >
stmt (if e then s)         = uncurry (uncurry Bool.if_then_else_) ∘
                               < < lower ∘ expr e , stmt s > , id >
stmt (if e then s else s₁) = uncurry (uncurry Bool.if_then_else_) ∘
                               < < lower ∘ expr e , stmt s > , stmt s₁ >
stmt {Γ = Γ} (for m s)     = Vec.foldl _
                               (flip λ i → (
                                 < proj₁ , tail′ Γ ∘ proj₂ > ∘
                                 stmt s ∘
                                 < proj₁ , cons′ Γ (lift i) ∘ proj₂ >) ∘_)
                               id
                               (Vec.allFin m)

locStmt (s ∙ s₁)              = locStmt s₁ ∘ < proj₁ , locStmt s >
locStmt skip                  = proj₂
locStmt (ref ≔ val)           = uncurry (uncurry (locAssign ref)) ∘
                                  < < expr val , id > , proj₂ >
locStmt {Γ = Γ} (declare e s) = tail′ Γ ∘
                                  locStmt s ∘
                                  < proj₁ , uncurry (cons′ Γ) ∘ < expr e , proj₂ > >
locStmt (if e then s)         = uncurry (uncurry Bool.if_then_else_) ∘
                                  < < lower ∘ expr e , locStmt s > , proj₂ >
locStmt (if e then s else s₁) = uncurry (uncurry Bool.if_then_else_) ∘
                                  < < lower ∘ expr e , locStmt s > , locStmt s₁ >
locStmt {Γ = Γ} (for m s)     = proj₂ ∘ Vec.foldl _
                                  (flip λ i → (
                                    < proj₁ , tail′ Γ ∘ locStmt s > ∘
                                    < proj₁ , cons′ Γ (lift i) ∘ proj₂ >) ∘_)
                                  id
                                  (Vec.allFin m)

fun {Γ = Γ} (init e ∙ s end) = fetch zero (_ ∷ Γ) ∘
                                 locStmt s ∘
                                 < proj₁ , uncurry (cons′ Γ) ∘ < expr e , proj₂ > >

proc (s ∙end) = proj₁ ∘ stmt s
#+end_src

#  LocalWords:  Hoare ISAs Jasmin CompCert structs bitstring bitstrings getters MIPSL

#+latex: %TC:endignore

* MIPSL Hoare Logic Definitions
First, the definition and semantics of ~Term~.
#+begin_src agda2
data Term (Σ : Vec Type i) (Γ : Vec Type j) (Δ : Vec Type k) : Type → Set ℓ where
  lit           : ⟦ t ⟧ₜ → Term Σ Γ Δ t

  state         : ∀ i → Term Σ Γ Δ (lookup Σ i)

  var           : ∀ i → Term Σ Γ Δ (lookup Γ i)

  meta          : ∀ i → Term Σ Γ Δ (lookup Δ i)

  _≟_           : ⦃ HasEquality t ⦄ →
                  Term Σ Γ Δ t →
                  Term Σ Γ Δ t →
                  Term Σ Γ Δ bool

  _<?_          : ⦃ Ordered t ⦄ →
                  Term Σ Γ Δ t →
                  Term Σ Γ Δ t →
                  Term Σ Γ Δ bool

  inv           : Term Σ Γ Δ bool → Term Σ Γ Δ bool

  _&&_          : Term Σ Γ Δ bool → Term Σ Γ Δ bool → Term Σ Γ Δ bool

  _||_          : Term Σ Γ Δ bool → Term Σ Γ Δ bool → Term Σ Γ Δ bool

  not           : Term Σ Γ Δ (bits n) → Term Σ Γ Δ (bits n)

  _and_         : Term Σ Γ Δ (bits n) →
                  Term Σ Γ Δ (bits n) →
                  Term Σ Γ Δ (bits n)

  _or_          : Term Σ Γ Δ (bits n) →
                  Term Σ Γ Δ (bits n) →
                  Term Σ Γ Δ (bits n)

  [_]           : Term Σ Γ Δ t → Term Σ Γ Δ (array t 1)

  unbox         : Term Σ Γ Δ (array t 1) → Term Σ Γ Δ t

  merge         : Term Σ Γ Δ (array t m) →
                  Term Σ Γ Δ (array t n) →
                  Term Σ Γ Δ (fin (suc n)) →
                  Term Σ Γ Δ (array t (n ℕ.+ m))

  slice         : Term Σ Γ Δ (array t (n ℕ.+ m)) →
                  Term Σ Γ Δ (fin (suc n)) →
                  Term Σ Γ Δ (array t m)

  cut           : Term Σ Γ Δ (array t (n ℕ.+ m)) →
                  Term Σ Γ Δ (fin (suc n)) →
                  Term Σ Γ Δ (array t n)

  cast          : .(eq : m ≡ n) →
                  Term Σ Γ Δ (array t m) →
                  Term Σ Γ Δ (array t n)

  -_            : ⦃ IsNumeric t ⦄ → Term Σ Γ Δ t → Term Σ Γ Δ t

  _+_           : ⦃ isNum₁ : IsNumeric t₁ ⦄ →
                  ⦃ isNum₂ : IsNumeric t₂ ⦄ →
                  Term Σ Γ Δ t₁ →
                  Term Σ Γ Δ t₂ →
                  Term Σ Γ Δ (isNum₁ +ᵗ isNum₂)

  _*_           : ⦃ isNum₁ : IsNumeric t₁ ⦄ →
                  ⦃ isNum₂ : IsNumeric t₂ ⦄ →
                  Term Σ Γ Δ t₁ →
                  Term Σ Γ Δ t₂ →
                  Term Σ Γ Δ (isNum₁ +ᵗ isNum₂)

  _^_           : ⦃ IsNumeric t ⦄ → Term Σ Γ Δ t → ℕ → Term Σ Γ Δ t

  _>>_          : Term Σ Γ Δ int → (n : ℕ) → Term Σ Γ Δ int

  rnd           : Term Σ Γ Δ real → Term Σ Γ Δ int

  fin           : ∀ {ms} →
                  (f : literalTypes (map fin ms) → Fin n) →
                  Term Σ Γ Δ (tuple {n = o} (map fin ms)) →
                  Term Σ Γ Δ (fin n)

  asInt         : Term Σ Γ Δ (fin n) → Term Σ Γ Δ int

  nil           : Term Σ Γ Δ (tuple [])

  cons          : Term Σ Γ Δ t →
                  Term Σ Γ Δ (tuple ts) →
                  Term Σ Γ Δ (tuple (t ∷ ts))

  head          : Term Σ Γ Δ (tuple (t ∷ ts)) → Term Σ Γ Δ t

  tail          : Term Σ Γ Δ (tuple (t ∷ ts)) → Term Σ Γ Δ (tuple ts)

  call          : Function ts ts₁ t →
                  All (Term Σ Γ Δ) ts →
                  All (Term Σ Γ Δ) ts₁ →
                  Term Σ Γ Δ t

  if_then_else_ : Term Σ Γ Δ bool →
                  Term Σ Γ Δ t →
                  Term Σ Γ Δ t →
                  Term Σ Γ Δ t

⟦_⟧  : Term Σ Γ Δ t → ⟦ Σ ⟧ₜₛ → ⟦ Γ ⟧ₜₛ → ⟦ Δ ⟧ₜₛ → ⟦ t ⟧ₜ
⟦_⟧ₛ : All (Term Σ Γ Δ) ts → ⟦ Σ ⟧ₜₛ → ⟦ Γ ⟧ₜₛ → ⟦ Δ ⟧ₜₛ → ⟦ ts ⟧ₜₛ

⟦ lit x ⟧                σ γ δ = x
⟦ state i ⟧              σ γ δ = fetch i Σ σ
⟦ var i ⟧                σ γ δ = fetch i Γ γ
⟦ meta i ⟧               σ γ δ = fetch i Δ δ
⟦ e ≟ e₁ ⟧               σ γ δ = (lift ∘₂ does ∘₂ ≈-dec)
                                   (⟦ e ⟧ σ γ δ)
                                   (⟦ e₁ ⟧ σ γ δ)
⟦ e <? e₁ ⟧              σ γ δ = (lift ∘₂ does ∘₂ <-dec)
                                   (⟦ e ⟧ σ γ δ)
                                   (⟦ e₁ ⟧ σ γ δ)
⟦ inv e ⟧                σ γ δ = lift ∘ Bool.not ∘ lower $ ⟦ e ⟧ σ γ δ
⟦ e && e₁ ⟧              σ γ δ = (lift ∘₂ Bool._∧_ on lower)
                                   (⟦ e ⟧ σ γ δ) (⟦ e₁ ⟧ σ γ δ)
⟦ e || e₁ ⟧              σ γ δ = (lift ∘₂ Bool._∨_ on lower)
                                   (⟦ e ⟧ σ γ δ)
                                   (⟦ e₁ ⟧ σ γ δ)
⟦ not e ⟧                σ γ δ = map (lift ∘ Bool.not ∘ lower) (⟦ e ⟧ σ γ δ)
⟦ e and e₁ ⟧             σ γ δ = zipWith (lift ∘₂ Bool._∧_ on lower)
                                  (⟦ e ⟧ σ γ δ)
                                  (⟦ e₁ ⟧ σ γ δ)
⟦ e or e₁ ⟧              σ γ δ = zipWith (lift ∘₂ Bool._∨_ on lower)
                                  (⟦ e ⟧ σ γ δ)
                                  (⟦ e₁ ⟧ σ γ δ)
⟦ [ e ] ⟧                σ γ δ = ⟦ e ⟧ σ γ δ ∷ []
⟦ unbox e ⟧              σ γ δ = Vec.head (⟦ e ⟧ σ γ δ)
⟦ merge e e₁ e₂ ⟧        σ γ δ = mergeVec
                                  (⟦ e ⟧ σ γ δ)
                                  (⟦ e₁ ⟧ σ γ δ)
                                  (lower (⟦ e₂ ⟧ σ γ δ))
⟦ slice e e₁ ⟧           σ γ δ = sliceVec
                                  (⟦ e ⟧ σ γ δ)
                                  (lower (⟦ e₁ ⟧ σ γ δ))
⟦ cut e e₁ ⟧             σ γ δ = cutVec
                                  (⟦ e ⟧ σ γ δ)
                                  (lower (⟦ e₁ ⟧ σ γ δ))
⟦ cast eq e ⟧            σ γ δ = castVec eq (⟦ e ⟧ σ γ δ)
⟦ - e ⟧                  σ γ δ = neg (⟦ e ⟧ σ γ δ)
⟦ e + e₁ ⟧               σ γ δ = add (⟦ e ⟧ σ γ δ) (⟦ e₁ ⟧ σ γ δ)
⟦ e * e₁ ⟧               σ γ δ = mul (⟦ e ⟧ σ γ δ) (⟦ e₁ ⟧ σ γ δ)
⟦ e ^ x ⟧                σ γ δ = pow (⟦ e ⟧ σ γ δ) x
⟦ e >> n ⟧               σ γ δ = lift ∘ flip shift n ∘ lower $ ⟦ e ⟧ σ γ δ
⟦ rnd e ⟧                σ γ δ = lift ∘ ⌊_⌋ ∘ lower $ ⟦ e ⟧ σ γ δ
⟦ fin {ms = ms} f e ⟧    σ γ δ = lift ∘ f ∘ lowerFin ms $ ⟦ e ⟧ σ γ δ
⟦ asInt e ⟧              σ γ δ = lift ∘ 𝕀⇒ℤ ∘ 𝕀.+_ ∘ Fin.toℕ ∘ lower $ ⟦ e ⟧ σ γ δ
⟦ nil ⟧                  σ γ δ = _
⟦ cons {ts = ts} e e₁ ⟧  σ γ δ = cons′ ts (⟦ e ⟧ σ γ δ) (⟦ e₁ ⟧ σ γ δ)
⟦ head {ts = ts} e ⟧     σ γ δ = head′ ts (⟦ e ⟧ σ γ δ)
⟦ tail {ts = ts} e ⟧     σ γ δ = tail′ ts (⟦ e ⟧ σ γ δ)
⟦ call f es es₁ ⟧        σ γ δ = Den.Semantics.fun f
                                  (⟦ es ⟧ₛ σ γ δ , ⟦ es₁ ⟧ₛ σ γ δ)
⟦ if e then e₁ else e₂ ⟧ σ γ δ = Bool.if lower (⟦ e ⟧ σ γ δ)
                                 then ⟦ e₁ ⟧ σ γ δ
                                 else ⟦ e₂ ⟧ σ γ δ

⟦_⟧ₛ               []            σ γ δ = _
⟦_⟧ₛ {ts = _ ∷ ts} (e ∷ es)      σ γ δ = cons′ ts (⟦ e ⟧ σ γ δ) (⟦ es ⟧ₛ σ γ δ)
#+end_src

And here is the definition of ~Assertion~ with its semantics.

#+begin_src agda2
data Assertion (Σ : Vec Type o) (Γ : Vec Type n) (Δ : Vec Type m)
               : Set (ℓsuc ℓ) where
  all    : Assertion Σ Γ (t ∷ Δ) → Assertion Σ Γ Δ
  some   : Assertion Σ Γ (t ∷ Δ) → Assertion Σ Γ Δ
  pred   : Term Σ Γ Δ bool → Assertion Σ Γ Δ
  true   : Assertion Σ Γ Δ
  false  : Assertion Σ Γ Δ
  ¬_     : Assertion Σ Γ Δ → Assertion Σ Γ Δ
  _∧_    : Assertion Σ Γ Δ → Assertion Σ Γ Δ → Assertion Σ Γ Δ
  _∨_    : Assertion Σ Γ Δ → Assertion Σ Γ Δ → Assertion Σ Γ Δ
  _⟶_    : Assertion Σ Γ Δ → Assertion Σ Γ Δ → Assertion Σ Γ Δ

⟦_⟧ : Assertion Σ Γ Δ → ⟦ Σ ⟧ₜₛ → ⟦ Γ ⟧ₜₛ → ⟦ Δ ⟧ₜₛ → Set ℓ
⟦_⟧ {Δ = Δ} (all P)  σ γ δ = ∀ x → ⟦ P ⟧ σ γ (cons′ Δ x δ)
⟦_⟧ {Δ = Δ} (some P) σ γ δ = ∃ λ x → ⟦ P ⟧ σ γ (cons′ Δ x δ)
⟦ pred p ⟧           σ γ δ = Lift ℓ (Bool.T (lower (TS.⟦ p ⟧ σ γ δ)))
⟦ true ⟧             σ γ δ = Lift ℓ ⊤
⟦ false ⟧            σ γ δ = Lift ℓ ⊥
⟦ ¬ P ⟧              σ γ δ = ⟦ P ⟧ σ γ δ → ⊥
⟦ P ∧ Q ⟧            σ γ δ = ⟦ P ⟧ σ γ δ × ⟦ Q ⟧ σ γ δ
⟦ P ∨ Q ⟧            σ γ δ = ⟦ P ⟧ σ γ δ ⊎ ⟦ Q ⟧ σ γ δ
⟦ P ⟶ Q ⟧            σ γ δ = ⟦ P ⟧ σ γ δ → ⟦ Q ⟧ σ γ δ
#+end_src