% compcert.bib

@inproceedings{Blazy-Leroy-05,
author = {Sandrine Blazy and Xavier Leroy},
title = {Formal verification of a memory model for
{C}-like imperative languages},
booktitle = {International Conference on
Formal Engineering Methods (ICFEM 2005)},
year = 2005,
volume = 3785,
series = {Lecture Notes in Computer Science},
publisher = {Springer},
pages = {280--299},
url = {http://xavierleroy.org/publi/memory-model.pdf},
hal = {http://hal.inria.fr/inria-00077921/},
abstract = {
This paper presents a formal verification with the Coq proof assistant
of a memory model for C-like imperative languages. This model defines
the memory layout and the operations that manage the memory. The model
has been specified at two levels of abstraction and implemented as
part of an ongoing certification in Coq of a moderately-optimising C
compiler. Many properties of the memory have been verified in the
specification. They facilitate the definition of precise formal
semantics of C pointers. A certified OCaml code implementing the
memory model has been automatically extracted from the
specifications.},
xtopic = {compcert}
}

@inproceedings{2006-Leroy-Bertot-Gregoire,
author = {Yves Bertot and Benjamin Grégoire and Xavier Leroy},
title = {A structured approach to proving compiler
optimizations based on dataflow analysis},
booktitle = {Types for Proofs and Programs, Workshop TYPES 2004},
year = 2006,
volume = 3839,
pages = {66--81},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
url = {http://xavierleroy.org/publi/proofs_dataflow_optimizations.pdf},
hal = {http://hal.inria.fr/inria-00289549/},
abstract = {
This paper reports on the correctness proof of compiler optimizations
based on data-flow analysis.  We formulate the optimizations and
analyses as instances of a general framework for data-flow analyses and
transformations, and prove that the optimizations preserve the
behavior of the compiled programs.  This development is a part of a
larger effort of certifying an optimizing compiler by proving semantic
equivalence between source and compiled code.}
}

@inproceedings{2006-Leroy-compcert,
author = {Xavier Leroy},
title = {Formal certification of a compiler back-end, or:
programming a compiler with a proof assistant},
booktitle = {33rd ACM symposium on Principles of Programming Languages},
year = 2006,
publisher = {ACM Press},
pages = {42--54},
url = {http://xavierleroy.org/publi/compiler-certif.pdf},
urlpublisher = {http://doi.acm.org/10.1145/1111037.1111042},
hal = {http://hal.inria.fr/inria-00000963/},
abstract = {This paper reports on the development and formal certification (proof
of semantic preservation) of a compiler from Cminor (a C-like
imperative language) to PowerPC assembly code, using the Coq proof
assistant both for programming the compiler and for proving its
correctness.  Such a certified compiler is useful in the context of
formal methods applied to the certification of critical software: the
certification of the compiler guarantees that the safety properties
proved on the source code hold for the executable compiled code as well.},
pubkind = {conf-int-mono}
}

@inproceedings{2006-Leroy-coindsem,
author = {Xavier Leroy},
title = {Coinductive big-step operational semantics},
booktitle = {European Symposium on Programming (ESOP'06)},
year = 2006,
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = 3924,
pages = {54--68},
url = {http://xavierleroy.org/publi/coindsem.pdf},
urlpublisher = {http://dx.doi.org/10.1007/11693024_5},
hal = {http://hal.inria.fr/inria-00289545/},
abstract = {This paper illustrates the use of co-inductive definitions and proofs
in big-step operational semantics, enabling the latter to describe
diverging evaluations in addition to terminating evaluations. We show
applications to proofs of type soundness and to proofs of semantic
preservation for compilers.  (See
\verb|http://gallium.inria.fr/~xleroy/publi/coindsem/|
for the Coq on-machine formalization of these results.)},
pubkind = {conf-int-mono}
}

@inproceedings{2006-Leroy-Blazy-Dargaye,
author = {Sandrine Blazy and Zaynah Dargaye and Xavier Leroy},
title = {Formal Verification of a {C} Compiler Front-End},
booktitle = {FM 2006: Int. Symp. on Formal Methods},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
volume = 4085,
year = 2006,
pages = {460--475},
url = {http://xavierleroy.org/publi/cfront.pdf},
urlpublisher = {http://dx.doi.org/10.1007/11813040_31},
hal = {http://hal.inria.fr/inria-00106401/},
abstract = {This paper presents the formal verification of a compiler front-end
that translates a subset of the C language into the Cminor
intermediate language.  The semantics of the source and target
languages as well as the translation between them have been written in
the specification language of the Coq proof assistant. The proof of
observational semantic equivalence between the source and generated
code has been machine-checked using Coq.  An executable compiler was
obtained by automatic extraction of executable Caml code from the Coq
specification of the translator, combined with a certified compiler
back-end generating PowerPC assembly code from Cminor, described in
previous work.},
pubkind = {conf-int-multi}
}

@techreport{2006-Leroy-Appel-listmachine-tr,
author = {Appel, Andrew W.  and  Leroy, Xavier},
title = {A list-machine benchmark for mechanized metatheory},
year = {2006},
institution = {INRIA},
number = {5914},
type = {Research report},
hal = {http://hal.inria.fr/inria-00077531/},
abstract = {We propose a benchmark to compare theorem-proving systems
on their ability to express proofs of compiler correctness.
In contrast to the first POPLmark, we emphasize the connection
of proofs to compiler implementations, and we point out
that much can be done without binders or alpha-conversion.
We propose specific criteria for evaluating the utility
of mechanized metatheory systems; we have constructed
solutions in both Coq and Twelf metatheory, and
we draw conclusions about those two systems in particular.}
}

@inproceedings{2007-Blazy-Appel-Cminor-sos,
author = {Andrew W. Appel and Sandrine Blazy},
title = {Separation logic for small-step {Cminor}},
booktitle = {Theorem Proving in Higher Order Logics, 20th
Int. Conf. TPHOLs 2007},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = 4732,
pages = {5--21},
year = 2007,
month = sep,
url = {http://www.ensiie.fr/~blazy/AppelBlazy07.pdf},
urlpublisher = {http://dx.doi.org/10.1007/978-3-540-74591-4_3},
hal = {http://hal.inria.fr/inria-00165915/},
pubkind = {conf-int-mono}
}

@inproceedings{2007-Blazy-C-semantics,
author = {Sandrine Blazy},
title = {Experiments in validating formal semantics for {C}},
booktitle = {Proceedings of the C/C++ Verification Workshop},
pages = {95--102},
year = 2007,
month = jul,
url = {http://www.ensiie.fr/~blazy/C07Blazy.pdf},
hal = {http://hal.inria.fr/inria-00292043/},
publisher = {Technical report ICIS-R07015, Radboud University Nijmegen},
pubkind = {conf-int-mono}
}

@inproceedings{2007-Dargaye-JFLA,
author = {Zaynah Dargaye},
title = {Décurryfication certifiée},
booktitle = {Journées Francophones des Langages Applicatifs (JFLA'07)},
pages = {119--134},
publisher = {INRIA},
year = {2007},
url = {http://gallium.inria.fr/~dargaye/publications/certdec.pdf},
hal = {http://hal.inria.fr/inria-00338974/},
pubkind = {conf-fr-mono}
}

@inproceedings{2007-Dargaye-Leroy-LPAR,
author = {Zaynah Dargaye and Xavier Leroy},
title = {Mechanized verification of {CPS} transformations},
booktitle = {Logic for Programming,
Artificial Intelligence and Reasoning,
14th Int. Conf. LPAR 2007},
year = 2007,
series = {Lecture Notes in Artificial Intelligence},
volume = 4790,
publisher = {Springer},
pages = {211--225},
url = {http://xavierleroy.org/publi/cps-dargaye-leroy.pdf},
urlpublisher = {http://dx.doi.org/10.1007/978-3-540-75560-9_17},
hal = {http://hal.inria.fr/inria-00289541/},
abstract = {
Transformation to continuation-passing style (CPS) is often performed
by optimizing compilers for functional programming languages.  As part
of the development and proof of correctness of a compiler for the
mini-ML functional language, we have mechanically verified the
correctness of two CPS transformations for a call-by-value
$\lambda$-calculus with $n$-ary functions, recursive functions, data
types and pattern-matching.  The transformations generalize Plotkin's
original call-by-value transformation and Danvy and Nielsen's
optimized transformation, respectively.  We used the Coq proof
assistant to formalize the transformations and conduct and check the
proofs.  Originalities of this work include the use of big-step
operational semantics to avoid difficulties with administrative
redexes, and of two-sorted de Bruijn indices to avoid difficulties
with $\alpha$-conversion.},
pubkind = {conf-int-mono}
}

@inproceedings{2007-Doligez-Bonichon-Delahaye,
author = {Richard Bonichon and David Delahaye and Damien Doligez},
title = {{Zenon}: an Extensible Automated Theorem Prover Producing Checkable Proofs},
booktitle = {Logic for Programming,
Artificial Intelligence and Reasoning,
14th Int. Conf. LPAR 2007},
year = 2007,
series = {Lecture Notes in Artificial Intelligence},
volume = 4790,
publisher = {Springer},
pages = {151--165},
url = {http://focal.inria.fr/zenon/zenlpar07.pdf},
urlpublisher = {http://dx.doi.org/10.1007/978-3-540-75560-9_13},
hal = {http://hal.inria.fr/inria-00315920/},
abstract = {
We present Zenon, an automated theorem prover for first order
classical logic (with equality), based on the tableau method. Zenon is
intended to be the dedicated prover of the Focal environment, an
object-oriented algebraic specification and proof system, which is
able to produce OCaml code for execution and Coq code for
certification. Zenon can directly generate Coq proofs (proof
scripts or proof terms), which can be reinserted in the Coq
specifications produced by Focal. Zenon can also be extended,
which makes specific (and possibly local) automation possible
in Focal.},
pubkind = {conf-int-multi}
}

@inproceedings{2007-Leroy-Appel-listmachine-lfmtp,
author = {Appel, Andrew W.  and  Leroy, Xavier},
title = {A list-machine benchmark for mechanized metatheory
(extended abstract)},
booktitle = {Proc. Int. Workshop on Logical
Frameworks and Meta-Languages (LFMTP'06)},
year = {2007},
series = {Electronic Notes in Theoretical Computer Science},
volume = {174/5},
pages = {95--108},
url = {http://xavierleroy.org/publi/listmachine-lfmtp.pdf},
urlpublisher = {http://dx.doi.org/10.1016/j.entcs.2007.01.020},
hal = {http://hal.inria.fr/inria-00289543/},
abstract = {Short version of \cite{2006-Leroy-Appel-listmachine-tr}.},
pubkind = {conf-int-mono}
}

@article{2007-Leroy-Grall,
author = {Xavier Leroy and Hervé Grall},
title = {Coinductive big-step operational semantics},
journal = {Information and Computation},
volume = 207,
number = 2,
pages = {284--304},
year = 2009,
url = {http://xavierleroy.org/publi/coindsem-journal.pdf},
urlpublisher = {http://dx.doi.org/10.1016/j.ic.2007.12.004},
hal = {http://hal.inria.fr/inria-00309010/},
abstract = {Using a call-by-value functional language as an example, this article
illustrates the use of coinductive definitions and proofs in big-step
operational semantics, enabling it to describe diverging evaluations
in addition to terminating evaluations.  We formalize the connections
between the coinductive big-step semantics and the standard small-step
semantics, proving that both semantics are equivalent.  We then study
the use of coinductive big-step semantics in proofs of type soundness
and proofs of semantic preservation for compilers.
A methodological originality of this paper is that all results
have been proved using the Coq proof assistant.  We explain the
proof-theoretic presentation of coinductive definitions and proofs
offered by Coq, and show that it facilitates the discovery and the
presentation of the results.},
pubkind = {journal-int-mono}
}

@article{2007-Blazy-C-chronique,
author = {Sandrine Blazy},
title = {Comment gagner confiance en {C} ?},
journal = {Technique et Science Informatiques},
year = {2007},
volume = {26},
number = {9},
pages = {1195--1200},
url = {http://www.ensiie.fr/~blazy/TSI07Blazy.pdf},
hal = {http://hal.inria.fr/inria-00292049/},
pubkind = {journal-fr-mono}
}

@inproceedings{2007-Blazy-JGA,
author = {Sandrine Blazy and Benoît Robillard and Eric Soutif},
title = {Coloration avec préférences dans les graphes triangulés},
booktitle = {Journées Graphes et Algorithmes (JGA'07)},
year = {2007},
month = nov,
url = {http://www.liafa.jussieu.fr/jga07/JGA07.pdf},
pubkind = {conf-fr-mono}
}

@techreport{2007-Blazy-Appel-Cminor-sosRR,
author = {Andrew W. Appel and Sandrine Blazy},
title = {Separation logic for small-step {Cminor} (extended version)},
note = {29 pages},
year = 2007,
month = mar,
institution = {INRIA},
number = {6138},
type = {Research report},
hal = {http://hal.inria.fr/inria-00134699/}
}

@article{2008-Leroy-Blazy-memory-model,
author = {Xavier Leroy and Sandrine Blazy},
title = {Formal verification of a {C}-like memory model
and its uses for verifying program transformations},
journal = {Journal of Automated Reasoning},
volume = 41,
number = 1,
pages = {1--31},
url = {http://xavierleroy.org/publi/memory-model-journal.pdf},
urlpublisher = {http://dx.doi.org/10.1007/s10817-008-9099-0},
hal = {http://hal.inria.fr/inria-00289542/},
year = 2008,
pubkind = {journal-int-multi}
}

@article{2008-Leroy-Rideau-Serpette-pmov,
author = {Laurence Rideau and Bernard Paul Serpette and
Xavier Leroy},
title = {Tilting at windmills with {Coq}:
Formal verification of a compilation algorithm
for parallel moves},
journal = {Journal of Automated Reasoning},
volume = 40,
number = 4,
pages = {307--326},
url = {http://xavierleroy.org/publi/parallel-move.pdf},
urlpublisher = {http://dx.doi.org/10.1007/s10817-007-9096-8},
hal = {http://hal.inria.fr/inria-00289709/},
year = 2008,
pubkind = {journal-int-multi}
}

@inproceedings{2008-Tristan-Leroy-POPL,
author = {Jean-Baptiste Tristan and Xavier Leroy},
title = {Formal verification of translation validators:
A case study on instruction scheduling optimizations},
booktitle = {Proceedings of the 35th {ACM} Symposium on Principles of
Programming Languages (POPL'08)},
pages = {17--27},
publisher = {ACM Press},
year = 2008,
month = jan,
url = {http://xavierleroy.org/publi/validation-scheduling.pdf},
hal = {http://hal.inria.fr/inria-00289540/},
urlpublisher = {http://doi.acm.org/10.1145/1328897.1328444},
pubkind = {conf-int-mono},
abstract = {Translation validation consists of transforming a program and a
posteriori validating it in order to detect a modification of its
semantics. This approach can be used in a verified compiler, provided
that validation is formally proved to be correct. We present two such
validators and their Coq proofs of correctness.  The validators are
designed for two instruction scheduling optimizations: list
scheduling and trace scheduling.}
}

@inproceedings{2008-Blazy-JFLA,
author = {Sandrine Blazy and Benoît Robillard and Eric Soutif},
title = {Vérification formelle d'un algorithme d'allocation de registres par coloration de graphes},
booktitle = {Journées Francophones des Langages Applicatifs (JFLA'08)},
year = 2008,
month = jan,
pages = {31--46},
url = {http://www.ensiie.fr/~blazy/JFLABRS08.pdf},
hal = {http://hal.inria.fr/inria-00202713/},
pubkind = {conf-fr-mono}
}

@techreport{2007-Bertot-progsem-tr,
author = {Yves Bertot},
title = {Theorem proving support in programming language semantics},
year = {2007},
institution = {INRIA},
type = {Research report},
number = {6242},
hal = {http://hal.inria.fr/inria-00160309/}
}

@incollection{2009-Bertot-progsem,
author = {Yves Bertot},
title = {Theorem proving support in programming language semantics},
booktitle = {From Semantics to Computer Science -- Essays in Honour of {Gilles} {Kahn}},
editor = {Y. Bertot and G. Huet and J.-J. Lévy and G. Plotkin},
publisher = {Cambridge University Press},
pages = {337--362},
year = {2009},
urlpublisher = {http://www.cambridge.org/catalogue/catalogue.asp?isbn=9780521518253}
}

@unpublished{2006-Bertot-Tarski,
title = {Extending the Calculus of Constructions
with {Tarski}'s fix-point theorem},
author = {Yves Bertot},
abstract = {We propose to use Tarski's least fixpoint theorem as a basis to define recursive functions in the calculus of inductive constructions. This widens the class of functions that can be modeled in type-theory based theorem proving tool to potentially non-terminating functions. This is only possible if we extend the logical framework by adding the axioms that correspond to classical logic. We claim that the extended framework makes it possible to reason about terminating and non-terminating computations and we show that common facilities of the calculus of inductive construction, like program extraction can be extended to also handle the new functions.},
year = {2006},
hal = {http://hal.inria.fr/inria-00105529/}
}

@inproceedings{2008-Bertot-Komendantsky,
title = {Fixed point semantics and partial recursion in {Coq}},
author = {Yves Bertot and Vladimir Komendantsky},
booktitle = {10th int. ACM SIGPLAN conference on
Principles and Practice of Declarative Programming
(PPDP 2008)},
publisher = {ACM Press},
year = 2008,
pages = {89--96},
abstract = {We propose to use the least fixed point theorem as a basis to define recursive functions in the Calculus of Inductive Constructions. This widens the class of functions that can be modeled in type-theory based theorem proving tools to potentially non-terminating functions. This is only possible if we extend the logical framework by adding some axioms of classical logic. We claim that the extended framework makes it possible to reason about terminating or non-terminating computations and we show that extraction can also be extended to handle the new functions.},
hal = {http://hal.inria.fr/inria-00190975/},
urlpublisher = {http://doi.acm.org/10.1145/1389449.1389461},
pubkind = {conf-int-mono}
}

@inproceedings{2008-Blazy-ROADEF,
author = {Sandrine Blazy and Benoît Robillard and Éric Soutif},
title = {Coloration avec préférences: complexité,
inégalités valides et vérification formelle},
booktitle = {ROADEF'08, 9e congrès de la Société Française de
Recherche Opérationnelle et d'Aide à la Décision},
year = {2008},
hal = {http://hal.inria.fr/inria-00260712/},
pubkind = {conf-fr-mono}
}

@article{Leroy-backend,
author = {Xavier Leroy},
title = {A formally verified compiler back-end},
journal = {Journal of Automated Reasoning},
volume = 43,
number = 4,
pages = {363--446},
year = 2009,
url = {http://xavierleroy.org/publi/compcert-backend.pdf},
urlpublisher = {http://dx.doi.org/10.1007/s10817-009-9155-4},
hal = {http://hal.inria.fr/inria-00360768/},
pubkind = {journal-int-mono}
}

@article{Blazy-Leroy-Clight-09,
author = {Sandrine Blazy and Xavier Leroy},
title = {Mechanized semantics for the {Clight}
subset of the {C} language},
year = 2009,
journal = {Journal of Automated Reasoning},
url = {http://xavierleroy.org/publi/Clight.pdf},
hal = {http://hal.inria.fr/inria-00352524/},
urlpublisher = {http://dx.doi.org/10.1007/s10817-009-9148-3},
volume = 43,
number = 3,
pages = {263--288}
}

@inproceedings{Tristan-Leroy-LCM,
author = {Jean-Baptiste Tristan and Xavier Leroy},
title = {Verified Validation of {Lazy} {Code} {Motion}},
booktitle = {Proceedings of the 2009 ACM SIGPLAN Conference on
Programming Language Design and Implementation
(PLDI'09)},
year = 2009,
pages = {316--326},
url = {http://xavierleroy.org/publi/validation-LCM.pdf},
urlpublisher = {http://doi.acm.org/10.1145/1542476.1542512},
hal = {http://hal.archives-ouvertes.fr/inria-00415865/},
pubkind = {conf-int-mono}
}

@inproceedings{Blazy-Robillard-splitting,
author = {Sandrine Blazy and Benoît Robillard},
title = {Live-range Unsplitting for Faster Optimal Coalescing},
booktitle = {Proceedings of the ACM SIGPLAN/SIGBED 2009
conference on Languages,
Compilers, and Tools for Embedded Systems
(LCTES 2009)},
pages = {70--79},
publisher = {ACM Press},
year = 2009,
url = {http://www.ensiie.fr/~blazy/LCTES09.pdf},
urlpublisher = {http://doi.acm.org/10.1145/1542452.1542462},
hal = {http://hal.inria.fr/inria-00387749/},
pubkind = {conf-int-mono}
}

@inproceedings{Letouzey-CiE-08,
author = {Letouzey, Pierre},
booktitle = {Logic and Theory of Algorithms, Fourth Conference on Computability in Europe, CiE 2008},
title = {Extraction in {Coq}: An Overview},
pages = {359--369},
year = 2008,
urlpublisher = {http://dx.doi.org/10.1007/978-3-540-69407-6_39},
hal = {http://hal.archives-ouvertes.fr/hal-00338973/},
volume = 5028,
series = {Lecture Notes in Computer Science},
publisher = {Springer},
pubkind = {conf-int-mono}
}

@incollection{Bertot-absint-08,
author = {Yves Bertot},
title = {Structural abstract interpretation:
A formal study using {Coq}},
booktitle = {Language Engineering and Rigorous
Software Development,
International LerNet ALFA Summer School 2008},
editor = {Ana Bove and Jorge Sousa Pinto},
series = {Lecture Notes in Computer Science},
volume = 5520,
publisher = {Springer},
year = 2009,
pages = {153--194},
urlpublisher = {http://dx.doi.org/10.1007/978-3-642-03153-3_4},
hal = {http://hal.inria.fr/inria-00329572/}
}

@phdthesis{Blazy-HDR,
author = {Sandrine Blazy},
title = {Sémantiques formelles},
type = {Habilitation à diriger les recherches},
school = {Université Évry Val d'Essonne},
year = 2008,
month = oct,
url = {http://www.ensiie.fr/~blazy/hdr.html},
hal = {http://tel.archives-ouvertes.fr/tel-00336576/}
}

@inproceedings{Glondu-JFLA09,
author = {Stéphane Glondu},
title = {Extraction certifiée dans {Coq}-en-{Coq}},
booktitle = {Journées Francophones des Langages Applicatifs (JFLA'09)},
publisher = {INRIA},
year = {2009},
pages = {105--118},
url = {http://stephane.glondu.net/jfla09.pdf},
hal = {http://hal.inria.fr/inria-00362717/},
pubkind = {conf-fr-mono}
}

@article{Dargaye-Leroy-uncurrying,
author = {Zaynah Dargaye and Xavier Leroy},
title = {A verified framework for higher-order uncurrying optimizations},
journal = {Higher-Order and Symbolic Computation},
url = {http://xavierleroy.org/publi/higher-order-uncurrying.pdf},
urlpublisher = {http://dx.doi.org/10.1007/s10990-010-9050-z},
year = 2009,
volume = 22,
number = 3,
pages = {199--231}
}

@article{Leroy-Compcert-CACM,
author = {Xavier Leroy},
title = {Formal verification of a realistic compiler},
journal = {Communications of the ACM},
year = 2009,
volume = 52,
number = 7,
pages = {107--115},
url = {http://xavierleroy.org/publi/compcert-CACM.pdf},
urlpublisher = {http://doi.acm.org/10.1145/1538788.1538814},
hal = {http://hal.archives-ouvertes.fr/inria-00415861/},
pubkind = {journal-int-mono},
abstract = {This paper reports on the development and formal verification (proof
of semantic preservation) of CompCert, a compiler from Clight (a
large subset of the C programming language) to PowerPC assembly code,
using the Coq proof assistant both for programming the compiler and
for proving its correctness.  Such a verified compiler is useful in
the context of critical software and its formal verification: the
verification of the compiler guarantees that the safety properties
proved on the source code hold for the executable compiled code as
well.}
}

@inproceedings{Tristan-Leroy-softpipe,
author = {Jean-Baptiste Tristan and Xavier Leroy},
title = {A simple, verified validator for software pipelining},
booktitle = {Proceedings of the 37th {ACM} Symposium on Principles of
Programming Languages (POPL'10)},
pages = {83--92},
publisher = {ACM Press},
year = 2010,
url = {http://xavierleroy.org/publi/validation-softpipe.pdf},
urlpublisher = {http://doi.acm.org/10.1145/1706299.1706311}
}

@phdthesis{Dargaye-these,
author = {Zaynah Dargaye},
title = {Vérification formelle d'un compilateur pour langages fonctionnels},
school = {Université Paris 7 Diderot},
year = 2009,
month = jul,
hal = {http://tel.archives-ouvertes.fr/tel-00452440/}
}

@phdthesis{Tristan-these,
author = {Jean-Baptiste Tristan},
title = {Formal verification of translation validators},
school = {Université Paris 7 Diderot},
month = nov,
year = 2009,
hal = {http://tel.archives-ouvertes.fr/tel-00437582/}
}

@article{Leroy-La-Recherche-10,
author = {Xavier Leroy},
title = {Comment faire confiance à un compilateur?},
journal = {La Recherche},
note = {Les cahiers de l'INRIA},
year = {2010},
month = apr,
volume = 440,
url = {http://xavierleroy.org/publi/cahiers-inria-2010.pdf},
urlpublisher = {http://www.lescahiersinria.fr/sites/default/files/inria-n440-avril10.pdf},
xtopic = {compcert},
popularscience = {yes},
abstract = {(In French.)  A short introduction to compiler verification, published in the French popular science magazine La Recherche.}
}

@inproceedings{Rideau-Leroy-regalloc,
author = {Silvain Rideau and Xavier Leroy},
title = {Validating register allocation and spilling},
booktitle = {Compiler Construction (CC 2010)},
year = 2010,
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = 6011,
pages = {224--243},
xtopic = {compcert},
url = {http://xavierleroy.org/publi/validation-regalloc.pdf},
urlpublisher = {http://dx.doi.org/10.1007/978-3-642-11970-5_13},
abstract = {Following the translation validation approach to high-assurance
compilation, we describe a new algorithm for validating {\em a
posteriori} the results of a run of register allocation.  The
algorithm is based on backward dataflow inference of
equations between variables, registers and stack locations, and can
cope with sophisticated forms of spilling and live range splitting, as
well as many forms of architectural irregularities such as overlapping
registers.  The soundness of the algorithm was mechanically proved
using the Coq proof assistant.}
}

@incollection{Leroy-Marktoberdorf-09,
author = {Xavier Leroy},
title = {Mechanized semantics},
booktitle = {Logics and languages for reliability and security},
editor = {J. Esparza and B. Spanfelner and O. Grumberg},
series = {NATO Science for Peace and Security Series D: Information and Communication Security},
volume = 25,
pages = {195--224},
publisher = {IOS Press},
year = {2010},
url = {http://xavierleroy.org/courses/Marktoberdorf-2009/notes.pdf},
urlpublisher = {http://dx.doi.org/10.3233/978-1-60750-100-8-195},
xtopic = {mechsem},
abstract = {The goal of this lecture is to show how modern theorem provers---in
this case, the Coq proof assistant---can be used to mechanize the specification
of programming languages and their semantics, and to reason over
individual programs and over generic program transformations,
as typically found in compilers.  The topics covered include:
operational semantics (small-step, big-step, definitional
interpreters); a simple form of denotational semantics; axiomatic
semantics and Hoare logic; generation of verification conditions, with
application to program proof; compilation to virtual machine code and
its proof of correctness; an example of an optimizing program
transformation (dead code elimination) and its proof of correctness.}
}

@inproceedings{Leroy-PPES-2011,
author = {Ricardo {Bedin França} and Denis Favre-Felix and Xavier Leroy
and Marc Pantel and Jean Souyris},
title = {Towards Optimizing Certified Compilation in Flight Control Software},
booktitle = {Workshop on Predictability and Performance in Embedded Systems (PPES 2011)},
pages = {59--68},
series = {OpenAccess Series in Informatics},
volume = 18,
publisher = {Dagstuhl Publishing},
year = {2011},
url = {http://hal.archives-ouvertes.fr/inria-00551370/},
urlpublisher = {http://dx.doi.org/10.4230/OASIcs.PPES.2011.59},
xtopic = {compcert},
abstract = {
This work presents a preliminary evaluation of the use of the CompCert formally specified and verified  optimizing compiler for the development of level A critical flight control software. First, the motivation for choosing CompCert is presented, as well as the requirements and constraints for safety-critical avionics software. The main point is to allow optimized code generation by relying on the formal proof of correctness instead of the current un-optimized generation required to produce assembly code structurally similar to the algorithmic language (and even the initial models) source code. The evaluation of its performance (measured using WCET) is presented and the results are compared to those obtained with the currently used compiler. Finally, the paper discusses verification and certification issues that are raised when one seeks to use CompCert for the development of such critical software. }
}

@inproceedings{Leroy-POPL11,
author = {Xavier Leroy},
title = {Verified squared: does critical software deserve verified tools?},
booktitle = {38th symposium Principles of Programming Languages},
pages = {1--2},
year = 2011,
publisher = {ACM Press},
note = {Abstract of invited lecture},
url = {http://xavierleroy.org/publi/popl11-invited-talk.pdf},
urlpublisher = {http://dx.doi.org/10.1145/1926385.1926387},
xtopic = {compcert},
abstract = {
The formal verification of programs have progressed tremendously in
Hoare logic and abstract interpretation finally gave birth to quality
verification tools, operating over source code (and not just idealized
models thereof) and able to verify complex real-world applications.
In this talk, I review some of the obstacles that
remain to be lifted before source-level verification tools can be
taken really seriously in the critical software industry: not just as
sophisticated bug-finders, but as elements of absolute confidence in
the correctness of a critical application.
}
}

@inproceedings{Bedin-Franca-ERTS-2012,
author = {Ricardo {Bedin França} and Sandrine Blazy and Denis Favre-Felix
and Xavier Leroy and Marc Pantel and Jean Souyris},
title = {Formally verified optimizing compilation in {ACG}-based
flight control software},
booktitle = {Embedded Real Time Software and Systems (ERTS 2012)},
year = 2012,
url = {http://hal.inria.fr/hal-00653367/},
xtopic = {compcert},
abstract = {This work presents an evaluation of the CompCert formally specified
and verified optimizing compiler for the development of DO-178 level A
flight control software. First, some fundamental characteristics of
flight control software are presented and the case study program is
described. Then, the use of CompCert is justified: its main point is
to allow optimized code generation by relying on the formal proof of
current un-optimized generation required to produce predictable
assembly code patterns. The evaluation of its performance (measured
using WCET and code size) is presented and the results are compared to
those obtained with the currently used compiler.}
}

@inproceedings{Jourdan-Pottier-Leroy,
author = {Jacques-Henri Jourdan and François Pottier and Xavier Leroy},
title = {Validating {LR(1)} Parsers},
booktitle = {Programming Languages and Systems -- 21st European Symposium on Programming, ESOP 2012},
url = {http://xavierleroy.org/publi/validated-parser.pdf},
urlpublisher = {http://dx.doi.org/10.1007/978-3-642-28869-2_20},
year = 2012,
pages = {397--416},
series = {Lecture Notes in Computer Science},
volume = 7211,
publisher = {Springer},
xtopic = {compcert},
abstract = {
An LR(1) parser is a finite-state automaton, equipped with a stack,
which uses a combination of its current state and one lookahead symbol
in order to determine which action to perform next.  We present a
validator which, when applied to a context-free grammar G and an
automaton A, checks that A and G agree.  This validation of the parser
provides the correctness guarantees required by verified compilers and
other high-assurance software that involves parsing.  The validation
process is independent of which technique was used to construct A. The
validator is implemented and proved correct using the Coq proof
assistant. As an application, we build a formally-verified parser for
the C99 language.}
}

@inproceedings{Leroy-APLAS-2012,
author = {Xavier Leroy},
title = {Mechanized Semantics for Compiler Verification},
note = {Abstract of invited talk},
booktitle = {Programming Languages and Systems, 10th Asian Symposium, APLAS 2012},
editor = {Ranjit Jhala and Atsushi Igarashi},
year = {2012},
pages = {386--388},
series = {Lecture Notes in Computer Science},
volume = 7705,
publisher = {Springer},
url = {http://xavierleroy.org/publi/mechanized-semantics-aplas-cpp-2012.pdf},
urlpublisher = {http://dx.doi.org/10.1007/978-3-642-35182-2_27},
xtopic = {mechsem},
refereed = {no},
abstract = {The formal verification of compilers and related programming tools
depends crucially on the availability of appropriate mechanized
semantics for the source, intermediate and target languages.  In this
invited talk, I review various forms of operational semantics and
their mechanization, based on my experience with the formal
verification of the CompCert~C compiler.}
}

@inproceedings{Robert-Leroy-2012,
author = {Valentin Robert and Xavier Leroy},
title = {A Formally-Verified Alias Analysis},
booktitle = {Certified Programs and Proofs (CPP 2012)},
year = 2012,
series = {Lecture Notes in Computer Science},
volume = 7679,
pages = {11--26},
publisher = {Springer},
urlpublisher = {http://dx.doi.org/10.1007/978-3-642-35308-6_5},
url = {http://xavierleroy.org/publi/alias-analysis.pdf},
xtopic = {compcert},
abstract = {This paper reports on the formalization and proof of soundness, using
the Coq proof assistant, of an alias analysis: a static analysis that
approximates the flow of pointer values.  The alias analysis
considered is of the points-to kind and is intraprocedural,
flow-sensitive, field-sensitive, and untyped.  Its soundness proof
follows the general style of abstract interpretation.
The analysis is designed to fit in the CompCert C verified compiler,
supporting future aggressive optimizations over memory accesses.}
}

@techreport{Leroy-Appel-Blazy-Stewart-memory-v2,
author = {Xavier Leroy and Andrew W. Appel and Sandrine Blazy and Gordon Stewart},
title = {The {CompCert} Memory Model, Version 2},
institution = {INRIA},
type = {Research report},
number = {RR-7987},
year = {2012},
month = jun,
url = {http://hal.inria.fr/hal-00703441},
xtopic = {compcert},
abstract = {A memory model is an important component of the formal semantics of imperative programming languages: it specifies the behavior of operations over memory states, such as reads and writes. The formally verified CompCert C compiler uses a sophisticated memory model that is shared between the semantics of its source language (the CompCert subset of C) and intermediate languages. The algebraic properties of this memory model play an important role in the proofs of semantic preservation for the compiler. The initial design of the CompCert memory model is described in an article by Leroy and Blazy (J. Autom. Reasoning 2008). The present research report describes version 2 of this memory model, improving over the main limitations of version 1. The first improvement is to expose the byte-level, in-memory representation of integers and floats, while preserving desirable opaqueness properties of pointer values. The second improvement is the integration of a fine-grained mechanism of permissions (access rights), which supports more aggressive optimizations over read-only data, and paves the way towards shared-memory, data-race-free concurrency in the style of Appel's Verified Software Toolchain project.}
}

@inproceedings{Boldo-Jourdan-Leroy-Melquiond-2013,
title = {A Formally-Verified {C} Compiler Supporting Floating-Point Arithmetic},
author = {Boldo, Sylvie and Jourdan, Jacques-Henri and Leroy, Xavier and Melquiond, Guillaume},
booktitle = {ARITH, 21st IEEE International Symposium on Computer Arithmetic},
pages = {107--115},
publisher = {IEEE Computer Society Press},
url = {http://hal.inria.fr/hal-00743090},
year = 2013,
xtopic = {compcert},
abstract = {Floating-point arithmetic is known to be tricky: roundings, formats,
exceptional values. The IEEE-754 standard was a push towards
straightening the field and made formal reasoning about floating-point
computations easier and flourishing. Unfortunately, this is not
sufficient to guarantee the final result of a program, as several
other actors are involved: programming language, compiler,
architecture.  The CompCert formally-verified compiler provides a
solution to this problem: this compiler comes with a mathematical
specification of the semantics of its source language (a large subset
of ISO C90) and target platforms (ARM, PowerPC, x86-SSE2), and with a
proof that compilation preserves semantics.  In this paper, we report
on our recent success in formally specifying and proving correct
CompCert's compilation of floating-point arithmetic.  Since CompCert
is verified using the Coq proof assistant, this effort required a
suitable Coq formalization of the IEEE-754 standard; we extended the
Flocq library for this purpose.  As a result, we obtain the first
formally verified compiler that provably preserves the semantics of
floating-point programs.}
}

@incollection{Leroy-Appel-Blazy-Stewart-memory,
author = {Leroy, Xavier and Appel, Andrew W. and Blazy, Sandrine and Stewart, Gordon},
title = {The {CompCert} memory model},
year = {2014},
month = apr,
booktitle = {Program Logics for Certified Compilers},
editor = {Appel, Andrew W.},
publisher = {Cambridge University Press},
url = {http://vst.cs.princeton.edu/}
}

@article{Boldo-Jourdan-Leroy-Melquiond-JAR,
title = {Verified Compilation of Floating-Point Computations},
author = {Boldo, Sylvie and Jourdan, Jacques-Henri and Leroy, Xavier and Melquiond, Guillaume},
journal = {Journal of Automated Reasoning},
year = 2015,
volume = 54,
number = 2,
pages = {135--163},
xtopic = {compcert},
url = {http://xavierleroy.org/publi/floating-point-compcert.pdf},
urlpublisher = {http://dx.doi.org/10.1007/s10817-014-9317-x},
abstract = {Floating-point arithmetic is known to be tricky: roundings, formats,
exceptional values. The IEEE-754 standard was a push towards
straightening the field and made formal reasoning about floating-point
computations easier and flourishing. Unfortunately, this is not
sufficient to guarantee the final result of a program, as several
other actors are involved: programming language, compiler, and
architecture.  The CompCert formally-verified compiler provides a
solution to this problem: this compiler comes with a mathematical
specification of the semantics of its source language (a large subset
of ISO C99) and target platforms (ARM, PowerPC, x86-SSE2), and with a
proof that compilation preserves semantics.  In this paper, we report
on our recent success in formally specifying and proving correct
CompCert's compilation of floating-point arithmetic.  Since CompCert
is verified using the Coq proof assistant, this effort required a
suitable Coq formalization of the IEEE-754 standard; we extended the
Flocq library for this purpose.  As a result, we obtain the first
formally verified compiler that provably preserves the semantics of
floating-point programs.}
}

@inproceedings{Kastner-LBSSF-2017,
title = {Closing the gap -- The formally verified optimizing compiler {CompCert}},
author = {K{\"a}stner, Daniel and Leroy, Xavier and Blazy, Sandrine and Schommer, Bernhard and Schmidt, Michael and Ferdinand, Christian},
urllocal = {http://xavierleroy.org/publi/compcert-SSS2017.pdf},
hal = {https://hal.inria.fr/hal-01399482},
urlpublisher = {http://scsc.org.uk/p135},
booktitle = {SSS'17: Developments in System Safety Engineering: Proceedings of the Twenty-fifth Safety-critical Systems Symposium},
year = {2017},
publisher = {CreateSpace},
pages = {163--180},
xtopic = {compcert},
abstract = {CompCert is the first commercially available optimizing compiler that is formally verified, using machine-assisted mathematical proofs, to be free from miscompilation. The executable code it produces is proved to behave exactly as specified by the semantics of the source C program. CompCert's intended use is the compilation of safety-critical and mission-critical software meeting high levels of assurance. This article gives an overview of the design of CompCert and its proof concept, summarizes the resulting confidence argument, and gives an overview of relevant tool qualification strategies. We briefly summarize practical experience and give an overview of recent CompCert developments.}
}

@inproceedings{Leroy-BKSPF-2016,
author = {Xavier Leroy and Sandrine Blazy and Daniel K{\"a}stner
and Bernhard Schommer and Markus Pister and Christian Ferdinand},
title = {{CompCert} -- A Formally Verified Optimizing Compiler},
booktitle = {ERTS 2016: Embedded Real Time Software and Systems},
publisher = {SEE},
year = 2016,
url = {http://xavierleroy.org/publi/erts2016_compcert.pdf},
hal = {https://hal.inria.fr/hal-01238879},
xtopic = {compcert},
abstract = {CompCert is the first commercially available
optimizing compiler that is formally verified, using machine-assisted
mathematical proofs, to be exempt from mis-compilation.
The executable code it produces is proved
to behave exactly as specified by the semantics of the
source C program. This article gives an overview of
the design of CompCert and its proof concept and then
focuses on aspects relevant for industrial application.
We briefly summarize practical experience and give an
overview of recent CompCert development aiming at industrial usage.
CompCert's intended use is the compilation of life-critical
and mission-critical software meeting high levels of assurance.
In this context tool qualification is of paramount importance. We
summarize the confidence argument of CompCert and give an overview of
relevant qualification strategies.}
}

@inproceedings{Jourdan-LBLP-2015,
author = {Jacques-Henri Jourdan and Vincent Laporte and
Sandrine Blazy and Xavier Leroy and David Pichardie},
title = {A Formally-Verified {C} Static Analyzer},
booktitle = {POPL 2015: 42nd symposium Principles of Programming Languages},
publisher = {ACM Press},
year = 2015,
url = {http://xavierleroy.org/publi/verasco-popl2015.pdf},
urlpublisher = {http://dx.doi.org/10.1145/2676726.2676966},
pages = {247--259},
xtopic = {analysis},
abstract = {This paper reports on the design and soundness proof, using the Coq
proof assistant, of Verasco, a static analyzer based on abstract
interpretation for most of the ISO~C~1999 language (excluding
recursion and dynamic allocation).  Verasco establishes the absence of
run-time errors in the analyzed programs.  It enjoys a modular
architecture that supports the extensible combination of multiple
abstract domains, both relational and non-relational.  Verasco
integrates with the CompCert formally-verified C~compiler so that not
only the soundness of the analysis results is guaranteed with
mathematical certitude, but also the fact that these guarantees carry
over to the compiled code.}
}

@inproceedings{Bourke-BDLPR-2017,
title = {A formally verified compiler for {Lustre}},
author = {Timothy Bourke and
Lélio Brun and
Pierre-Évariste Dagand and
Xavier Leroy and
Marc Pouzet and
Lionel Rieg},
booktitle = {PLDI 2017: Programming Language Design and Implementation},
pages = {586--601},
publisher = {ACM Press},
year = 2017,
url = {http://xavierleroy.org/publi/velus-pldi17.pdf},
hal = {https://hal.inria.fr/hal-01512286/},
urlpublisher = {https://doi.org/10.1145/3062341.3062358},
xtopic = {compcert},
abstract = {The correct compilation of block diagram languages like Lustre, Scade, and a discrete subset of Simulink is important since they are used to program critical embedded control software. We describe the specification and verification in an Interactive Theorem Prover of a compilation chain that treats the key aspects of Lustre: sampling, nodes, and delays. Building on CompCert, we show that repeated execution of the generated assembly code faithfully implements the dataflow semantics of source programs. We resolve two key technical challenges. The first is the change from a synchronous dataflow semantics, where programs manipulate streams of values, to an imperative one, where computations manipulate memory sequentially. The second is the verified compilation of an imperative language with encapsulated state to C code where the state is realized by nested records. We also treat a standard control optimization that eliminates unnecessary conditional statements.}
}

@inproceedings{CompCert-ERTS-2018,
author = {Daniel Kästner and
Ulrich Wünsche and
Jörg Barrho and
Marc Schlickling and
Bernhard Schommer and
Michael Schmidt and
Christian Ferdinand and
Xavier Leroy and
Sandrine Blazy},
title = {{CompCert}: Practical experience on integrating and qualifying
a formally verified optimizing compiler},
booktitle = {ERTS 2018: Embedded Real Time Software and Systems},
publisher = {SEE},
year = 2018,
month = jan,
hal = {https://hal.inria.fr/hal-01643290},
url = {http://xavierleroy.org/publi/erts2018_compcert.pdf},
xtopic = {compcert},
abstract = {CompCert is the first commercially available optimizing compiler that is formally verified, using machine-assisted mathematical proofs, to be exempt from mis-compilation. The executable code it produces is proved to behave exactly as specified by the semantics of the source C program. This article gives an overview of the use of CompCert to gain certification credits for a highly safety-critical industry application, certified according to IEC 60880. We will briefly introduce the target application, illustrate the process of changing the existing compiler infrastructure to CompCert, and discuss performance characteristics. The main part focuses on the tool qualification strategy, in particular on how to take advantage of the formal correctness proof in the certification process.}
}

@inproceedings{AIS-annot-WCET-2018,
title = {Embedded Program Annotations for {WCET} Analysis},
author = {Schommer, Bernhard and Cullmann, Christoph and Gebhard, Gernot and Leroy, Xavier and Schmidt, Michael and Wegener, Simon},
hal = {https://hal.inria.fr/hal-01848686},
booktitle = {WCET 2018: 18th International Workshop on Worst-Case Execution Time Analysis},
publisher = {Dagstuhl Publishing},
year = {2018},
month = jul,
abstract = {We present \verb'__builtin_ais_annot()', a user-friendly, versatile way to transfer annotations (also known as flow facts) written on the source code level to the machine code level. To do so, we couple two tools often used during the development of safety-critical hard real-time systems, the formally verified C compiler CompCert and the static WCET analyzer aiT. CompCert stores the AIS annotations given via \verb'__builtin_ais_annot()' in a special section of the ELF binary, which can later be extracted automatically by aiT.}
}

@phdthesis{jourdan:tel-01327023,
title = {Verasco: a Formally Verified {C} Static Analyzer},
author = {Jourdan, Jacques-Henri},
hal = {https://hal.archives-ouvertes.fr/tel-01327023},
school = {Universit{\'e} Paris 7 Diderot},
year = {2016},
month = may,
type = {PhD thesis},
hal_id = {tel-01327023},
hal_version = {v1}
}


This file was generated by bibtex2html 1.98.