\documentstyle{article}
%\input{vijay-macros}
\def\withmath#1{\relax\ifmmode#1\else{$#1$}\fi}
\def\withmmode#1{\relax\ifmmode#1\else{$#1$}\fi}
\def\Prolog{{\bf Prolog}}
\def\Herbrand{{\bf Herbrand}}
\def\cc{{\sf cc}}
\def\AB{\withmath{{\cal A}(B)}}
\def\then{\withmath{\;\rightarrow\;}}
\newtheorem{defin}{Definition}[section]
\newtheorem{definition}[defin]{Definition}
\def\tuple#1{\withmath{\langle #1 \rangle}}
\newtheorem{example}{Example}[section]
\def\alt{\withmmode{\;{\tt\char`\|}\;}}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{proposition}[theorem]{Proposition}
\newcommand{\DD}{\withmath{{\cal D}}}
\makeatletter
{\@definecounter{MetaNote}\@addtoreset{MetaNote}{section}%
\expandafter\xdef\csname theMetaNote\endcsname{\expandafter\noexpand
\csname thesection\endcsname \@thmcountersep \@thmcounter{MetaNote}}%
\global\long\def\MetaNote#1{{\@thm{MetaNote}{MetaNote} #1\@endtheorem}}}
%% In case you want Meta-notes not to appear in the printed
%% text, e.g. in the final version, remove the following %% signs:
%% \long\def\MetaNote#1{\relax}
{\@definecounter{TechComm}\@addtoreset{TechComm}{section}%
\expandafter\xdef\csname theTechComm\endcsname{\expandafter\noexpand
\csname thesection\endcsname \@thmcountersep \@thmcounter{TechComm}}%
\global\long\def\TechComm#1{{\@thm{TechComm}{Note to Reviewer:} #1\@endtheorem}}}
\makeatother
\def\IN{\withmath{\mbox{{\tt in}}\;}}
\def\all{\withmath{\mbox{{\tt all}}\;}}
\def\DO{\withmath{\;\mbox{{\tt do}}\;}}
\def\lub{\sqcup}
\def\tell{\withmath{\star}}
\def\IF{\withmath{\;\mbox{\em if}\;}}
\def\THEN{\withmath{\;\mbox{\em then}\;}}
\def\ELSE{\withmath{\;\mbox{\em else}\;}}
\def\AA{{\cal A}}
\def\PP{{\cal P}}
\def\glb{\withmath{\sqcap}}
\def\up{\withmath{\uparrow}}
%%\input{/tigger/saraswat/tex/cc-program-thesis-two-column.tex}
% October 22, 1988, Vijay Saraswat (c) Xerox PARC
%
% Macros for typesetting cc (and FCP and GHC...) programs.
%
% Exported, top-level macros: \ax, \ag
%
% Exported environments: ccprogram, fccprogram, Frame
%
% Updated December 14, 1988.
% Also, agent is introduced, with terminator .., since . is used for indexing.
% Also ~ is used for label.
% Also ++ gets \OrSep, + now gets plus.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% The original code was from Eric Tribble. I have combined it
% with the code I wrote modifying the LaTeX tabbing environment
% so that each field is processed in math mode.
% As it is now, this file should be used from now on for writing
% new programs, rather than the old files.
% To do:
% Allow comment and Lists to span multiple lines.
% Allow comment to be started off with --.
% 10 November 1988
% Set up the \{ ... \} pairs. \{ starts off a + expression by
% opening up a paren and then leaving enough space so that
% successive uses of + produce a layout of the form:
%   ( Ag1
%   + Ag2
%     ...
%   + Ag3)
% For example, try:
%\begin{ccprog}
%\ccag
%p(X, State)::
%\{ grab(X)\vop \then type\_A(State)
%+ grab(X)\iop \then type\_B(State)
%+ arbafhsiadhoashdis \tthen fasjofjsaod \}.
%\end{ccprog}
% There is some bogosity right now in how \} works: ideally
% just a \-) should have worked, but it seems to reduce
% first-tab-stop by 1 for the current line. I suspect this
% might be interacting badly with \obeycr. For the time being
% just use \} separately by itself on a line.
%%%%%%%%%%%%%%%%%%%%%
% Some space eating macros from latex.tex
%\makeatletter
%{\catcode`\^^M=13 \gdef\obeycrax{\catcode`\^^M=13
%\def^^M{\\ }\@gobblecr}%
%Note that \obeycr is defined within latex.tex
\def\withmath#1{\relax\ifmmode#1\else{$ #1 $}\fi}
\def\plus{\withmath{\;\char`\+\;}}
%No carriage returns allowed within comment, at present.
\def\ccomment#1{%
\begingroup%
\settowidth{\hangindent}{/*\ }
\noindent {\tt -}{\tt -} #1 %
\endgroup}
\newlength{\globalparindent}
\setlength{\globalparindent}{\parindent}
%%The following modifies LaTeX's tab environment such that each
%%field is processed in math mode, and tt font. Thus spacing is
%%as in math mode. Quite delicate.
\makeatletter
\def\ccaxgtabmods{%
\let\bmath=$%
\def\@stopfield{\ifmmode\bmath\else\relax\fi\egroup}%
\def\@startfield{\global\setbox\@curfield\hbox\bgroup%%
\ifmmode\relax\else\bmath\fi\tt}%{ BRACE MATCH HACK
\def\@contfield{\global\setbox\@curfield\hbox\bgroup%
\ifmmode\relax\else\bmath\fi\tt\unhbox\@curfield}}
\makeatother
\def\axactivate{%
\catcode`;=13\relax
\catcode`\:=13\relax
%% \catcode`\!=13\relax
\catcode`\|=13\relax
\catcode`\[=13\relax
\catcode`\]=13\relax
\catcode`+=13\relax
\catcode`-=13\relax
\catcode`,=13\relax
\catcode`<=13\relax
\catcode`>=13\relax
\catcode`\~=13\relax
\catcode`\==13\relax
}
{% setup for the macro ax
\axactivate
\gdef\axpunct{%
\axactivate
\def:##1{\ifx-##1\relax
\withmath\leftarrow% Only in Axioms.
\else
\ifx:##1{\bf\;\char`\:\char`\:\;} % Only in Agents.
\else
\ifx=##1{\char`\:\char`\=}
\else
{\;\char`\:\;} ##1 % Separate ask and tell--only in Axioms
\fi
\fi
\fi}
\def-##1{\if\noexpand>\noexpand##1\relax{\then}\else{\;\char`\-\;} ##1\fi}
%Doesn't quite work for ==>. Use \sdk for it.
\def=##1{\if\noexpand>\noexpand##1\relax{\tthen}\else{\;\char`\=\;} ##1\fi}
%Allows || to appear as \PAR and | to appear as |.
\def|##1{\ifx|##1\relax{\;\PAR\;}\else\ifx>##1{\;\hookrightarrow\;}
\else{\;{\tt\char`\|}\;} ##1 % Rest operator, in list.
\fi\fi}
%Carriage-returns within a list will elicit an error.
%We want to fix this later.
\def\|{\;{\tt \char`\|}\;}
\def[{\char`\[\begingroup\def|{\;\char`\|\;}}\def\[{\tt\char`\[}
\def]{\char`\]\endgroup}\def\]{\char`\]}
\def~{\Lab}
\def;{\;\char`\:\;} % Redefined, 17 January 1989.
\def,{\char`\,\;}
\def<{\leavevmode\hbox{\tt\char`\<}}\def\lt{\char`\<}
\def>##1{\if\noexpand>\noexpand##1{\gg}
\else{\leavevmode\hbox{\tt\char`\>}} ##1 % Rest operator, in list.
\fi}
\def\gt{\char`\>}
%% \def!{\ask}
\def\:{\char`\: \>}
\def\R{\=\+}\def\L{\-}
\def\W{\hspace{\globalparindent}}
\def\I{\W\R}
\def+##1{\ifx+##1{\OrSep}\else{\plus}##1\fi}
\def\{{\char`\{ \=\+ }\def\}{\char`\}\-}
\def\({(\R}\def\){)\L}
}
} % end setup group
\makeatletter
\long\def\ccquery #1.{\pushtabs \leftarrow \=\+ #1. \-\\ \poptabs}
\long\def\ccag#1.{\pushtabs%
\hspace{\globalparindent}\=\+ \kill
\hspace{-\globalparindent} #1 \- \\ \poptabs \@gobblecr}
\long\def\agent#1..{\pushtabs%
\hspace{\globalparindent}\=\+ \kill
\hspace{-\globalparindent} #1. \- \\ \poptabs \@gobblecr}
\long\def\sagent#1..{\pushtabs%
\hspace{\globalparindent}\=\+ \kill
\hspace{-\globalparindent} #1 \- \\ \poptabs \@gobblecr}
\long\def\ccax#1.{\pushtabs%
\hspace{\globalparindent}\=\+ \kill
\hspace{-\globalparindent} #1.
\- \\ \poptabs \@gobblecr}
%\long\def\ccax#1.{%
% \hspace{\globalparindent}\= \+ \kill
% \hspace{-\globalparindent}#1.\-}
%
%\poptabs\-}
\def\sptabular{tabular}
\def\sptabularx{tabular*}
\def\sptabbing{tabbing}
\def\begin#1{\@ifundefined{#1}{\def\@tempa{\@latexerr{Environment #1 undefined}\@eha}}{\def\@currenvir{#1}\def\@tempa{\csname #1\endcsname}}\begingroup
\def\sptabfont{#1}
\ifmmode\else
\ifx\sptabfont\sptabular
\vskip0pt
%%\vskip2pt
%%\hrule height .25pt
%%\vskip.6pt
\eightpoint
\else
\ifx\sptabfont\sptabularx
\vskip0pt
%%\vskip2pt
%%\hrule height .25pt
%%\vskip.6pt
\eightpoint\else
\ifx\sptabfont\sptabbing
\vskip0pt
%%\vskip6pt
%%\hrule height .25pt
\eightpoint
%%\vskip-8pt
\fi\fi\fi\fi
\@tempa}
\def\end#1{\csname end#1\endcsname%
\def\sptabfont{#1}%
\ifmmode\else
\ifx\sptabfont\sptabular
%%\vskip.6pt
%%\hrule height .25pt
%%\vskip2pt
\vskip0pt
\else
\ifx\sptabfont\sptabularx
%%\vskip.6pt
%%\hrule height .25pt
%%\vskip2pt
\vskip0pt
\else
\ifx\sptabfont\sptabbing
%%\vskip1sp
%%\hrule height .25pt
%%\vskip2pt
\vskip0pt
\fi\fi\fi\fi\endgroup%
\@checkend{#1}\if@ignore \global\@ignorefalse \ignorespaces\fi}
\newenvironment{ccprogram}%
{%\ifvmode\nointerlineskip \makebox[.6\linewidth]\fi%
\begingroup\tt\axpunct\obeycr\ccaxgtabmods\begin{tabbing}}%
{\end{tabbing}\endgroup\ignorespaces\global\@ignoretrue}
\makeatother
\font\thirtysixpoint=cmr10 at 36pt
\font\thirtysixbold=cmbx10 at 36pt
\font\tenpoint=cmr10
\font\tenbold=cmbx10
\font\tenit=cmti10
\font\boldfourteen=cmbx10 scaled \magstep2 %% used in Title
\font\twelverm=cmr10 scaled\magstep1
\font\eleven=cmr10 scaled \magstephalf %% used in subtitle
\font\elevenbold=cmbx10 scaled \magstephalf %% used in toc
\font\nine=cmr9
\font\boldeight=cmbx8
\font\eightrm=cmr8
\font\eightbf=cmbx8
\font\eightit=cmti8
\font\eighti=cmmi8
\font\eightsy=cmsy8
\font\eightsl=cmsl8
\font\eighttt=cmtt8
\font\sixrm=cmr6
\font\sixi=cmmi6
\font\sixsy=cmsy6
\font\fivei=cmmi5
\font\fivesy=cmsy5
\def\eightpoint{\def\rm{\fam0\eightrm}%
\textfont0=\eightrm \scriptfont0=\sixrm
\textfont1=\eighti \scriptfont1=\sixi \scriptscriptfont1=\fivei
\textfont2=\eightsy \scriptfont2=\sixsy \scriptscriptfont2=\fivesy
\def\it{\fam\itfam\eightit}%
\textfont\itfam=\eightit
\def\sl{\fam\slfam\eightsl}%
\textfont\slfam=\eightsl
\def\bf{\fam\bffam\eightbf}%
\textfont\bffam=\eightbf
\def\tt{\fam\ttfam\eighttt}%
\textfont\ttfam=\eighttt
\setbox\strutbox=\hbox{\vrule height8pt depth3pt width0pt}%
\baselineskip=9pt\rm}

\begin{document}
\title{ATMS-based constraint programming}
\author{Vijay A. Saraswat \and Johan de Kleer \and Brian C. Williams\\
\quad\\
Xerox PARC,\\
3333 Coyote Hill Road\\
Palo Alto, CA 94304}
\date{\today}
\maketitle

\begin{abstract}
We are developing a broad and comprehensive theory of constraint programming, subsuming the paradigms of functional and logic programming \cite{my-thesis-book,popl90,popl91,angelic-nondet,local-cons}. The theory is based on a view of computation organized around the notions of constraint systems as systems of {\em partial information}, of {\em store-as-constraint} and {\em process-as-information-transducer}, and emphasizes the use of constraints for communication and control between concurrently executing processes. Such languages offer a very simple and abstract level for specifying computations for combinatorial exploration. In prior work \cite{angelic-nondet,local-cons} we have shown how to smoothly integrate \Prolog-style backtracking and various local consistency techniques within this framework.
In this paper we develop the integration of this framework with the alternate approach to combinatorial exploration predominant in AI, {\em truth-maintenance systems} (TMSs) \cite{TMS,McAllester-90,ATMS-1}. In particular, we give a generic mechanism for enriching {\em any} constraint system $B$ with the notion of assumptions to obtain another constraint system \AB. To implement \AB, ATMS-implementation techniques are integrated with the constraint-solver for $B$. All the usual \cc\ combinators --- such as ask, tell, conjunction (parallel composition) and hiding, all closed under recursion --- are immediately available on top of \AB, providing a simple and powerful constraint language. In addition, several new combinators can now be defined which take advantage of the extra structure of assumptions built into the constraint system. As before, each of these new combinators can be given a very simple mathematical characterization as closure operators over the underlying constraint system. They enable a simple and elegant expression of various previously developed programming notions on top of ATMSs, such as (various kinds of) consumers \cite{ATMS-2}.
\end{abstract}

\section{Introduction}

We take it as being self-evident that a large class of computations in AI (and knowledge representation, and natural language understanding) centrally involves the representation and manipulation of constraints. Our task is to develop the conceptual, mathematical and practical foundations of a general theory of computation based on constraints.
%% that is, on the
%%representation and manipulation of systems of partial
%%information.
%%Concurrent constraint programming
%%\cite{my-thesis-book,popl90,popl91} is a simple and powerful
%%model of computation based on the notions of {\em
%%store-as-constraint} and {\em process as information transducer}.
The {\em concurrent constraint} (\cc) programming approach \cite{my-thesis-book,popl90,popl91} is based on treating a constraint system as just a system of partial information {\em \`a la} \cite{scott-info-sys}: essentially, a set of objects (with unanalyzed internal structure) equipped with an entailment relation. Arising from earlier work in constraint logic programming \cite{lassez-const} and concurrent logic programming \cite{alps,cp-op-sem}, the basic move in this paradigm is to replace the notion of store-as-valuation central to von Neumann computing with the notion that the store is a constraint, that is, a set of pieces of partial information about the values that variables can take. The usual notions of ``read'' and ``write'' no longer make sense; instead computation progresses via the execution of {\em ask} and {\em tell} actions. An ask operation takes a constraint (say, $c$) and uses it to probe the structure of the store: it succeeds if the store contains enough information to entail $c$. Tell takes a constraint and conjoins it with the constraints already in the store. (Thus computation progresses via the monotonic accumulation of information.) Concurrency arises because any number of agents may simultaneously interact with the store. Synchronization is achieved via a blocking ask --- an agent blocks on a constraint $c$ if the store is not strong enough to entail $c$; it remains blocked until such time as (if ever) some other concurrently executing agents add enough information to the store for it to be strong enough to entail the query.
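To illustrate (a schematic example of ours, assuming a base constraint system with elementary arithmetic constraints over variables $X$, $Y$ and $Z$): consider the agent
$$X > 0 \then (Z=1)\tell$$
running against the store $\{X=Y\}$. The store is not strong enough to entail $X>0$, so the agent blocks. If some concurrently executing agent now tells $Y=3$, the store grows to $\{X=Y,\, Y=3\}$, which does entail $X>0$; the suspended ask fires and the tell adds $Z=1$ to the store. Note that information is only ever added, never retracted.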
The very wide applicability of the underlying notion of a constraint system means that the notion of constraint programming based on it is very general (see \cite{my-thesis-book}) --- indeed it encompasses logic programming, concurrent logic programming, data-flow and (at least first-order) functional programming. Hand-in-hand with this generality goes mathematical simplicity --- (determinate) programs in this framework were shown to correspond to {\em closure operators} over the underlying constraint system $E$, and non-determinate programs were shown to correspond to closure operators over the Smyth power-domain of $E$ \cite{popl91,angelic-nondet}. Closure operators --- operators over a partial order that are monotone, idempotent and extensive --- have a very beautiful algebra summarized in Appendix~\ref{clop}; in particular they are closed under a very natural notion of parallel composition. \cite{my-thesis-book,local-cons} showed how this paradigm accounts for the pre-nascent notions of constraint programming evident in the work of \cite{SYN} (constraint propagation techniques) and in \cite{stefik} (constraint posting), and how these ideas can be extended to accommodate the notion of local consistency techniques for solving networks of constraints.

\subsection{Control of search.}

The predominant style for combinatorial computation, embodied within Prolog, is to cast the computation as a search in an And-Or tree. Various techniques for searching such trees efficiently (perhaps in parallel) have been devised. For example, the Andorra principle, an embodiment of the least commitment principle, governs the selection of nodes to expand next at an And-phase: ``determinate'' nodes --- those that have at most one Or-branch below them which is not known to be inconsistent --- are expanded first.

A major drawback of such approaches is that there is no possibility for communication between different Or-parallel branches. For several applications, such as diagnosis, it is necessary for the problem-solver to be able to move back and forth between different Or-parallel worlds with facility. (For
%% BCW's comment: Is it that you need to move between them, or
%% that you need to pursue them both? Moving between them
%% corresponds to shared inference?
example, each consistent Or-parallel world corresponds to a diagnosis). Also, the techniques for controlling search available when exploring And-Or trees top-down tend to be based on controlling the {\em forward phase} of the search: the phase that determines what nodes to expand next. (Indeed, the integration of these techniques with the \cc\ framework has been a topic of recent research and is now reasonably well-understood \cite{hentenryck,local-cons}.) In general, such techniques offer a significant computational advantage over chronological backtracking (by detecting inconsistency earlier). However, they are of less use in many AI applications in which not all decisions depend on each other.

For such applications, the methods and techniques of {\em truth maintenance systems} have been developed over the last decade \cite{TMS,ATMS-1,McAllester-90}. Truth Maintenance Systems offer an attractive and conceptually very different computation model from the And-Or trees underlying Prolog-like languages.
Within the standard view of constraint programming, non-determinism is reflected by {\em splitting}: at a branch-point, conceptually, copies of the state of the world (the store, other conjunctive agents) are made, and these copies are kept strictly insulated from each other. In the TMS viewpoint, no splitting is done, even conceptually: instead an {\em assumption} is associated with each branch, and all the information on that branch is made conditional on that assumption. To give a simple example, Prolog-like languages treat a formula $\phi \wedge (\psi_1 \vee \psi_2)$ as the (equivalent) formula $(\phi \wedge \psi_1) \vee (\phi \wedge \psi_2)$; TMSs treat it as the (equivalent) formula $\phi \wedge (A \then \psi_1) \wedge (\neg A \then \psi_2)$ (for $A$ some new Boolean variable). In addition, in {\em assumption-based} TMSs \cite{ATMS-1,ATMS-2,ATMS-3}, each formula is associated with a {\em label} which records the set of conditions (conjunctions of assumptions) under which the formula has been asserted.

The fundamental idea in this paper is to extend the structure of tokens underlying the constraint programming language by building in the notion of assumptions and a restricted form of implication. Assume given a constraint system with vocabulary $D$. In the new constraint system, tokens of the form $(\{A_0, \ldots, A_{n-1}\}, c)$ are allowed, where the $A_i$ are propositional variables, and $c$ is a token in $D$ or (a conjunction of) other propositional variables. Intuitively, such a token expresses the idea that the piece of information $c$ is known to hold --- but only if the assumptions $A_0,\ldots, A_{n-1}$ are in force.

We should emphasize that our approach yields not just {\em a} programming language on top of an ATMS, but an {\em infinite} family of languages, depending upon the particular constraint system employed. Most ATMS-based languages in the past can be thought of as having a very simple built-in constraint system --- we shall present such a constraint system in detail later in this paper.

\section{Assumptive constraint systems}

In this section we briefly review the notion of a simple constraint system, and discuss how to generate from an arbitrary constraint system $B$ (the ``base'' constraint system) an assumptive constraint system $\AB$, given some underlying set of ``nodes'' (Definition~\ref{cs-with-a}). A simple characterization theorem for such constraint systems is also given.

In the following, for any set $D$, we shall let $P(D)$ ($p(D)$) stand for the set of all (finite) subsets of $D$. Also, we use the symbol $\subseteq_f$ for the ``is a finite subset of'' relationship between two sets.

\begin{definition}
A {\em simple constraint system} $A$ is a structure \tuple{D_A, \vdash_A} where $\vdash_A$ is an {\em entailment relation on $D_A$}, that is, a relation $\vdash_A\; \subseteq p(D_A) \times D_A$ satisfying:
\begin{description}
\item[A1] $u \vdash_A c$ for every $c \in u$.
\item[A2] if $u \vdash_A c$ for every $c \in v$, and $v \vdash_A d$, then $u \vdash_A d$.
\end{description}
\end{definition}

Together these properties imply monotonicity: if $u \vdash_A d$ and $v \supseteq u$ then $v \vdash_A d$ (by A1, $v \vdash_A c$ for every $c \in u$; A2 then yields $v \vdash_A d$).
%%For simplicity, we shall assume
%%that there is a special token, $\false_D \in D$ such that
%%$\{\false_D\} \vdash d$ for all $d \in D$.
In the following, we shall overload the entailment symbol, letting $u \vdash u'$ (for $u,u'$ finite sets of tokens) stand for the conjunction of assertions $u \vdash d'$ for all $d' \in u'$.
Finally, we take $\approx$ to be the equivalence relation on $p(D)$ induced by the pre-order $\vdash$.

\begin{example}\label{generic-defn}
For any set of tokens $D$, the {\em generic constraint system over $D$}, $G(D)$, is given by $\tuple{D, \{(u,d) \alt d \in u\}}$. It is easy to check that (A1) and (A2) are satisfied and that $u \vdash_{G(D)} v$ iff $u \supseteq v$. Let $\bot \in D$ be a special pre-designated token. Then the {\em generic constraint system over $D$ with falsehood $\bot$}, $G_{\bot}(D)$, is \tuple{D, \{(u,d) \alt \bot \in u\ \mbox{or}\ d \in u\}}. Again, it is easy to check that (A1) and (A2) are satisfied. As another example, note that \tuple{\emptyset, \emptyset} specifies a constraint system with no tokens --- called the {\em vacuous} constraint system.
\end{example}

In the above example, a set of tokens in $G(D)$ is nothing more than the ``sum of its parts'', that is, a ``free'' conjunction. $G(D)$ is essentially the ``strongest'' constraint system on top of a token-set $D$ --- any other constraint system on $D$ must have an entailment relation that is weaker (i.e., is larger as a set) than $\vdash_{G(D)}$. Alternatively, $\vdash_{G(D)}$ is the intersection of all possible entailment relations on $D$. $G_{\bot}(D)$ is just like $G(D)$ except that it has the notion of falsehood built into it: $\bot$ satisfies the property that for every token $d \in D$, $\{\bot\} \vdash_{G_{\bot}(D)} d$. Shortly we shall see that for a set $N$ of ``nodes'', $G_{\bot}(N)$ can be thought of as the constraint system underlying TMSs.

We now show how to generate an assumptive constraint system \AB{} from a given base constraint system $B$. We first fix a (possibly infinite) set $N$ of propositional variables (called {\em nodes}). For simplicity, we shall assume that $N$ is chosen to be disjoint from $D_B$. We identify a (possibly infinite) subset $A \subset N$ of {\em assumptions}. We require that there be a special symbol, conventionally written $\bot$ to signify falsehood, in $N$ but not in $A$. Now we shall obtain the assumptive constraint system $\AB=\tuple{D_{\AB}, \vdash_{\AB}}$ by combining $B$ with $G_{\bot}(N)$ in a particular way.

\subparagraph{Defining $D_{\AB}$.} Let $D=D_{B} \cup D_{G_{\bot}(N)}$\footnote{Note, of course, that $D_{G_{\bot}(N)}=N$; we chose the more cumbersome expression to emphasize the symmetric nature of the construction.}. A $d \in D$ is called a {\em datum}; a datum is a node (if it comes from $D_{G_{\bot}(N)}$, i.e., $N$) xor a {\em base constraint} (if it comes from $D_B$).\footnote{Just a word of warning: those familiar with the TMS literature will find this terminology slightly non-standard.} Now we can define $D_{\AB}$: it is just the set of pairs $(e, d)$, where $e$ is a finite subset of $N$, and $d \in D$. Intuitively, a token $(e,d)$ asserts that $d$ must hold whenever each of the nodes in $e$ holds. In such a pair, we shall call $e$ the {\em antecedent} and $d$ the {\em consequent}. Conventionally, tokens whose consequents are nodes (rather than base constraints) are called {\em justifications}. Each justification $(\{n_1,\ldots, n_k\}, n)$ can be thought of as the propositional Horn clause $n_1 \wedge \ldots \wedge n_k \rightarrow n$. Also, a token of the form $(e,\bot)$ is called a {\em nogood} (since it represents the assertion that not all of a set of nodes can hold simultaneously). Note that we do not allow basic constraints $d \in D_{B}$ to occur in antecedents of tokens, since the ATMS is just a propositional engine.
This effect can be obtained by means of a user program as shown in Section~\ref{progg}. In the following, whenever a datum $d$ occurs in a context where an element of $D_{\AB}$ is expected, we should think of $d$ as shorthand for $(\emptyset, d)$. Similarly, for $u \subseteq D$, whenever $(e,u)$ appears in a context where a subset of $D$ is expected, we should take it to be the set $\{(e, d) \alt d \in u\}$, and for $z \subseteq D_{\AB}$, whenever $(e, z)$ appears in a context where a subset of $D_{\AB}$ is expected, we should take it to be the set $\{(e \cup e', d') \alt (e',d') \in z\}$. Finally, for any set $Q \subseteq D_{\AB}$, we let $J(Q)$ denote its subset of justifications. (Thus, the set of justifications is just $J(D_{\AB})$.)

\subparagraph{Defining $\vdash_{\AB}$.} We define $\vdash_{\AB}$ in three steps. First we define an entailment relation on justifications, then we define an entailment relation on the data-set $D$, and finally we use these two relations to define $\vdash_{\AB}$.

The relation $\vdash_J$ on the set of justifications is defined by: $u \vdash_J r$ iff the conjunction of the tokens in $u$ (viewed as propositional formulas) propositionally entails $r$ (viewed as a propositional formula). Note that for a conjunction of nodes $e$ and node $n$, $e \vdash_{G_{\bot}(N)} n$ iff $e \vdash_J n$.\footnote{Note that by our notational conventions, $e \vdash_J n$ should be read as: $\{(\emptyset, x) \alt x \in e\} \vdash_J (\emptyset, n)$.}
%% In future we shall use the notation $\overline{e}$ (for $e \in
%% p(A)$) to stand for the set of justifications $\{(\emptyset, a)
%% \alt a \in e\}$, so the previous assertion could be written as:
%% $e \vdash_0 a$ iff $\overline{e} \vdash_p \overline{a}$.

The relation $\vdash_D$ is obtained by extending $\vdash_{B}$ and $\vdash_{G_{\bot}(N)}$ to $D = D_{B} \cup D_{G_{\bot}(N)}$ in the obvious way: roughly, $u \vdash_D d$ if $u$ entails $d$ in the base constraint system $B$ or $u$ entails $d$ in the ``TMS constraint system'' $G_{\bot}(N)$. The only interesting aspect of the definition is that inconsistency must be handled correctly.

\begin{definition}
Define $\vdash_D \subseteq p(D) \times D$ by: $u \vdash_D d$ iff one of the following conditions holds:
\begin{itemize}
\item $u$ is inconsistent; that is, $u \cap D_{G_{\bot}(N)} \vdash_{G_{\bot}(N)} \bot$ or $u \cap D_{B} \vdash_{B} d'$ for all $d' \in D_{B}$, or
\item $u$ entails $d$ considering only the tokens in $u$ of the same type as $d$; that is, $d \in D_{B}$ and $u \cap D_{B} \vdash_{B} d$, or $d \in D_{G_{\bot}(N)}$ and $u \cap D_{G_{\bot}(N)} \vdash_{G_{\bot}(N)} d$.
\end{itemize}
\end{definition}

It is easy to check that this relation satisfies (A1) and (A2). Given a conjunction of nodes $e$ and a finite set of tokens $u$, the {\em $e$-context of $u$}, denoted $u_e$, is just the set $$\{d \alt (e',d) \in u, J(u) \cup e \vdash_J e'\}$$ of all data that ``hold'' in $e$. We are now ready to give the main definition:

\begin{definition}[Assumptive constraint systems]\label{cs-with-a}
For a constraint system $B$, and an underlying set of nodes $N$ and assumptions $A$, the assumptive constraint system \AB{} is the structure \tuple{D_{\AB}, \vdash_{\AB}} where $\vdash_{\AB}$ is given by:
\begin{quotation}
$u \vdash_{\AB} (e,d)$ iff $u_e \vdash_D d$
\end{quotation}
\end{definition}

As usual, the elements of the constraint system, denoted $|\AB|$, are the $\vdash_{\AB}$-closed subsets of $D_{\AB}$.
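To see the definition at work (a small worked example of ours): let $A_1, A_2 \in A$ and $n \in N$, and take $u = \{(\{A_1\}, n),\, (\{n, A_2\}, \bot)\}$; both tokens are justifications, so $J(u)=u$. Consider $e = \{A_1, A_2\}$. We have $J(u) \cup e \vdash_J \{A_1\}$, and, via the justification $(\{A_1\},n)$, also $J(u) \cup e \vdash_J \{n, A_2\}$; hence $u_e \supseteq \{n, \bot\}$. Since $\bot \in u_e$ we get $u_e \vdash_D \bot$, and therefore $u \vdash_{\AB} (\{A_1,A_2\}, \bot)$: the environment $\{A_1,A_2\}$ is a nogood of $u$, although no single token of $u$ mentions it.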
For every set $u \subseteq D_{\AB}$ we denote by $\overline{u}$ the smallest $\vdash_{\AB}$-closed set containing $u$. We define a conjunction of nodes $e$ to be {\em inconsistent} in $u$ iff $u \vdash_{\AB} (e,\bot)$.
%%Also, the {\em label} for $d \in (D \plus
%%A)$ in $u$ is just the set of all conjunctions of nodes $e$ such that
%%$(e,d) \in u$.

\begin{example}
The simplest example of an assumptive constraint system is obtained by taking $B$ to be the vacuous constraint system. In such a case, the tokens of $\AB$ are just of the form $(e, n)$, and $\vdash_{\AB}$ is propositional entailment. Such an assumptive constraint system can be implemented directly with an ATMS.
\end{example}

The central property of the implication inherent in tokens $(e,z)$ is that it can be regarded as adjoint to conjunction in the following precise sense familiar from logic:

\begin{theorem}
For any sets $u,z \subseteq D_{\AB}$, and $e \subseteq_f A$:
$$u \vdash_{\AB} (e,z)\; \mbox{iff}\; u \cup e \vdash_{\AB} z$$
\end{theorem}

Several observations can readily be made. Since $u \cup e \vdash_{\AB} u$, it follows that $u \vdash_{\AB} (e,u)$. Also, we get $e \cup (e,z) \vdash_{\AB} z$, and $e \cup (e,z) \vdash_{\AB} e \cup z$, since $(e,z) \vdash_{\AB} (e,z)$. From the last statement, employing the adjunction again we get $(e,z) \approx_{\AB} (e, e \cup z)$. Note also that $e \cup u_{e}=(e \cup u)_{\emptyset}$ --- that is, given a store $u$, a datum is in the context labelled by $e$ iff it is in the context labelled by $\emptyset$, provided that the nodes $e$ are asserted as being true.

\subsection{Canonical representations of conditional constraints}

In ATMS terminology, an {\em environment} is a conjunction of assumptions; a {\em label} for a node $n$ is the set of environments $s$ such that $(s, n)$ is derivable from the given justifications. An environment $s$ for $n$ is said to be {\em sound} if $(s,n)$ is entailed by the store, and {\em consistent} if $(s, \bot)$ is not entailed by the store. A label $l=\{s_0, \ldots, s_{k-1}\}$ for $n$ is said to be {\em sound} if each $s_i$ is sound for $n$, {\em consistent} if each $s_i$ is consistent for $n$, {\em complete} if for any sound environment $s$ for $n$, there is an $i$ such that $s \supseteq s_i$, and {\em minimal} if for all $i < k$, if $s \subseteq s_i$ and $s$ is a sound environment for $n$, then $s_i \subseteq s$. The basic ATMS algorithm maintains sound, consistent, complete and minimal labels for each node. We note below some consequences of our definitions which justify the label propagation algorithm in the ATMS. For any two conjunctions of nodes $e,e'$, recall that $e \vdash_{G_{\bot}(N)} e'$ iff $e \supseteq e'$ or $\bot \in e$.

\begin{proposition}
For $e,e' \in p(A)$ and $d \in (D + A)$, $\{(e,d)\} \vdash_{\AB} (e',d)$ whenever $e' \vdash_{G_{\bot}(N)} e$. Further, $e \supseteq e'$ implies $u_e \supseteq u_{e'}$.
\end{proposition}

As a special case, this proposition says that if a datum is in (the context of) a given environment, then it is also in (the context of) every superset environment.

\begin{proposition}
For any $e \in p(D_{G_{\bot}(N)})$, $\{(e, \bot)\} \vdash_{\AB} (e,d)$ for all $d \in D$.
\end{proposition}

As a special case, this proposition says that every datum holds in an inconsistent environment (an environment in the label of $\bot$). These two propositions are used internally in the ATMS implementation of the generic constraint system over an arbitrary token-set $D$.
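For instance (a small worked example of ours): suppose the store $u$ contains just the justification $(\{A_1\}, n)$ and the nogood $(\{A_2\}, \bot)$. By the first proposition, $u \vdash_{\AB} (\{A_1,A_2\}, n)$: the datum $n$ also holds in the superset environment $\{A_1,A_2\}$. By the second, every datum holds in every environment containing $A_2$, since all such environments are inconsistent. The canonical label maintained for $n$ is nevertheless just $\{\{A_1\}\}$: environments containing $A_2$ are pruned as nogood, and $\{A_1,A_2\}$ is pruned as non-minimal.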
We can think of the canonical form maintained internally by the ATMS as being obtained by deleting from (the closure of) any finite set of tokens $u$:
\begin{itemize}
\item tokens $(s,d)$ (for $d \not=\bot$) if $(s,\bot) \in u$ (elimination of nogood environments from labels).
\item tokens $(s',d)$ if $(s,d) \in u$, for $s \subset s'$ (eliminating non-minimal environments).
\end{itemize}
If these two rules are followed, the label for each token $d \in D$ will be consistent, sound, complete and minimal.

\section{A simple programming language}\label{progg}

The notion of computation on top of an assumptive constraint system is no different from that on top of any constraint system. The store consists of a set of tokens, and computation progresses via a number of (possibly recursively defined) agents running in parallel, checking for the presence of certain tokens in the store (ask), and adding some other tokens to the store (tell). The role of the ``problem-solver'' in the usual TMS-architecture is played here by the user program, and the role of the ATMS is played by the constraint-solver implementing the entailment relation on which the given constraint system is based. In the case of an assumptive constraint system, this constraint solver can conceptually be thought of as consisting of two parts --- a part which deals with managing environments (the TMS) and a part which deals with performing the entailment operations in the underlying constraint system. (In usual TMS/problem-solver architectures, the latter is thought of as part of the problem-solver: thus the ``usual'' TMS architecture is obtained by taking as built-in the vacuous constraint system.)

In addition to ask, tell and parallel composition operations --- which were first defined in the form below in \cite{popl91} --- a number of other combinators can be defined which exploit the extra structure of tokens and provide extra control over execution. In the following we shall follow the strategy of defining the new combinators by giving their mathematical denotations and then describing informally how such combinators can be implemented. This allows a far simpler and more precise description than is possible if one were to rely purely on implementational notions. As before, the denotation of each agent will continue to be a closure operator over the set of elements of the underlying constraint system, in this case $|\AB|$. We shall represent each closure operator by the set of its fixed-points.

\begin{table}
\begin{tabular}{|l|} \hline
\begin{minipage}[t]{\textwidth}
{\bf Syntax.}
$$ \begin{array}{lll}
P {::=} & D.A & \mbox{\em Programs}\\
D {::=} & \epsilon \alt p(X) :: A \alt D.D & \mbox{\em Declarations}\\
A {::=} & & \mbox{\em Agents}\\
 & c\tell & \mbox{\em Tell}\\
 & \alt c \rightarrow A & \mbox{\em Ask}\\
 & \alt A \wedge A & \mbox{\em Conjunction (parallel composition)}\\
 & \alt \IN e \DO A & \mbox{\em Focussing}\\
 & \alt \all A & \mbox{\em All}\\
 & \alt \exists X. A & \mbox{\em Hiding}\\
 & \alt p(X) & \mbox{\em Procedure Call}
\end{array} $$
{\bf Semantic Equations.}
$$ \begin{array}{l}
\AA(c)e = \{d \in |D_a| \alt d \geq c\} \\
\AA(c \then A)e = \{d \in |D_a| \alt d \supseteq c \Rightarrow d \in \AA(A)e\} \\
\AA(A \wedge B)e = \AA(A)e \cap \AA(B)e \\
\AA(\exists X . A)e = \{ c \in |D_a| \alt \exists_X c = \exists_X d \hbox{\ for some\ }d \in \AA(A)e \} \\
\AA(\IN e' \DO A)e = \{d \in |D_a| \alt \overline{d \cup e'} \in \AA(A)e\}\\
\AA(\all A')e = \bigcap_{e' \in p(A)} \AA(\IN e' \DO A')e\\
\AA(p(X))e = {\bf S}_{[X/\alpha]}(e(p)) \quad\\
\DD(\epsilon)e = e \\
\DD(p(X) :: A . D)e = \DD(D)e[p \mapsto {\bf S}_{[\alpha/X]}(\AA(A)e)] \quad\\
\PP(D.A) = \AA(A)(\mbox{\em fix}(\DD(D))) \\
\end{array} $$
Above, $\alpha$ is some variable in the underlying constraint system which is not allowed to occur in user programs. (It is used as a dummy variable during parameter passing.)
\end{minipage}\\ \hline\end{tabular}
\caption{Denotational semantics for the ATMS-based \cc\ languages}\label{sem-table}
\end{table}

\paragraph{Tell actions}
Given any finite set of tokens $c$, the agent $c\tell$\footnote{When there is no risk of confusion, we write $c\tell$ as $c$.} adds the tokens in $c$ to the current store. Using lambda-notation, the tell combinator $c\tell$ is $\lambda z. \overline{z \cup c}$. The set of fixed-points of such an operator consists of exactly those elements of $|D_a|$ which contain all the tokens in $c$:
$$c\tell=\{d \in |D_a| \alt c \subseteq d\}$$

\begin{example}
Execution of the tell agent $(\{A_1, A_2\}, X=1)$ causes the token $(\{A_1, A_2\}, X=1)$ to be added to the store. In operational terms, this causes the label for $X=1$ to be updated to reflect the fact that the conjunction of the assumptions $A_1$ and $A_2$ is sufficient to cause $X=1$ to hold.
\end{example}

More generally, telling the store the (justification) token $(\{n_1,\ldots, n_k\},n)$ corresponds to the TMS notion of ``installing'' the justification $n_1 \wedge \ldots \wedge n_k \rightarrow n$. In particular, telling the token $(\{n_1,\ldots, n_k\}, \bot)$ is asserting that the set $\{n_1,\ldots,n_k\}$ is a nogood.

\paragraph{Ask}
For any finite set of tokens $c$ and agent $A$, the ``ask'' agent $c\then A$ (read: ``$c$ then $A$'') is defined as follows: on any input $z$, examine if $z$ has enough information to entail (each token in) $c$. If so, then return the result of running $A$ in $z$; else return $z$ unchanged. Using lambda-notation, the function is: $c \then A=\lambda z. \IF\ c \subseteq z \THEN A(z) \ELSE z$. It is easy to see that this defines a closure operator (if $A$ is a closure operator). In terms of fixed-points:
$$c \then A = \{z \in |D_a| \alt c \subseteq z \Rightarrow z \in A\}$$

\begin{example}
The agent $((\{A_1\}, X\geq 3), (\{A_2\}, Y = 4)) \then (\{B\}, Z=1)$ suspends until there are enough constraints in the contexts $u_e$ (for environments $e$ that entail $\{A_1\}$) to entail $X \geq 3$ (this is what it means for the store $u$ to entail the token $(\{A_1\},X \geq 3)$), and similarly for $(\{A_2\},Y=4)$; and if and when these conditions are satisfied the agent fires and adds the token $(\{B\},Z=1)$ to the store. (Note that the antecedent of the token added does {\em not} have to be related to the antecedents of the tokens in the ask-part of the agent.)
\end{example}

\subparagraph{Relationship with consumers.}
Given a set of tokens $u$, the ask action $(e,e')$ succeeds iff the conjunction of the nodes in $e$ implies each of the nodes in $e'$, given $u$. (In ATMS-terminology, a node $a$ is said to imply a node $b$ if every environment in the label for $a$ is a superset of an environment in the label for $b$.)
This notion is the basic building block of the {\em inclusive implication consumers} of \cite{ATMS-3}, which can therefore be expressed as particular kinds of such ask-and-tell agents. Similarly, the {\em contradiction consumers} of \cite{focus-ATMS} are also a special case of such ask-and-tell agents. Suppose we want an agent $A$ to be executed whenever it is discovered that a particular environment $e$ is inconsistent. This is expressed by running the agent
$$(e,\bot) \then A$$
which asks whether the token $(e, \bot)$ can be entailed in the current store, and if so, runs $A$.

More generally, ask and tell combinators provide capabilities that are not available in the usual consumer architecture. Consumers do not explicitly reference node labels, whereas both ask and tell operations work on tokens of the form $(e,d)$, for $e$ an environment and $d$ a basic token or an assumption.

\paragraph{Parallel composition}
For any agents $A_1$ and $A_2$, their parallel composition, written $(A_1,A_2)$, is the agent which on an input store $z$ alternately applies $A_1$ and $A_2$ (in any order), until no progress can be made (that is, a value is reached which is a fixed-point of both). It is not hard, though a bit tedious, to show that as long as both $A_1$ and $A_2$ are executed ``fairly'' the resulting store {\em does not depend} on the order of application of the agents.\footnote{An execution sequence is fair if there is no agent which is infinitely often enabled in the sequence but never executed. The possibility of infinite execution sequences arises in the presence of recursion.} In terms of fixed-points, parallel composition has a startlingly simple denotation:
$$(A_1, A_2)= A_1 \cap A_2$$
That is, the set of fixed-points of $(A_1, A_2)$ is exactly the intersection of the sets of fixed-points of $A_1$ and $A_2$. Parallel composition is the fundamental combinator for treating a collection of agents as a single agent; in TMS-terminology, for treating a collection of consumers as a single consumer.

\paragraph{In and All combinators}
We now introduce a combinator which exploits the structure of tokens in $D_{\AB}$. The $\IN$ combinator is of the form:
$$ \IN e \DO A$$
Intuitively, the idea is to isolate just the constraints in the store which are in force given the assumptions $e$, and to execute $A$ in such a store. More operationally, to run $\IN e \DO A$ in the store $u$, the system isolates just the constraints $u'$ that hold in the environment $e$ (logically, $u'$ is just the $\vdash_{\AB}$-closure of the set $e \cup u$), runs $A$ in $u'$, and then adds to $u$ the constraint $(e, A(u'))$. More precisely, the definition is:\footnote{Recall that for any $u \subseteq D_a$, $\overline{u}$ is the smallest set containing $u$ closed under the entailment relation $\vdash_a$.}
$$\IN e \DO A=\lambda z. \overline{z \cup (e, A(\overline{e \cup z}))}$$

\begin{theorem}
In terms of fixed-points:
$$\IN e \DO A = \{z \in |D_a| \alt \overline{z \cup e} \in A\}$$
\end{theorem}

The calculation above is routine and easy.

\begin{example}
The agent:
$$\IN \{A_1, A_2\} \DO (X=1 \then Y > 3)$$
is identical to the simple ask-tell agent:
$$ (\{A_1, A_2\}, X=1) \then (\{A_1, A_2\}, Y > 3)$$
\end{example}

In fact, as the next theorem demonstrates, for every finite (recursion-free) program containing the \IN\ combinator, there is an equivalent program without the \IN\ combinator.
But the whole point of the $(\IN e \DO A)$ construction is that $A$ can in general be a very complex (perhaps even recursively defined) agent, and the translation may not be as straightforward as above. Besides, this gives a conceptually simple way of separating the context from the agent that is to be run in that context.

\begin{theorem}
This combinator satisfies the following laws:
$$\begin{array}{l}
\IN e \DO c = (e,c)\\
\IN e \DO (\IN e' \DO A)= \IN (e \cup e') \DO A\\
\IN e \DO (c \then A) = (e, c) \then (\IN e \DO A)\\
\IN e \DO (A_1,A_2)=(\IN e \DO A_1, \IN e \DO A_2)\\
\IN \emptyset \DO A = A\\
\end{array} $$
\end{theorem}

The proofs of all these assertions are straightforward from their definitions.

\subparagraph{Relationship with consumers.}
The ``\IN'' combinator corresponds to the ``implied-by'' strategy for executing consumers \cite{focus-ATMS}, as illustrated by the following example. Consider the agent $\IN \{A,B\} \DO (N \then M)$. Suppose the current store has the label $\{\{A\}, \{C\}\}$ for $N$ and the label $\{\}$ for $M$. To execute the agent, the environments of $N$ in the ``focus'' $\{A,B\}$ are selected; others are ignored. (In this case, only the environment $\{A\}$ is selected.) For each such environment the corresponding tell action is performed. Thus, the net result of executing the above agent is to update the label for $M$ to be $\{\{A\}\}$. Note that the same result would be obtained by expanding the agent, using the above theorem, to: $(\{A,B\}, N) \then (\{A,B\}, M)$.

\paragraph{\all combinator.}
Using \IN, we can define the ``\all'' combinator: the agent
$$ \all A$$
is equivalent to the (infinite) conjunction of the agents $(\IN e \DO A)$ for all sets of assumptions $e$. It should be clear that the same result is obtained if the conjunction above is limited to environments that are consistent in the current store. (There is no point in firing an agent in an inconsistent environment since no more information can be added in that environment.)

\begin{example}
The agent
$$ \all (N \then M)$$
is the agent which checks for {\em every} context whether $N$ holds in that context, and, if it does, adds $M$ to that context.
\end{example}

\subparagraph{Relationship with consumers.}
$\all A$ behaves very much like a basic consumer in the ATMS-architecture. In fact, the agent $ \all (N \then M)$ can be executed by installing a justification with antecedent $N$ and consequent $M$: the usual ATMS-propagation algorithm will ensure that in each minimal, consistent environment in which $N$ holds, $M$ will also hold, thus providing a simple, finitary implementation. {\em Conditional constraint consumers} can now readily be translated, as illustrated by the following example:

\begin{example}
Consider the following model of an AND-gate:
\begin{ccprogram}
\agent and(X,A,B)::
(A=0 \then X=0),
(B=0 \then X=0),
(A=1 \then X=B),
(B=1 \then X=A),
(X=1 \then (A=1,B=1))..
\agent and\_gate(M, X, A, B)::
M=ok \then and(X,A,B),
M=stuck\_at\_zero \then X=0,
M=stuck\_at\_one \then X=1..
\end{ccprogram}
A typical invocation of such a device would be:
$${\tt \all and\_gate(M,X,A,B)}$$
Operationally, such an agent checks for every environment $e$ whether one of the constraints ${\tt M=ok}$, ${\tt M=stuck\_at\_zero}$, ${\tt M=stuck\_at\_one}$ holds in $e$. If so, the corresponding action is taken --- in the first case, the agent {\tt and(X,A,B)} is executed (under the assumption $e$), and in the latter two cases, the corresponding constraint is added to the store, contingent on $e$.
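For instance (an illustrative scenario; the assumption name $A_{ok}$ is ours): if the store contains the tokens $(\{A_{ok}\}, {\tt M=ok})$ and $(\emptyset, {\tt A=0})$, then ${\tt M=ok}$ holds in every environment containing $A_{ok}$; in each such environment the agent {\tt and(X,A,B)} is run, its ask ${\tt A=0} \then {\tt X=0}$ fires, and the net effect is that the token $(\{A_{ok}\}, {\tt X=0})$ is added to the store.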
\end{example}

As discussed earlier, antecedents to justifications can only consist of nodes, not basic constraints. We can obtain the effect of basic constraints in antecedents, however, using ask operations. Suppose it is desired to use the basic constraint $d$ in an antecedent. The effect can be achieved by introducing a node $n_d$ corresponding to $d$, running the agent $\all (d \then n_d)$ (recall that this is shorthand for $\all (\emptyset, d) \then (\emptyset, n_d)$) and using $n_d$ wherever it is desired to use $d$ in an antecedent. As a result, in any context in which $d$ can be shown to hold, the node $n_d$ will be labelled ``in'', as desired.

\section{Conclusion}

Above, we have discussed the notion of a general determinate \cc{} language on top of an ATMS, given a precise description of the combinators, and shown how various ``consumer-based'' programming idioms in ATMS-based languages can be expressed in this set-up.

The addition of disjunctive agents to the above set-up does not present any new problems --- it is straightforward to consider the denotation of every agent to be a closure operator over the Smyth-powerdomain and extend the two new combinators (\all{} and \IN{}) to that setting. Indeed, disjunctive agents are needed in order to capture the ``choose'' construct of ATMS-based languages, and hence account for ``interpretation-construction'' in this framework.

Similarly, the addition of existential quantification is straightforward. Semantically, this requires us to move from simple constraint systems to {\em first-order} constraint systems (which are sets of tokens equipped with an entailment relation and also a notion of substitution and existential quantifiers). Operationally, existential quantifiers provide a simple and elegant way to create new nodes.
%%Advantages of the this style:
%%do not face the conceptual problem that there may be an
%%all-powerful language around the TMS which could in principle
%%allow computations to be written that examine the representation
%%of various constructs and possibly violate the given abstractions.
%% Notion of classes seems to have disappeared.

Several tasks remain to be done. Conceptually, there does not seem to be any difficulty in generalizing to a {\em first-order ATMS}, that is, to a setting where assumptions are first-order constraints rather than just propositional variables. For example, we may want to consider each of $\tt Mode=ok$, $\tt Mode = stuck\_at\_zero$, $\tt Mode=stuck\_at\_one$ as different assumptions, and allow them to appear in labels. Implementationally, we will have to allow the possibility that an environment may be inconsistent not because it subsumes a nogood but because it is {\em internally} inconsistent. For example, the environment $\tt \{Mode = stuck\_at\_zero, Mode = ok\}$ is internally inconsistent (because it is an unsatisfiable set of constraints). As another example, we could allow assumptions to be positive or negative propositional literals, thus requiring the implementation to have a full-scale Clause Management System (CMS) \cite{ATMS-found}. Further practical exploration of these possibilities is warranted.

Practical implementations of such languages --- over a non-trivial constraint system --- need to be developed. Perhaps the simplest non-trivial example would be to consider a first-order constraint language with constants and equality; the next step would be to allow constructors, thus getting the constraint system \Herbrand. We hope to report on such implementations in future work.
\paragraph{Acknowledgements.}
Conversations with Philippe Codognet have been very helpful.

{%%\footnotesize
%%\bibliography{/net/tigger/tigger/saraswat/bib/master-0}
%%\bibliographystyle{alpha}
\newcommand{\etalchar}[1]{$^{#1}$}
\begin{thebibliography}{GKK{\etalchar{+}}80}

\bibitem[deK86a]{ATMS-1}
Johan de~Kleer.
\newblock An assumption-based {TMS}.
\newblock {\em Artificial Intelligence}, 28:127--162, 1986.

\bibitem[deK86b]{ATMS-2}
Johan de~Kleer.
\newblock Extending the {ATMS}.
\newblock {\em Artificial Intelligence}, 28:163--196, 1986.

\bibitem[deK86c]{ATMS-3}
Johan de~Kleer.
\newblock Problem solving with the {ATMS}.
\newblock {\em Artificial Intelligence}, 28:197--224, 1986.

\bibitem[dKS80]{SYN}
Johan de~Kleer and Gerald~Jay Sussman.
\newblock Propagation of constraints applied to circuit synthesis.
\newblock {\em Circuit Theory and Applications}, 8:127--144, 1980.

\bibitem[Doy79]{TMS}
Jon Doyle.
\newblock A truth maintenance system.
\newblock {\em Artificial Intelligence}, 12:231--272, 1979.

\bibitem[FdK88]{focus-ATMS}
Kenneth~D. Forbus and Johan de~Kleer.
\newblock Focusing the {ATMS}.
\newblock In {\em AAAI 88}, 1988.

\bibitem[GKK{\etalchar{+}}80]{lattices}
G.~Gierz, K.~H. Hofmann, K.~Keimel, J.~D. Lawson, M.~Mislove, and D.~S. Scott, editors.
\newblock {\em A compendium of continuous lattices}.
\newblock Springer-Verlag, Berlin, 1980.

\bibitem[Hen89]{hentenryck}
Pascal~Van Hentenryck.
\newblock {\em Constraint satisfaction in logic programming}.
\newblock Logic Programming Series. MIT Press, 1989.

\bibitem[JL87]{lassez-const}
Joxan Jaffar and Jean-Louis Lassez.
\newblock Constraint logic programming.
\newblock In {\em Proceedings of the SIGACT-SIGPLAN Symposium on Principles of Programming Languages}, pages 111--119. ACM, January 1987.

\bibitem[JSS91]{angelic-nondet}
R.~Jagadeesan, V.~Shanbhogue, and V.~Saraswat.
\newblock Angelic non-determinism in concurrent constraint programming.
\newblock Technical report, System Sciences Laboratory, Xerox PARC, January 1991.

\bibitem[Mah87]{alps}
Michael Maher.
\newblock Logic semantics for a class of committed-choice programs.
\newblock In {\em 4th International Conference on Logic Programming}. MIT Press, May 1987.

\bibitem[McA90]{McAllester-90}
David McAllester.
\newblock Truth maintenance.
\newblock In {\em Proceedings of AAAI}, pages 1109--1116, July 1990.

\bibitem[RdK87]{ATMS-found}
Raymond Reiter and Johan de~Kleer.
\newblock Foundations of assumption-based truth maintenance systems: a preliminary report.
\newblock In {\em Proceedings of AAAI-87}, pages 183--188, August 1987.

\bibitem[Sar87]{cp-op-sem}
Vijay~A. Saraswat.
\newblock The concurrent logic programming language {\bf cp}: definition and operational semantics.
\newblock In {\em Proceedings of the SIGACT-SIGPLAN Symposium on Principles of Programming Languages}, pages 49--62. ACM, January 1987.

\bibitem[Sar89]{my-thesis-book}
Vijay~A. Saraswat.
\newblock {\em Concurrent Constraint Programming Languages}.
\newblock PhD thesis, Carnegie-Mellon University, January 1989.
\newblock To appear, Doctoral Dissertation Award and Logic Programming Series, MIT Press, 1990.

\bibitem[Sco76]{scott-SIAM}
Dana~S. Scott.
\newblock Data types as lattices.
\newblock {\em SIAM Journal on Computing}, 5(3):522--587, 1976.

\bibitem[Sco82]{scott-info-sys}
Dana~S. Scott.
\newblock Domains for denotational semantics.
\newblock In {\em Proceedings of ICALP}, 1982.

\bibitem[SR90]{popl90}
Vijay~A. Saraswat and Martin Rinard.
\newblock Concurrent constraint programming.
\newblock In {\em Proceedings of Seventeenth ACM Symposium on Principles of Programming Languages, San Francisco}, January 1990.

\bibitem[SRH91]{local-cons}
Vijay~A. Saraswat, Francesca Rossi, and Pascal~Van Hentenryck.
\newblock Towards a general framework for constraint programming.
\newblock Technical report, System Sciences Laboratory, Xerox PARC, February 1991.

\bibitem[SRP91]{popl91}
Vijay~A. Saraswat, Martin Rinard, and Prakash Panangaden.
\newblock Semantic foundations of concurrent constraint programming.
\newblock In {\em Proceedings of Eighteenth ACM Symposium on Principles of Programming Languages, Orlando}, January 1991.

\bibitem[Ste80]{stefik}
Mark~Jeffrey Stefik.
\newblock {\em Planning with constraints}.
\newblock PhD thesis, Stanford University, January 1980.

\bibitem[Tar56]{tarski-closure}
A.~Tarski.
\newblock {\em Logic, semantics, metamathematics}.
\newblock Oxford University Press, 1956.
\newblock Translated by J.~H. Woodger.

\end{thebibliography}
}

\appendix

\section{Closure operators}\label{clop}
%%Taken from the local consistency paper.

An operator over a partial order that is extensive, idempotent and monotone is called a {\em closure operator} (or, more classically, a {\em consequence} operator \cite{tarski-closure}). Closure operators are extensively studied in \cite{lattices}; continuous closure operators have been used in \cite{scott-SIAM} to characterize data-types. We list here some basic properties.

Every closure operator can be uniquely represented by its range (which is the same as its set of fixed points); the function can be recovered by mapping each input to the least element in the range above it. (In the following, we shall often confuse a closure operator with its range, writing $x\in f$ to mean $f(x)=x$.) In fact, a subset of $|D|$ is the range of a closure operator iff it is closed under glbs of arbitrary subsets. (Thus, the range of every closure operator is non-empty, since it must contain $\top_{|D|}=\glb \emptyset$.) This condition --- which we shall call the {\em determinacy condition} in what follows --- corresponds to the requirement that for any input there should be a {\em unique} least fixed-point of the given function above that input. Also, it can be shown that a closure operator is continuous iff its range is closed under the lubs of directed subsets.

The extensional partial order on closure operators ($f \leq g$ iff for all $x$, $f(x) \leq g(x)$) translates into reverse-inclusion on the set of fixed points: $f \leq g$ iff $f \supseteq g$. The identity operator $I$ (with range $|D|$) is the bottom element; the operator which maps every element to $\top$ (and hence has range $\{\top\}$) is the top element. In fact it is easy to see that the set of closure operators over $|D|$ ordered by $\supseteq$ is a complete (algebraic) lattice, with meets given by $P \sqcap Q = \{c \sqcap d \alt c,d \in P \cup Q\}$ and joins by $P \sqcup Q = P \cap Q$. As we shall see, joins correspond to parallel composition, and one of the contributions of this paper is to point out that meets too have a very interesting computational interpretation.

In the presence of angelic non-determinism (disjunction), it becomes necessary to require the domain of denotations to be closed under a disjunction operation. When initiated in a store, such an operation can return multiple solutions --- hence it becomes necessary to take the denotation to be some kind of mapping from $|D|$ to sets of elements of $|D|$.
As discussed in \cite{angelic-nondet}, the technically simplest solution is to take the domain of denotations to be the set of linear closure operators on the disjunctive power-domain $\PP{(|D|)}$ (also called the {\em Smyth power-domain}) of the underlying constraint system. Roughly, this power-domain has as elements upwards-closed subsets of $|D|$, ordered by reverse set-inclusion.\footnote{More precisely, the elements of the power-domain are required to be Scott-compact, that is, compact in the Scott-topology.} A {\em linear} (also called {\em distributive}) closure operator over such a domain is a closure operator satisfying the property $f(S)=\cup_{s\in S} f(\{s\})$.\footnote{Thus, linear functions do not combine the information present in two or more elements in the input set, but treat them separately.} Intuitively, $f(S)$ is the set of outputs of a process obtained by taking the union of the outputs produced by the process when started on any store $s \in S$.

For the present discussion, the crucial property of such closure operators is that they too can be characterized by their ``resting points'' --- those inputs $\up{c}$ (for $c \in |D|$) which are fixed-points. Given the set $f_S$ of such resting points, the function $f$ can be recovered as the unique linear extension of the function $\lambda c. \up{f_S \cap \up{c}}$. In fact, \cite{angelic-nondet} shows that the set of such closure operators is in one-to-one correspondence with the set of all Scott-compact subsets of $|D|$. The important point here is that no condition analogous to the determinacy condition (for closure operators over $|D|$) is required --- for instead of requiring that above any input there should be a {\em least} fixed-point, we can now take the result to be (the upwards-closure of) the set of {\em all} fixed-points above the input. The set of linear closure operators over $\PP{(|D|)}$ forms a distributive algebraic lattice with lubs given by intersection (of the fixed-point sets), and glbs by union.

\end{document}