@@ -24,7 +24,7 @@ class KBBase(ABC): | |||
list so that each aligns with its corresponding index in the base model: the first with | |||
the 0th index, the second with the 1st, and so forth. | |||
max_err : float, optional | |||
The upper tolerance limit when comparing the similarity between a candidate's reasoning | |||
The upper tolerance limit when comparing the similarity between a pseudo label sample's reasoning | |||
result and the ground truth. This is only applicable when the reasoning result is of a numerical type. | |||
This is particularly relevant for regression problems where exact matches might not be | |||
feasible. Defaults to 1e-10. | |||
@@ -67,7 +67,7 @@ class KBBase(ABC): | |||
@abstractmethod | |||
def logic_forward(self, pseudo_label): | |||
""" | |||
How to perform (deductive) logical reasoning, i.e. matching each pseudo label to | |||
How to perform (deductive) logical reasoning, i.e. matching each pseudo label sample to | |||
their reasoning result. Users are required to provide this. | |||
Parameters | |||
@@ -95,14 +95,14 @@ class KBBase(ABC): | |||
Returns | |||
------- | |||
List[List[Any]] | |||
A list of candidates, i.e. revised pseudo labels that are compatible with the | |||
A list of candidates, i.e. revised pseudo label samples that are compatible with the | |||
knowledge base. | |||
""" | |||
return self._abduce_by_search(pseudo_label, y, max_revision_num, require_more_revision) | |||
def _check_equal(self, logic_result, y): | |||
""" | |||
Check whether the reasoning result of a candidate is equal to the ground truth | |||
Check whether the reasoning result of a pseudo label sample is equal to the ground truth | |||
(or, within the maximum error allowed for numerical results). | |||
Returns | |||
@@ -120,7 +120,7 @@ class KBBase(ABC): | |||
def revise_at_idx(self, pseudo_label, y, revision_idx): | |||
""" | |||
Revise the pseudo label at specified index positions. | |||
Revise the pseudo label sample at specified index positions. | |||
Parameters | |||
---------- | |||
@@ -134,7 +134,7 @@ class KBBase(ABC): | |||
Returns | |||
------- | |||
List[List[Any]] | |||
A list of candidates, i.e. revised pseudo labels that are compatible with the | |||
A list of candidates, i.e. revised pseudo label samples that are compatible with the | |||
knowledge base. | |||
""" | |||
candidates = [] | |||
@@ -149,7 +149,7 @@ class KBBase(ABC): | |||
def _revision(self, revision_num, pseudo_label, y): | |||
""" | |||
For a specified number of pseudo label to revise, iterate through all possible | |||
For a specified number of labels in a pseudo label sample to revise, iterate through all possible | |||
indices to find any candidates that are compatible with the knowledge base. | |||
""" | |||
new_candidates = [] | |||
@@ -164,7 +164,7 @@ class KBBase(ABC): | |||
def _abduce_by_search(self, pseudo_label, y, max_revision_num, require_more_revision): | |||
""" | |||
Perform abductive reasoning by exhaustive search. Specifically, begin with 0 and | |||
continuously increase the number of pseudo labels to revise, until candidates | |||
continuously increase the number of labels in a pseudo label sample to revise, until candidates | |||
that are compatible with the knowledge base are found. | |||
Parameters | |||
@@ -177,13 +177,13 @@ class KBBase(ABC): | |||
The upper limit on the number of revisions. | |||
require_more_revision : int | |||
If larger than 0, then after having found any candidates compatible with the | |||
knowledge base, continue to increase the number pseudo labels to revise to | |||
knowledge base, continue to increase the number of labels in a pseudo label sample to revise to | |||
get more possible compatible candidates. | |||
Returns | |||
------- | |||
List[List[Any]] | |||
A list of candidates, i.e. revised pseudo label that are compatible with the | |||
A list of candidates, i.e. revised pseudo label samples that are compatible with the | |||
knowledge base. | |||
""" | |||
candidates = [] | |||
@@ -226,7 +226,7 @@ class GroundKB(KBBase): | |||
pseudo_label_list : list | |||
Refer to class `KBBase`. | |||
GKB_len_list : list | |||
List of possible lengths of pseudo label. | |||
List of possible lengths for a pseudo label sample. | |||
max_err : float, optional | |||
Refer to class `KBBase`. | |||
@@ -301,7 +301,7 @@ class GroundKB(KBBase): | |||
Returns | |||
------- | |||
List[List[Any]] | |||
A list of candidates, i.e. revised pseudo labels that are compatible with the | |||
A list of candidates, i.e. revised pseudo label samples that are compatible with the | |||
knowledge base. | |||
""" | |||
if self.GKB == {} or len(pseudo_label) not in self.GKB_len_list: | |||
@@ -468,7 +468,7 @@ class PrologKB(KBBase): | |||
Returns | |||
------- | |||
List[List[Any]] | |||
A list of candidates, i.e. revised pseudo labels that are compatible with the | |||
A list of candidates, i.e. revised pseudo label samples that are compatible with the | |||
knowledge base. | |||
""" | |||
candidates = [] | |||
@@ -20,9 +20,9 @@ class Reasoner: | |||
The distance function to be used when determining the cost list between each | |||
candidate and the given prediction. Valid options include: "confidence" (default) | | |||
"hamming". For "confidence", it calculates the distance between the prediction | |||
and candidate based on confidence derived from the predicted probability in the | |||
data sample.For "hamming", it directly calculates the Hamming distance between | |||
the predicted pseudo label in the data sample and candidate. | |||
and the candidate based on confidence derived from the predicted probabilities in the | |||
data sample. For "hamming", it directly calculates the Hamming distance between | |||
the predicted pseudo label sample and the candidate. | |||
mapping : dict, optional | |||
A mapping from index in the base model to label. If not provided, a default | |||
order-based mapping is created. | |||
@@ -198,8 +198,8 @@ class Reasoner: | |||
Returns | |||
------- | |||
List[Any] | |||
A revised pseudo label through abductive reasoning, which is compatible with the | |||
knowledge base. | |||
A revised pseudo label sample through abductive reasoning, which is compatible | |||
with the knowledge base. | |||
""" | |||
symbol_num = data_sample.elements_num("pred_pseudo_label") | |||
max_revision_num = self._get_max_revision_num(self.max_revision, symbol_num) | |||
@@ -3,7 +3,7 @@ MNIST Addition | |||
MNIST Addition was first introduced in [1] and the inputs of this task are pairs of MNIST images and the outputs are their sums. The dataset looks like this: | |||
.. image:: ../img/Datasets_1.png | |||
.. image:: ../img/image_1.jpg | |||
:width: 350px | |||
:align: center | |||
@@ -11,5 +11,9 @@ MNIST Addition was first introduced in [1] and the inputs of this task are pairs | |||
The ``gt_pseudo_label`` is only used to test the performance of the machine learning model and is not used in the training phase. | |||
In the Abductive Learning framework, the inference process is as follows: | |||
.. image:: ../img/image_2.jpg | |||
:width: 700px | |||
[1] Robin Manhaeve, Sebastijan Dumancic, Angelika Kimmig, Thomas Demeester, and Luc De Raedt. Deepproblog: Neural probabilistic logic programming. In Advances in Neural Information Processing Systems 31 (NeurIPS), pages 3749-3759.2018. |
@@ -24,8 +24,8 @@ AI: data, models, and knowledge. | |||
**Data** module manages the storage, operation, and evaluation of data. | |||
It first features class ``ListData`` (inherited from base class | |||
``BaseDataElement``), which defines the data structures used in | |||
Abductive Learning, and comprises common data operations like addition, | |||
deletion, retrieval, and slicing. Additionally, a series of Evaluation | |||
Abductive Learning, and comprises common data operations like insertion, | |||
deletion, retrieval, slicing, etc. Additionally, a series of Evaluation | |||
Metrics, including class ``SymbolMetric`` and ``SemanticsMetric`` (both | |||
specialized metrics derived from base class ``BaseMetric``), outline | |||
methods for evaluating model quality from a data perspective. | |||
@@ -37,7 +37,7 @@ model, which may incorporate models such as those based on Scikit-learn | |||
or a neural network framework constructed by class ``BasicNN``. | |||
**Reasoning** module consists of the reasoning part of the Abductive | |||
learning. The class ``KBBase`` allows users to instantiate domain | |||
learning. The class ``KBBase`` allows users to define domain | |||
knowledge base. For diverse types of knowledge, we also offer | |||
implementations like ``GroundKB`` and ``PrologKB``, e.g., the latter | |||
enables knowledge base to be imported in the form of a Prolog files. | |||
@@ -46,7 +46,7 @@ responsible for minimizing the inconsistency between the knowledge base | |||
and learning models. | |||
Finally, the integration of these three modules occurs through | |||
**Bridge** module, which featurs class ``SimpleBridge`` (inherited from base | |||
**Bridge** module, which features class ``SimpleBridge`` (inherited from base | |||
class ``BaseBridge``). Bridge module synthesizes data, learning, and | |||
reasoning, and facilitates the training and testing of the entire | |||
Abductive Learning framework. | |||
@@ -55,13 +55,14 @@ Use ABL-Package Step by Step | |||
---------------------------- | |||
In a typical Abductive Learning process, as illustrated below, | |||
data inputs are first mapped to pseudo labels through a machine learning model. | |||
These pseudo labels then pass through a knowledge base :math:`\mathcal{KB}` | |||
data inputs are first predicted by a machine learning model, and the outcomes are a pseudo label | |||
sample (which consists of multiple pseudo labels). | |||
These labels then pass through a knowledge base :math:`\mathcal{KB}` | |||
to obtain the reasoning result by deductive reasoning. During training, | |||
alongside the aforementioned forward flow (i.e., prediction --> deduction reasoning), | |||
there also exists a reverse flow, which starts from the reasoning result and | |||
involves abductive reasoning to generate pseudo labels. | |||
Subsequently, these labels are processed to minimize inconsistencies with machine learning, | |||
involves abductive reasoning to generate possible pseudo label samples. | |||
Subsequently, these samples are processed to minimize inconsistencies with machine learning, | |||
which in turn revises the outcomes of the machine learning model, which are then | |||
fed back into the machine learning model for further training. | |||
To implement this process, the following five steps are necessary: | |||
@@ -74,15 +75,15 @@ To implement this process, the following five steps are necessary: | |||
2. Build the learning part | |||
Build a model that defines how to map input to pseudo labels. | |||
Build a model that can predict inputs to pseudo labels. | |||
Then, use ``ABLModel`` to encapsulate the model. | |||
3. Build the reasoning part | |||
Build a knowledge base by building a subclass of ``KBBase``, defining how to | |||
map pseudo labels to reasoning results. | |||
Also, instantiate a ``Reasoner`` for minimizing of inconsistencies | |||
between the knowledge base and pseudo labels. | |||
Define a knowledge base by building a subclass of ``KBBase``, specifying how to | |||
map pseudo label samples to reasoning results. | |||
Also, create a ``Reasoner`` for minimizing inconsistencies | |||
between the knowledge base and the learning part. | |||
4. Define Evaluation Metrics | |||
@@ -90,5 +91,5 @@ To implement this process, the following five steps are necessary: | |||
5. Bridge machine learning and reasoning | |||
Use ``SimpleBridge`` to bridge the machine learning and reasoning part | |||
Use ``SimpleBridge`` to bridge the learning and reasoning part | |||
for integrated training and testing. |
@@ -9,8 +9,7 @@ | |||
Quick Start | |||
=========== | |||
This section runs through the API for the neural-symbolic task, MNITST Add. Refer to the links in each section to dive deeper. | |||
We use the MNIST Addition task as a quick start example. In this task, the inputs are pairs of MNIST handwritten images, and the outputs are their sums. Refer to the links in each section to dive deeper. | |||
Working with Data | |||
----------------- | |||
@@ -56,6 +55,7 @@ ABL-Package assumes ``X`` to be of type ``List[List[Any]]``, ``gt_pseudo_label`` | |||
Out: | |||
.. code-block:: none | |||
:class: code-out | |||
Length of X List[List[Any]]: 30000 | |||
Length of gt_pseudo_label List[List[Any]]: 30000 | |||
@@ -73,7 +73,7 @@ ABL-Package offers several `dataset classes <../API/abl.dataset.html>`_ for diff | |||
Read more about `preparing datasets <Datasets.html>`_. | |||
Building the Learning Part | |||
---------------------------------- | |||
-------------------------- | |||
To build the machine learning part, we need to wrap our machine learning model into the ``ABLModel`` class. The machine learning model can either be a scikit-learn model or a PyTorch neural network. We use a simple LeNet5 in the MNIST Addition example. | |||
@@ -103,9 +103,10 @@ Aside from the network, we need to define a criterion, an optimizer, and a devic | |||
pred_prob = base_model.predict_proba(X=[torch.randn(1, 28, 28).to(device) for _ in range(32)]) | |||
print(f"Shape of pred_prob : {pred_prob.shape}") | |||
Out: | |||
Out: | |||
.. code-block:: none | |||
:class: code-out | |||
Shape of pred_idx : (32,) | |||
Shape of pred_prob : (32, 10) | |||
@@ -123,9 +124,9 @@ Read more about `building the learning part <Learning.html>`_. | |||
Building the Reasoning Part | |||
--------------------------- | |||
To build the reasoning part, we first build a knowledge base by | |||
creating a subclass of ``KBBase``, which defines how to map pseudo | |||
labels to reasoning results. In the subclass, we initialize the | |||
To build the reasoning part, we first define a knowledge base by | |||
creating a subclass of ``KBBase``, which specifies how to map a pseudo | |||
label sample to its reasoning result. In the subclass, we initialize the | |||
``pseudo_label_list`` parameter and override the ``logic_forward`` | |||
function specifying how to perform (deductive) reasoning. | |||
@@ -142,7 +143,7 @@ function specifying how to perform (deductive) reasoning. | |||
kb = AddKB(pseudo_label_list=list(range(10))) | |||
Then, we create a reasoner by defining an instance of class | |||
Then, we create a reasoner by instantiating the class | |||
``Reasoner`` and passing the knowledge base as a parameter. | |||
The reasoner can be used to minimize inconsistencies between the | |||
knowledge base and the prediction from the learning part. | |||
@@ -161,8 +162,6 @@ Building Evaluation Metrics | |||
ABL-Package provides two basic metrics, namely ``SymbolMetric`` and ``SemanticsMetric``, which are used to evaluate the accuracy of the machine learning model's predictions and the accuracy of the ``logic_forward`` results, respectively. | |||
In the case of MNIST Addition example, the metric definition looks like | |||
.. code:: python | |||
from abl.evaluation import SemanticsMetric, SymbolMetric | |||
@@ -192,7 +191,8 @@ Finally, we proceed with training and testing. | |||
Training log would be similar to this: | |||
.. code-block:: none | |||
:class: code-out | |||
2023/12/02 21:26:57 - abl - INFO - Abductive Learning on the MNIST Addition example. | |||
2023/12/02 21:32:20 - abl - INFO - Abductive Learning on the MNIST Addition example. | |||
2023/12/02 21:32:51 - abl - INFO - loop(train) [1/5] segment(train) [1/3] model loss is 1.85589 | |||
@@ -10,29 +10,34 @@ | |||
Reasoning part | |||
=============== | |||
In ABL-Package, constructing the reasoning part involves two steps: | |||
In ABL-Package, building the reasoning part involves two steps: | |||
1. Build a knowledge base by creating a subclass of ``KBBase``, which | |||
defines how to map pseudo labels to reasoning results. | |||
2. Define a reasoner by creating an instance of class ``Reasoner`` | |||
specifies how to map pseudo label samples to reasoning results. | |||
2. Create a reasoner by instantiating the class ``Reasoner`` | |||
to minimize inconsistencies between the knowledge base and pseudo | |||
labels predicted by the learning part. | |||
Step 1: Build a knowledge base | |||
------------------------------ | |||
Building a knowledge base | |||
------------------------- | |||
Generally, we can create a subclass inherited from ``KBBase`` to build our own | |||
knowledge base. In addition, ABL-Package also offers several predefined | |||
subclasses of ``KBBase`` (e.g., ``PrologKB`` and ``GroundKB``), | |||
which we can utilize to build our knowledge base more conveniently. | |||
Build your knowledge base from `KBBase` | |||
Building a knowledge base from `KBBase` | |||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |||
Generally, users can inherit from class ``KBBase`` to build your own | |||
knowledge base. For the user-built KB (an inherited subclass), it's only | |||
required to initialize the ``pseudo_label_list`` parameter | |||
For the user-built KB from `KBBase` (an inherited subclass), it's only | |||
required to pass the ``pseudo_label_list`` parameter in the ``__init__`` function | |||
and override the ``logic_forward`` function: | |||
- **pseudo_label_list** is the list of possible pseudo labels (also, | |||
the output of the machine learning model). | |||
- **logic_forward** defines how to perform (deductive) reasoning, | |||
i.e. matching each pseudo label to their reasoning result. | |||
i.e. matching each pseudo label sample (often consisting of multiple | |||
pseudo labels) to its reasoning result. | |||
After that, other operations, including how to perform abductive | |||
reasoning, will be **automatically** set up. | |||
@@ -42,8 +47,8 @@ MNIST Addition example | |||
As an example, the ``pseudo_label_list`` passed in MNIST Addition is all the | |||
possible digits, namely, ``[0,1,2,...,9]``, and the ``logic_forward`` | |||
is: “Add two pseudo labels to get the result.”. Therefore, the | |||
construction of the KB (``add_kb``) of MNIST Addition would be: | |||
should be: “Add the two labels in the pseudo label sample to get the result.”. Therefore, the | |||
construction of the KB (``add_kb``) for MNIST Addition would be: | |||
.. code:: python | |||
@@ -56,42 +61,61 @@ construction of the KB (``add_kb``) of MNIST Addition would be: | |||
add_kb = AddKB() | |||
and (deductive) reasoning in ``add_kb`` would be: | |||
.. code:: python | |||
pseudo_label_sample = [1, 2] | |||
reasoning_result = add_kb.logic_forward(pseudo_label_sample) | |||
print(f"Reasoning result of pseudo label sample {pseudo_label_sample} is {reasoning_result}.") | |||
Out: | |||
.. code:: none | |||
:class: code-out | |||
Reasoning result of pseudo label sample [1, 2] is 3 | |||
.. _other-par: | |||
Other optional parameters | |||
^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
You can also initialize the following parameters when building your | |||
We can also pass the following parameters in the ``__init__`` function when building our | |||
knowledge base: | |||
- **max_err** (float, optional), specifying the upper tolerance limit | |||
when comparing the similarity between a candidate's reasoning result | |||
when comparing the similarity between a pseudo label sample's reasoning result | |||
and the ground truth during abductive reasoning. This is only | |||
applicable when the reasoning result is of a numerical type. This is | |||
particularly relevant for regression problems where exact matches | |||
might not be feasible. Defaults to 1e-10. See :ref:`an example <kb-abd-2>`. | |||
- **use_cache** (bool, optional), indicating whether to use cache for | |||
previously abduced candidates to speed up subsequent abductive | |||
reasoning operations. Defaults to True. Defaults to True. | |||
- **use_cache** (bool, optional), indicating whether to use cache to store | |||
previous candidates (pseudo label samples generated from abductive reasoning) | |||
to speed up subsequent abductive reasoning operations. Defaults to True. | |||
For more information of abductive reasoning, please refer to :ref:`this <kb-abd>`. | |||
- **cache_size** (int, optional), specifying the maximum cache | |||
size. This is only operational when ``use_cache`` is set to True. | |||
Defaults to 4096. | |||
Diverse choices for building knowledge base | |||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |||
Building a knowledge base from Prolog file | |||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |||
In addition to building your own knowledge base through inheriting from class | |||
``KBBase``, ABL-Package also offers several predefined subclasses of ``KBBase``, | |||
which you can utilize to construct your knowledge base more conveniently. | |||
When aiming to leverage knowledge base from an external Prolog file | |||
(which contains how to perform reasoning), we can directly create an | |||
instance of class ``PrologKB``. Upon instantiation of | |||
``PrologKB``, we are required to pass the ``pseudo_label_list`` (same as ``KBBase``) | |||
and ``pl_file`` (the Prolog file) in the ``__init__`` function. | |||
Build your Knowledge base from Prolog file | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
.. admonition:: What is a Prolog file? | |||
For users aiming to leverage knowledge base from an external Prolog file | |||
(which contains how to perform reasoning), they can directly create an | |||
instance of class ``PrologKB``. Specifically, upon instantiation of | |||
``PrologKB``, users are required to provide the ``pseudo_label_list`` | |||
and ``pl_file`` (the Prolog file). | |||
A Prolog file is a script or source code file written in the Prolog language, | |||
which is a logic programming language where the logic is expressed in terms of | |||
relations, and represented as facts (basic assertions about some world) and | |||
rules (logical statements that describe the relationships between facts). | |||
A computation is initiated by running a query over these relations. | |||
Prolog files typically have the extension ``.pl``. See some Prolog examples | |||
in `SWISH <https://swish.swi-prolog.org/>`_. | |||
After the instantiation, other operations, including how to perform | |||
abductive reasoning, will also be **automatically** set up. | |||
@@ -101,13 +125,13 @@ abductive reasoning, will also be **automatically** set up. | |||
Note that to use the default logic forward and abductive reasoning | |||
methods in this class, the Prolog (.pl) file should contain a rule | |||
with a strict format: ``logic_forward(Pseudo_labels, Res).`` | |||
Otherwise, users might have to override ``logic_forward`` and | |||
Otherwise, we might have to override ``logic_forward`` and | |||
``get_query_string`` to allow for more adaptable usage. | |||
MNIST Addition example (cont.) | |||
""""""""""""""""""""""""""""""" | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
As an example, one can first write a Prolog file for the MNIST Addition | |||
As an example, we can first write a Prolog file for the MNIST Addition | |||
example as the following code, and then save it as ``add.pl``. | |||
.. code:: prolog | |||
@@ -120,13 +144,12 @@ Afterwards, the construction of knowledge base from Prolog file | |||
.. code:: python | |||
add_prolog_kb = PrologKB(pseudo_label_list=list(range(10)), | |||
pl_file="add.pl") | |||
add_prolog_kb = PrologKB(pseudo_label_list=list(range(10)), pl_file="add.pl") | |||
Build your Knowledge base with GKB from ``GroundKB`` | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
Building a knowledge base with GKB from ``GroundKB`` | |||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |||
Users can also inherit from class ``GroundKB`` to build their own | |||
We can also inherit from class ``GroundKB`` to build our own | |||
knowledge base. In this way, the knowledge built will have a Ground KB | |||
(GKB). | |||
@@ -137,19 +160,19 @@ knowledge base. In this way, the knowledge built will have a Ground KB | |||
result. The key advantage of having a Ground KB is that it may | |||
accelerate abductive reasoning. | |||
``GroundKB`` is a subclass of ``GKBBase``. Similar to ``KBBase``, users | |||
are required to initialize the ``pseudo_label_list`` parameter and | |||
``GroundKB`` is a subclass of ``GKBBase``. Similar to ``KBBase``, we | |||
are required to pass the ``pseudo_label_list`` parameter in the ``__init__`` function and | |||
override the ``logic_forward`` function, and are allowed to pass other | |||
:ref:`optional parameters <other-par>`. Additionally, users are required initialize the | |||
``GKB_len_list`` parameter. | |||
:ref:`optional parameters <other-par>`. Additionally, we are required to pass the | |||
``GKB_len_list`` parameter in the ``__init__`` function. | |||
- **GKB_len_list** is the list of possible lengths of pseudo label. | |||
- **GKB_len_list** is the list of possible lengths for a pseudo label sample. | |||
After that, other operations, including auto-construction of GKB, and | |||
how to perform abductive reasoning, will be **automatically** set up. | |||
MNIST Addition example (cont.) | |||
""""""""""""""""""""""""""""""" | |||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |||
As an example, the ``GKB_len_list`` for MNIST Addition should be ``[2]``, | |||
since each pseudo label sample in the example consists of two digits. Therefore, | |||
@@ -172,19 +195,19 @@ and whether an extra parameter ``GKB_len_list`` is passed. | |||
.. _kb-abd: | |||
Perform abductive reasoning in your knowledge base | |||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |||
Performing abductive reasoning in the knowledge base | |||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |||
As mentioned in :ref:`What is Abductive Reasoning? <abd>`, abductive reasoning | |||
enables the inference of candidate pseudo labels as potential | |||
enables the inference of candidates (which are pseudo label samples) as potential | |||
explanations for the reasoning result. Also, in Abductive Learning where | |||
an observation (a pseudo label predicted by the learning part) is | |||
an observation (a pseudo label sample predicted by the learning part) is | |||
available, we aim to ensure that the candidates do not largely revise the | |||
previously identified pseudo label. | |||
previously identified pseudo label sample. | |||
``KBBase`` (also, ``GroundKB`` and ``PrologKB``) implement the method | |||
``abduce_candidates(pseudo_label, y, max_revision_num, require_more_revision)`` | |||
for conducting abductive reasoning, where the parameters are: | |||
for performing abductive reasoning, where the parameters are: | |||
- **pseudo_label**, the pseudo label sample to be revised by abductive | |||
reasoning, usually generated by the learning part. | |||
@@ -192,13 +215,13 @@ for conducting abductive reasoning, where the parameters are: | |||
returned candidates should be compatible with it. | |||
- **max_revision_num**, an int value specifying the upper limit on the | |||
number of revised labels for each sample. | |||
- **require_more_revision**, an int value specifiying additional number | |||
- **require_more_revision**, an int value specifying additional number | |||
of revisions permitted beyond the minimum required. (e.g., If we set | |||
it to 0, even if ``max_revision_num`` is set to a high value, the | |||
method will only output candidates with the minimum possible | |||
revisions.) | |||
And it return a list of candidates (i.e., revised pseudo labels) that | |||
And it returns a list of candidates (i.e., revised pseudo label samples) that | |||
are all compatible with ``y``. | |||
MNIST Addition example (cont.) | |||
@@ -240,29 +263,29 @@ be higher, hence the candidates returned would be: | |||
| [1,1] | 11 | 1 | 0 | [[1,9], [9,1]] | | |||
+------------------+-------+----------------------+--------------------------+----------------+ | |||
Step 2: Create a reasoner | |||
------------------------- | |||
Creating a reasoner | |||
------------------- | |||
After building your knowledge base, the next step is defining a | |||
After building our knowledge base, the next step is creating a | |||
reasoner. Due to the indeterminism of abductive reasoning, there could | |||
be multiple candidates compatible with the knowledge base. When this | |||
happens, reasoner can minimize inconsistencies between the knowledge | |||
base and pseudo labels predicted by the learning part, and then return **only | |||
one** candidate which has the highest consistency. | |||
You can create a reasoner simply by defining an instance of class | |||
``Reasoner`` and passing your knowledge base as an parameter. As an | |||
We can create a reasoner simply by instantiating class | |||
``Reasoner`` and passing our knowledge base as a parameter. As an | |||
example for MNIST Addition, the reasoner definition would be: | |||
.. code:: python | |||
reasoner_add = Reasoner(kb_add) | |||
When instantiating, besides the required knowledge base, you may also | |||
When instantiating, besides the required knowledge base, we may also | |||
specify: | |||
- **max_revision** (int or float, optional), specifies the upper limit | |||
on the number of revisions for each data sample when performing | |||
on the number of revisions for each sample when performing | |||
:ref:`abductive reasoning in the knowledge base <kb-abd>`. If float, denotes the | |||
fraction of the total length that can be revised. A value of -1 | |||
implies no restriction on the number of revisions. Defaults to -1. | |||
@@ -270,8 +293,8 @@ specify: | |||
number of revisions permitted beyond the minimum required when | |||
performing :ref:`abductive reasoning in the knowledge base <kb-abd>`. Defaults to | |||
0. | |||
- **use_zoopt** (bool, optional), indicating whether to use `ZOOpt library <https://github.com/polixir/ZOOpt>`_. | |||
It is a library for zeroth-order optimization that can be used to | |||
- **use_zoopt** (bool, optional), indicating whether to use the `ZOOpt library <https://github.com/polixir/ZOOpt>`_, | |||
which is a library for zeroth-order optimization that can be used to | |||
accelerate consistency minimization. Defaults to False. | |||
- **dist_func** (str, optional), specifying the distance function to be | |||
used when determining consistency between your prediction and | |||
@@ -0,0 +1,3 @@ | |||
div.code-out > div.highlight > pre { | |||
background-color: #d3effd !important; | |||
} |
@@ -48,6 +48,8 @@ pygments_style = "default" | |||
html_theme = "sphinx_rtd_theme" | |||
html_theme_options = {"display_version": True} | |||
html_static_path = ['_static'] | |||
html_css_files = ['custom.css'] | |||
# html_theme_path = ["../.."] | |||
# html_logo = "demo/static/logo-wordmark-light.svg" | |||
# html_show_sourcelink = True | |||