Browse Source

[DOC] complete reasoning

pull/1/head
troyyyyy 1 year ago
parent
commit
9c9908fbf5
6 changed files with 329 additions and 93 deletions
  1. +78
    -22
      abl/reasoning/kb.py
  2. +13
    -6
      abl/reasoning/reasoner.py
  3. +1
    -1
      docs/Intro/Basics.rst
  4. +1
    -1
      docs/Intro/Quick-Start.rst
  5. +234
    -63
      docs/Intro/Reasoning.rst
  6. +2
    -0
      docs/Overview/Abductive-Learning.rst

+ 78
- 22
abl/reasoning/kb.py View File

@@ -25,7 +25,7 @@ class KBBase(ABC):
the 0th index, the second with the 1st, and so forth.
max_err : float, optional
The upper tolerance limit when comparing the similarity between a candidate's logical
result. This is only applicable when the logical result is of a numerical type.
result and the ground truth. This is only applicable when the logical result is of a numerical type.
This is particularly relevant for regression problems where exact matches might not be
feasible. Defaults to 1e-10.
use_cache : bool, optional
@@ -77,9 +77,9 @@ class KBBase(ABC):
"""
pass

def abduce_candidates(self, pseudo_label, y, max_revision_num, require_more_revision=0):
def abduce_candidates(self, pseudo_label, y, max_revision_num, require_more_revision):
"""
Perform abductive reasoning to get a candidate consistent with the knowledge base.
Perform abductive reasoning to get a candidate compatible with the knowledge base.

Parameters
----------
@@ -89,14 +89,13 @@ class KBBase(ABC):
Ground truth of the logical result for the sample.
max_revision_num : int
The upper limit on the number of revised labels for each sample.
require_more_revision : int, optional
require_more_revision : int
Specifies additional number of revisions permitted beyond the minimum required.
Defaults to 0.

Returns
-------
List[List[Any]]
A list of candidates, i.e. revised pseudo labels that are consistent with the
A list of candidates, i.e. revised pseudo labels that are compatible with the
knowledge base.
"""
return self._abduce_by_search(pseudo_label, y, max_revision_num, require_more_revision)
@@ -105,6 +104,11 @@ class KBBase(ABC):
"""
Check whether the logical result of a candidate is equal to the ground truth
(or, within the maximum error allowed for numerical results).
Returns
-------
bool
The result of the check.
"""
if logic_result == None:
return False
@@ -126,6 +130,12 @@ class KBBase(ABC):
Ground truth of the logical result for the sample.
revision_idx : array-like
Indices of where revisions should be made to the pseudo label sample.
Returns
-------
List[List[Any]]
A list of candidates, i.e. revised pseudo labels that are compatible with the
knowledge base.
"""
candidates = []
abduce_c = product(self.pseudo_label_list, repeat=len(revision_idx))
@@ -140,7 +150,7 @@ class KBBase(ABC):
def _revision(self, revision_num, pseudo_label, y):
"""
For a specified number of pseudo label to revise, iterate through all possible
indices to find any candidates that are consistent with the knowledge base.
indices to find any candidates that are compatible with the knowledge base.
"""
new_candidates = []
revision_idx_list = combinations(range(len(pseudo_label)), revision_num)
@@ -155,7 +165,7 @@ class KBBase(ABC):
"""
Perform abductive reasoning by exhaustive search. Specifically, begin with 0 and
continuously increase the number of pseudo labels to revise, until candidates
that are consistent with the knowledge base are found.
that are compatible with the knowledge base are found.

Parameters
----------
@@ -166,14 +176,14 @@ class KBBase(ABC):
max_revision_num : int
The upper limit on the number of revisions.
require_more_revision : int
If larger than 0, then after having found any candidates consistent with the
If larger than 0, then after having found any candidates compatible with the
knowledge base, continue to increase the number of pseudo labels to revise to
get more possible consistent candidates.
get more possible compatible candidates.

Returns
-------
List[List[Any]]
A list of candidates, i.e. revised pseudo label that are consistent with the
A list of candidates, i.e. revised pseudo label that are compatible with the
knowledge base.
"""
candidates = []
@@ -271,13 +281,28 @@ class GroundKB(KBBase):
X, Y = zip(*sorted(zip(X, Y), key=lambda pair: pair[1]))
return X, Y

def abduce_candidates(self, pseudo_label, y, max_revision_num, require_more_revision=0):
def abduce_candidates(self, pseudo_label, y, max_revision_num, require_more_revision):
"""
Perform abductive reasoning by directly retrieving consistent candidates from
Perform abductive reasoning by directly retrieving compatible candidates from
the prebuilt GKB. In this way, the time-consuming exhaustive search can be
avoided.
This is an overridden function. For more information about the parameters and
returns, refer to the function of the same name in class `KBBase`.
Parameters
----------
pseudo_label : List[Any]
Pseudo label sample (to be revised by abductive reasoning).
y : any
Ground truth of the logical result for the sample.
max_revision_num : int
The upper limit on the number of revised labels for each sample.
require_more_revision : int, optional
Specifies additional number of revisions permitted beyond the minimum required.

Returns
-------
List[List[Any]]
A list of candidates, i.e. revised pseudo labels that are compatible with the
knowledge base.
"""
if self.GKB == {} or len(pseudo_label) not in self.GKB_len_list:
return []
@@ -295,7 +320,7 @@ class GroundKB(KBBase):

def _find_candidate_GKB(self, pseudo_label, y):
"""
Retrieve consistent candidates from the prebuilt GKB. For numerical logical results,
Retrieve compatible candidates from the prebuilt GKB. For numerical logical results,
return all candidates whose logical results fall within the
[y - max_err, y + max_err] range.
"""
@@ -375,6 +400,11 @@ class PrologKB(KBBase):
returned `Res` as the logical results. To use this default function, there must be
a Prolog `logic_forward` method in the pl file to perform logical reasoning.
Otherwise, users would override this function.
Parameters
----------
pseudo_label : List[Any]
Pseudo label sample.
"""
result = list(self.prolog.query("logic_forward(%s, Res)." % pseudo_labels))[0]["Res"]
if result == "true":
@@ -398,10 +428,23 @@ class PrologKB(KBBase):

def get_query_string(self, pseudo_label, y, revision_idx):
"""
Consult prolog with `logic_forward([kept_labels, Revise_labels], Res).`, and set
the returned `Revise_labels` together with the kept labels as the candidates. This is
a default function for demo, users would override this function to adapt to their own
Prolog file.
Get the query to be used for consulting Prolog.
This is a default function for demo, users would override this function to adapt to their own
Prolog file. In this demo function, return query `logic_forward([kept_labels, Revise_labels], Res).`.
Parameters
----------
pseudo_label : List[Any]
Pseudo label sample (to be revised by abductive reasoning).
y : any
Ground truth of the logical result for the sample.
revision_idx : array-like
Indices of where revisions should be made to the pseudo label sample.
Returns
-------
str
A string of the query.
"""
query_string = "logic_forward("
query_string += self._revision_pseudo_label(pseudo_label, revision_idx)
@@ -412,8 +455,21 @@ class PrologKB(KBBase):
def revise_at_idx(self, pseudo_label, y, revision_idx):
"""
Revise the pseudo label sample at specified index positions by querying Prolog.
This is an overridden function. For more information about the parameters, refer to
the function of the same name in class `KBBase`.
Parameters
----------
pseudo_label : List[Any]
Pseudo label sample (to be revised).
y : Any
Ground truth of the logical result for the sample.
revision_idx : array-like
Indices of where revisions should be made to the pseudo label sample.
Returns
-------
List[List[Any]]
A list of candidates, i.e. revised pseudo labels that are compatible with the
knowledge base.
"""
candidates = []
query_string = self.get_query_string(pseudo_label, y, revision_idx)


+ 13
- 6
abl/reasoning/reasoner.py View File

@@ -18,9 +18,11 @@ class ReasonerBase:
The knowledge base to be used for reasoning.
dist_func : str, optional
The distance function to be used when determining the cost list between each
candidate and the given prediction. Valid options include: "hamming" |
"confidence" (default). For detailed explanations of these options, refer to
`_get_cost_list`.
candidate and the given prediction. Valid options include: "confidence" (default) |
"hamming". For "confidence", it calculates the distance between the prediction
and candidate based on confidence derived from the predicted probability in the
data sample. For "hamming", it directly calculates the Hamming distance between
the predicted pseudo label in the data sample and candidate.
mapping : dict, optional
A mapping from index in the base model to label. If not provided, a default
order-based mapping is created.
@@ -79,7 +81,12 @@ class ReasonerBase:
data_sample : ListData
Data sample.
candidates : List[List[Any]]
Multiple consistent candidates.
Multiple compatible candidates.
Returns
-------
List[Any]
A selected candidate.
"""
if len(candidates) == 0:
return []
@@ -105,7 +112,7 @@ class ReasonerBase:
data_sample : ListData
Data sample.
candidates : List[List[Any]]
Multiple consistent candidates.
Multiple compatible candidates.
"""
if self.dist_func == "hamming":
return hamming_dist(data_sample.pred_pseudo_label, candidates)
@@ -195,7 +202,7 @@ class ReasonerBase:
Returns
-------
List[Any]
A revised pseudo label through abductive reasoning, which is consistent with the
A revised pseudo label through abductive reasoning, which is compatible with the
knowledge base.
"""
symbol_num = data_sample.elements_num("pred_pseudo_label")


+ 1
- 1
docs/Intro/Basics.rst View File

@@ -72,7 +72,7 @@ To implement this process, the following five steps are necessary:

Prepare the data's input, ground truth for pseudo labels (optional), and ground truth for logical results.

2. Build machine learning part
2. Build the learning part

Build a model that defines how to map input to pseudo labels.
Then, use ``ABLModel`` to encapsulate the model.


+ 1
- 1
docs/Intro/Quick-Start.rst View File

@@ -152,7 +152,7 @@ Then, we create a reasoner. Aside from the knowledge base, the instantiation of

from abl.reasoning import ReasonerBase
reasoner = ReasonerBase(kb, dist_func="confidence")
reasoner = ReasonerBase(kb)

Read more about `building the reasoning part <Reasoning.html>`_.



+ 234
- 63
docs/Intro/Reasoning.rst View File

@@ -10,37 +10,40 @@
Reasoning part
===============

In ABL-Package, there are two steps to construct the reasoning part:
In ABL-Package, constructing the reasoning part involves two steps:

1. Build a knowledge base by creating a subclass of ``KBBase``, which
defines how to map pseudo labels to logical results.
2. Define a reasoner by creating an instance of class ``ReasonerBase``
to minimize inconsistencies between the knowledge base and pseudo
labels.
labels predicted by the learning part.

Build a knowledge base
----------------------
Step 1: Build a knowledge base
------------------------------

Build your Knowledge base from ``KBBase``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Build your knowledge base from `KBBase`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Generally, users can inherit from class ``KBBase`` to build their own
knowledge base. For the user-build KB (an inherited subclass), it's only
required for the user to initialize the ``pseudo_label_list`` parameters
Generally, users can inherit from class ``KBBase`` to build their own
knowledge base. For the user-built KB (an inherited subclass), it’s only
required to initialize the ``pseudo_label_list`` parameter
and override the ``logic_forward`` function:

- ``pseudo_label_list`` is the list of possible pseudo labels (i.e.,
- **pseudo_label_list** is the list of possible pseudo labels (also,
the output of the machine learning model).
- ``logic_forward`` is how to perform (deductive) reasoning,
i.e. matching each pseudo label to their logical result.
- **logic_forward** defines how to perform (deductive) reasoning,
i.e. matching each pseudo label to their logical result.

After that, other operations, including how to perform abductive
reasoning, will be **automatically** set up.

As an example, the ``pseudo_label_list`` passed in MNISTAdd is all the
MNIST Add example
^^^^^^^^^^^^^^^^^

As an example, the ``pseudo_label_list`` passed in MNIST Add is all the
possible digits, namely, ``[0,1,2,...,9]``, and the ``logic_forward``
is: “Add two pseudo labels to get the result.”. Therefore, the
construction of the KB (``add_kb``) of MNISTAdd would be:
construction of the KB (``add_kb``) of MNIST Add would be:

.. code:: python

@@ -53,26 +56,75 @@ construction of the KB (``add_kb``) of MNISTAdd would be:

add_kb = AddKB()

.. _other-par:

Other optional parameters
^^^^^^^^^^^^^^^^^^^^^^^^^

The following parameters can also be passed in when building your
You can also initialize the following parameters when building your
knowledge base:

- ``max_err`` (float, optional), which is the upper tolerance limit
when comparing the similarity between a candidate's logical result
during abductive reasoning. This is only applicable when the logical
result is of a numerical type. This is particularly relevant for
regression problems where exact matches might not be feasible.
Defaults to 1e-10.
- ``use_cache`` (bool, optional), indicates whether to use cache for
- **max_err** (float, optional), specifying the upper tolerance limit
when comparing the similarity between a candidate's logical result
and the ground truth during abductive reasoning. This is only
applicable when the logical result is of a numerical type. This is
particularly relevant for regression problems where exact matches
might not be feasible. Defaults to 1e-10. See :ref:`an example <kb-abd-2>`.
- **use_cache** (bool, optional), indicating whether to use cache for
previously abduced candidates to speed up subsequent abductive
reasoning operations. Defaults to True.
- ``max_cache_size`` (int, optional), The maximum cache size. This is
only operational when ``use_cache`` is set to True. Defaults to 4096.
reasoning operations. Defaults to True.
- **max_cache_size** (int, optional), specifying the maximum cache
size. This is only operational when ``use_cache`` is set to True.
Defaults to 4096.

Diverse choices for building knowledge base
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In addition to building your own knowledge base through inheriting from class
``KBBase``, ABL-Package also offers several predefined subclasses of ``KBBase``,
which you can utilize to construct your knowledge base.

Build your Knowledge base from Prolog file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

For users aiming to leverage a knowledge base from an external Prolog file
(which contains how to perform reasoning), they may directly create an
instance of class ``PrologKB``. Specifically, upon instantiation of
``PrologKB``, users are required to provide the ``pseudo_label_list``
and ``pl_file`` (the Prolog file).

After the instantiation, other operations, including how to perform
abductive reasoning, will also be **automatically** set up.

.. warning::

Note that to use the default logic forward and abductive reasoning
methods in this class, the Prolog (.pl) file should contain a rule
with a strict format: ``logic_forward(Pseudo_labels, Res).``
Otherwise, users might have to override ``logic_forward`` and
``get_query_string`` to allow for more adaptable usage.

MNIST Add example (cont.)
"""""""""""""""""""""""""

As an example, one can first write a Prolog file for the MNISTAdd
example as the following code, and then save it as ``add.pl``.

.. code:: prolog

pseudo_label(N) :- between(0, 9, N).
logic_forward([Z1, Z2], Res) :- pseudo_label(Z1), pseudo_label(Z2), Res is Z1+Z2.

Afterwards, the construction of knowledge base from Prolog file
(``add_prolog_kb``) would be as follows:

.. code:: python

add_prolog_kb = PrologKB(pseudo_label_list=list(range(10)),
pl_file="add.pl")

Build your Knowledge base with GKB from ``GroundKB``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Users can also inherit from class ``GroundKB`` to build their own
knowledge base. In this way, the knowledge built will have a Ground KB
@@ -85,16 +137,20 @@ knowledge base. In this way, the knowledge built will have a Ground KB
result. The key advantage of having a Ground KB is that it may
accelerate abductive reasoning.

Similar to ``KBBase``, users are required to initialize the
``pseudo_label_list`` parameter and override the ``logic_forward``
function. Additionally, users should initialize the ``GKB_len_list``
parameter.
``GroundKB`` is a subclass of ``KBBase``. Similar to ``KBBase``, users
are required to initialize the ``pseudo_label_list`` parameter and
override the ``logic_forward`` function, and are allowed to pass other
:ref:`optional parameters <other-par>`. Additionally, users are required to initialize the
``GKB_len_list`` parameter.

- ``GKB_len_list`` is the list of possible lengths of pseudo label.
- **GKB_len_list** is the list of possible lengths of pseudo label.

After that, other operations, including auto-construction of GKB, and
how to perform abductive reasoning, will be **automatically** set up.

MNIST Add example (cont.)
"""""""""""""""""""""""""

As an example, the ``GKB_len_list`` for MNISTAdd should be ``[2]``,
since all pseudo labels in the example consist of two digits. Therefore,
the construction of KB with GKB (``add_ground_kb``) of MNISTAdd would be
@@ -114,41 +170,156 @@ and whether an extra parameter ``GKB_len_list`` is passed.
add_ground_kb = AddGroundKB()

Build your Knowledge base from Prolog file
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

For users aiming to leverage a knowledge base from an external Prolog file
(which contains how to perform reasoning), they may directly create an
instance of class ``PrologKB``. Specifically, upon instantiation of
``PrologKB``, users are required to provide the ``pseudo_label_list``
and ``pl_file`` (the Prolog file).

After the instantiation, other operations, including how to perform
abductive reasoning, will also be **automatically** set up.

.. attention::

Note that in order to use the default logic forward and abductive reasoning
methods in this class ``PrologKB``, the Prolog (.pl) file should contain a rule
with a strict format: ``logic_forward(Pseudo_labels, Res).``
Otherwise, users might have to override ``logic_forward`` and
``get_query_string`` to allow for more adaptable usage.

As an example, one can first write a Prolog file for the MNISTAdd
example as the following code, and then save it as ``add.pl``.
.. _kb-abd:

Perform abductive reasoning in your knowledge base
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

As mentioned in :ref:`What is Abductive Reasoning? <abd>`, abductive reasoning
enables the inference of candidate pseudo labels as potential
explanations for the logical result. Also, in Abductive Learning where
an observation (a pseudo label predicted by the learning part) is
available, we aim to ensure that the candidate does not largely revise the
previously identified pseudo label.

``KBBase`` (also, ``GroundKB`` and ``PrologKB``) implement the method
``abduce_candidates(pseudo_label, y, max_revision_num, require_more_revision)``
for conducting abductive reasoning, where the parameters are:

- **pseudo_label**, the pseudo label sample to be revised by abductive
reasoning, usually generated by the learning part.
- **y**, the ground truth of the logical result for the sample. The
returned candidates should be compatible with it.
- **max_revision_num**, an int value specifying the upper limit on the
number of revised labels for each sample.
- **require_more_revision**, an int value specifying additional number
of revisions permitted beyond the minimum required. (e.g. If we set
it to 0, even if ``max_revision_num`` is set to a high value, the
method will only output candidates with the minimum possible
revisions.)

And it returns a list of candidates (i.e., revised pseudo labels) that
are all compatible with ``y``.

MNIST Add example (cont.)
^^^^^^^^^^^^^^^^^^^^^^^^^

.. code:: prolog
As an example, with MNIST Add, the candidates returned by
``add_kb.abduce_candidates`` would be as follows:

+--------------+-------+--------------+---------------+----------------+
| ``pseudo_ | ``y`` | ``max_re | ``require_ | Output |
| label``      |       | vision_num`` | more_revision``| |
+==============+=======+==============+===============+================+
| [1,1] | 8 | 1 | 0 | [[1,7], [7,1]] |
+--------------+-------+--------------+---------------+----------------+
| [1,1] | 8 | 1 | 1 | [[1,7], [7,1]] |
+--------------+-------+--------------+---------------+----------------+
| [1,1] | 8 | 2 | 0 | [[1,7], [7,1]] |
+--------------+-------+--------------+---------------+----------------+
| [1,1] | 8 | 2 | 1 | [[1,7], |
| | | | | [7,1], [2,6], |
| | | | | [6,2], [3,5], |
| | | | | [5,3], [4,4]] |
+--------------+-------+--------------+---------------+----------------+
| [1,1] | 11 | 1 | 0 | [] |
+--------------+-------+--------------+---------------+----------------+

.. _kb-abd-2:

As another example, if we set the ``max_err`` of ``AddKB`` to be 1
instead of the default 1e-10, the tolerance limit for consistency will
be higher, hence the candidates returned would be:

+--------------+-------+--------------+---------------+----------------+
| ``pseudo_ | ``y`` | ``max_re | ``require_ | Output |
| label``      |       | vision_num`` | more_revision``| |
+==============+=======+==============+===============+================+
| [1,1] | 8 | 1 | 0 | [[1,7], [7,1], |
| | | | | [1,6], [6,1], |
| | | | | [1,8], [8,1]] |
+--------------+-------+--------------+---------------+----------------+
| [1,1] | 11 | 1 | 0 | [[1,9], [9,1]] |
+--------------+-------+--------------+---------------+----------------+

Step 2: Create a reasoner
-------------------------

After building your knowledge base, the next step is defining a
reasoner. Due to the indeterminism of abductive reasoning, there could
be multiple candidates compatible with the knowledge base. When this
happens, reasoner can minimize inconsistencies between the knowledge
base and pseudo labels predicted by the learning part and return **only
one** candidate which has the highest consistency.

You can create a reasoner simply by defining an instance of class
``ReasonerBase`` and passing your knowledge base as a parameter. As an
example for MNIST Add, the reasoner definition would be:

pseudo_label(N) :- between(0, 9, N).
logic_forward([Z1, Z2], Res) :- pseudo_label(Z1), pseudo_label(Z2), Res is Z1+Z2.
.. code:: python

Afterwards, the construction of knowledge base from Prolog file
(``add_prolog_kb``) would be as follows:
reasoner_add = ReasonerBase(kb_add)

When instantiating, besides the required knowledge base, you may also
specify:

- **max_revision** (int or float, optional), specifies the upper limit
on the number of revisions for each data sample when performing
:ref:`abductive reasoning in the knowledge base <kb-abd>`. If float, denotes the
fraction of the total length that can be revised. A value of -1
implies no restriction on the number of revisions. Defaults to -1.
- **require_more_revision** (int, optional), specifies additional
number of revisions permitted beyond the minimum required when
performing :ref:`abductive reasoning in the knowledge base <kb-abd>`. Defaults to
0.
- **use_zoopt** (bool, optional), indicating whether to use the Zoopt.
It is a library for zeroth-order optimization that can be used to
accelerate consistency minimization. Defaults to False.
- **dist_func** (str, optional), specifying the distance function to be
used when determining consistency between your prediction and
candidate returned from knowledge base. Valid options include
“confidence” (default) and “hamming”. For “confidence”, it calculates
the distance between the prediction and candidate based on confidence
derived from the predicted probability in the data sample. For
“hamming”, it directly calculates the Hamming distance between the
predicted pseudo label in the data sample and candidate.

The main method implemented by ``ReasonerBase`` is
``abduce(data_sample)``, which obtains the most consistent candidate.

MNIST Add example (cont.)
~~~~~~~~~~~~~~~~~~~~~~~~~

As an example, consider these data samples for MNIST Add:

.. code:: python

add_prolog_kb = PrologKB(pseudo_label_list=list(range(10)),
pl_file="add.pl")

Create a reasoner
-----------------
# favor "1" for the first label
prob1 = [[0, 0.99, 0, 0, 0, 0, 0, 0.01, 0, 0],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]

# favor "7" for the first label
prob2 = [[0, 0.01, 0, 0, 0, 0, 0, 0.99, 0, 0],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]

sample1 = ListData()
sample1.pred_pseudo_label = [1, 1]
sample1.pred_prob = prob1
sample1.Y = 8

sample2 = ListData()
sample2.pred_pseudo_label = [1, 1]
sample2.pred_prob = prob2
sample2.Y = 8

The compatible candidates after abductive reasoning for both samples
would be ``[[1,7], [7,1]]``. However, when selecting only one candidate
based on confidence, the output from ``reasoner_add.abduce`` would
differ for each sample:

=============== ======
``data_sample`` Output
=============== ======
sample1 [1,7]
sample2 [7,1]
=============== ======

+ 2
- 0
docs/Overview/Abductive-Learning.rst View File

@@ -57,6 +57,8 @@ is dual-driven by both data and domain knowledge, integrating and
balancing the use of machine learning and logical reasoning in a unified
model.

.. _abd:

.. admonition:: What is Abductive Reasoning?

Abductive reasoning, also known as abduction, refers to the process of


Loading…
Cancel
Save