diff --git a/LLama/Native/LLamaModelParams.cs b/LLama/Native/LLamaModelParams.cs
index a7cdd1a2..aff2c514 100644
--- a/LLama/Native/LLamaModelParams.cs
+++ b/LLama/Native/LLamaModelParams.cs
@@ -27,13 +27,19 @@ namespace LLama.Native
/// <summary>
/// how to split layers across multiple GPUs (size: )
/// </summary>
- public float* tensor_split;
-
+ public float* tensor_split;
+
/// <summary>
/// called with a progress value between 0 and 1, pass NULL to disable. If the provided progress_callback
/// returns true, model loading continues. If it returns false, model loading is immediately aborted.
/// </summary>
+#if NETSTANDARD2_0
+ // this code path is used when running LLamaSharp on .NET Framework 4.8 (.NET Standard 2.0),
+ // which does not handle the LlamaProgressCallback delegate type correctly, so an IntPtr is used instead
+ public IntPtr progress_callback;
+#else
public LlamaProgressCallback progress_callback;
+#endif
/// <summary>
/// context pointer passed to the progress callback
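
Note for reviewers (not part of the diff): with this change, callers targeting .NET Standard 2.0 must assign a function pointer rather than a delegate. The sketch below shows how calling code might set the field on both build targets. It assumes LlamaProgressCallback follows llama.cpp's llama_progress_callback signature (returns bool, takes a float progress and an IntPtr context); the ProgressCallbackExample type and Apply method are illustrative names only, not part of LLamaSharp.

using System;
using System.Runtime.InteropServices;
using LLama.Native;

internal static class ProgressCallbackExample
{
    // Keep a static reference so the delegate is not garbage-collected
    // while native code may still invoke it during model loading.
    private static readonly LlamaProgressCallback Callback = (progress, ctx) =>
    {
        Console.WriteLine($"Loading: {progress:P0}");
        return true; // returning false aborts model loading
    };

    public static void Apply(ref LLamaModelParams @params)
    {
#if NETSTANDARD2_0
        // On .NET Framework 4.8 the field is an IntPtr, so marshal the delegate
        // to an unmanaged function pointer.
        @params.progress_callback = Marshal.GetFunctionPointerForDelegate(Callback);
#else
        // On other targets the delegate can be assigned directly.
        @params.progress_callback = Callback;
#endif
    }
}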