diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5668e7faa2acdfb7e9308e63d6107a8a73e4aae0..72069e97efc467a257420df05a9dc0e284b81fd0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,11 @@
 Change Log
 ==========
 
+0.4.1: 2025/01/32
+------------------
+
+Fix handling of working directory paths that contain spaces
+
 0.4.0: 2024/11/13
 ------------------
 
diff --git a/src/sluurp/job.py b/src/sluurp/job.py
index fea7a7582e7370cea47c92e992dfc6052fa60799..1af6d08e8b9cb1e1fbc7e7c96807814673f9be50 100644
--- a/src/sluurp/job.py
+++ b/src/sluurp/job.py
@@ -141,7 +141,7 @@ class ScriptJob(Job):
 
     def _write_script_preprocessing_lines(self, file_object):
         if self._working_directory is not None:
-            file_object.write(f"#SBATCH --chdir={self.working_directory}\n")
+            file_object.write(f"#SBATCH --chdir='{self.working_directory}'\n")
 
     def _write_script_postprocessing_lines(self, file_object):
         pass
@@ -171,8 +171,8 @@ class SBatchScriptJob(ScriptJob):
        # uuid4: make the cache dir unique for each job. Otherwise different scripts
        # could end up sharing the same pycuda dir, and the first one to finish would
        # delete it, leaving the other script unable to continue...
-        self._script.insert(0, f"mkdir -p {self._pycuda_cache_dir}")
-        self._script.insert(1, f"export PYCUDA_CACHE_DIR={self._pycuda_cache_dir}")
+        self._script.insert(0, f"mkdir -p '{self._pycuda_cache_dir}'")
+        self._script.insert(1, f"export PYCUDA_CACHE_DIR='{self._pycuda_cache_dir}'")
 
     def set_status(self, status):
         self._status = status
@@ -205,7 +205,7 @@ class SBatchScriptJob(ScriptJob):
             )
         if self.dry_run is False:
             job_id = SubProcessCommand(
-                command=f"sbatch --export={export} {script_path}"
+                command=f"sbatch --export={export} '{script_path}'"
             ).run()
         else:
             job_id = "-1"
@@ -240,7 +240,7 @@ class SBatchScriptJob(ScriptJob):
         )
         # define out file
         output_file_path = self._get_output_file_path()
-        file_object.write(f"#SBATCH --output={output_file_path}\n")
+        file_object.write(f"#SBATCH --output='{output_file_path}'\n")
 
         for slurm_line in slurm_lines:
             file_object.write(slurm_line + "\n")