#![deny(warnings)]
#![warn(unused_extern_crates)]
#![deny(clippy::todo)]
#![deny(clippy::unimplemented)]
#![deny(clippy::unwrap_used)]
#![deny(clippy::expect_used)]
#![deny(clippy::panic)]
#![deny(clippy::unreachable)]
#![deny(clippy::await_holding_lock)]
#![deny(clippy::needless_pass_by_value)]
#![deny(clippy::trivially_copy_pass_by_ref)]

use clap::{Arg, ArgAction, Command};
use futures::{SinkExt, StreamExt};
use kanidm_client::KanidmClientBuilder;
use kanidm_hsm_crypto::{
    provider::{BoxedDynTpm, SoftTpm, Tpm},
    AuthValue,
};
use kanidm_proto::constants::DEFAULT_CLIENT_CONFIG_PATH;
use kanidm_proto::internal::OperationError;
use kanidm_unix_common::constants::DEFAULT_CONFIG_PATH;
use kanidm_unix_common::json_codec::JsonCodec;
use kanidm_unix_common::unix_config::{HsmType, UnixdConfig};
use kanidm_unix_common::unix_passwd::EtcDb;
use kanidm_unix_common::unix_proto::{
    ClientRequest, ClientResponse, TaskRequest, TaskRequestFrame, TaskResponse,
};
use kanidm_unix_resolver::db::{Cache, Db};
use kanidm_unix_resolver::idprovider::interface::IdProvider;
use kanidm_unix_resolver::idprovider::kanidm::KanidmProvider;
use kanidm_unix_resolver::idprovider::system::SystemProvider;
use kanidm_unix_resolver::resolver::Resolver;
use kanidm_utils_users::{get_current_gid, get_current_uid, get_effective_gid, get_effective_uid};
use libc::umask;
use sketching::tracing::span;
use sketching::tracing_forest::traits::*;
use sketching::tracing_forest::util::*;
use sketching::tracing_forest::{self};
use std::collections::BTreeMap;
use std::error::Error;
use std::fs::metadata;
use std::io::Error as IoError;
use std::os::unix::fs::MetadataExt;
use std::path::{Path, PathBuf};
use std::process::ExitCode;
use std::str::FromStr;
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use time::OffsetDateTime;
use tokio::fs::File;
use tokio::io::AsyncReadExt;
use tokio::net::{UnixListener, UnixStream};
use tokio::sync::broadcast;
use tokio::sync::mpsc::{channel, Receiver, Sender};
use tokio::sync::oneshot;
use tokio_util::codec::Framed;

#[cfg(feature = "dhat-heap")]
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;

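/// A task destined for the task daemon, paired with a oneshot sender that is
/// signalled once the daemon acknowledges the task as complete.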
struct AsyncTaskRequest {
    task_req: TaskRequest,
    task_chan: oneshot::Sender<()>,
}

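/// Remove the file at `p` if it exists. Failures are logged and otherwise
/// ignored so that startup can continue.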
fn rm_if_exist(p: &str) {
    if Path::new(p).exists() {
        debug!("Removing requested file {:?}", p);
        let _ = std::fs::remove_file(p).map_err(|e| {
            error!("Failure while attempting to remove {:?} -> {:?}", p, e);
        });
    } else {
        debug!("Path {:?} doesn't exist, not attempting to remove.", p);
    }
}

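/// Drive a single connection from the task daemon. Queued task requests from
/// `task_channel_rx` are framed and written to the stream; completion
/// acknowledgements and shadow (/etc) change notifications coming back are
/// relayed to the appropriate channels. Returns when shutdown is broadcast,
/// the channel closes, or the stream errors.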
async fn handle_task_client(
    stream: UnixStream,
    notify_shadow_change_tx: &Sender<EtcDb>,
    task_channel_rx: &mut Receiver<AsyncTaskRequest>,
    broadcast_rx: &mut broadcast::Receiver<bool>,
) -> Result<(), Box<dyn Error>> {
    let mut last_task_id: u64 = 0;
    let mut task_handles: BTreeMap<u64, oneshot::Sender<()>> = BTreeMap::new();

    let codec: JsonCodec<TaskResponse, TaskRequestFrame> = JsonCodec::default();

    let mut framed_stream = Framed::new(stream, codec);

    loop {
        tokio::select! {
            biased;
            _ = broadcast_rx.recv() => {
                return Ok(())
            }
            response = framed_stream.next() => {
                match response {
                    Some(Ok(TaskResponse::Success(task_id))) => {
                        debug!("Task was acknowledged and completed.");

                        if let Some(handle) = task_handles.remove(&task_id) {
                            let _ = handle.send(());
                        }
                    }
                    Some(Ok(TaskResponse::NotifyShadowChange(etc_db))) => {
                        let _ = notify_shadow_change_tx.send(etc_db).await;
                    }
                    other => {
                        error!("Error -> {:?}", other);
                        return Err(Box::new(IoError::other("oh no!")));
                    }
                }
            }
            task_request = task_channel_rx.recv() => {
                let Some(AsyncTaskRequest {
                    task_req,
                    task_chan
                }) = task_request else {
                    return Ok(())
                };

                debug!("Sending Task -> {:?}", task_req);

                last_task_id += 1;
                let task_id = last_task_id;

                task_handles.insert(task_id, task_chan);

                let task_frame = TaskRequestFrame {
                    id: task_id,
                    req: task_req,
                };

                if let Err(err) = framed_stream.send(task_frame).await {
                    warn!("Unable to queue task for completion");
                    return Err(Box::new(err));
                }
            }
        }
    }
}

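/// Serve a single client connection on the resolver socket: decode each
/// request, answer it from the resolver / cache layer, and write the response
/// back until the client disconnects.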
async fn handle_client(
    sock: UnixStream,
    cachelayer: Arc<Resolver>,
    task_channel_tx: &Sender<AsyncTaskRequest>,
) -> Result<(), Box<dyn Error>> {
    let conn_id = uuid::Uuid::new_v4();

    let span = span!(Level::DEBUG, "accepted connection", uuid = %conn_id);
    let _enter = span.enter();

    let Ok(ucred) = sock.peer_cred() else {
        return Err(Box::new(IoError::other(
            "Unable to verify peer credentials.",
        )));
    };

    debug!(uid = ?ucred.uid(), gid = ?ucred.gid(), pid = ?ucred.pid());

    let codec: JsonCodec<ClientRequest, ClientResponse> = JsonCodec::default();

    let mut reqs = Framed::new(sock, codec);
    let mut pam_auth_session_state = None;

    let (shutdown_tx, _shutdown_rx) = broadcast::channel(1);

    debug!("Waiting for requests ...");
    drop(_enter);

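    // Request dispatch loop: decode each client request, answer it from the
    // resolver / cache layer, and write the response back on the same stream.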
    while let Some(Ok(req)) = reqs.next().await {
        let span = span!(Level::INFO, "client request", uuid = %conn_id, defer = true);
        let _enter = span.enter();

        let resp = match req {
            ClientRequest::SshKey(account_id) => cachelayer
                .get_sshkeys(account_id.as_str())
                .await
                .map(ClientResponse::SshKeys)
                .unwrap_or_else(|_| {
                    error!("unable to load keys, returning empty set.");
                    ClientResponse::SshKeys(vec![])
                }),
            ClientRequest::NssAccounts => cachelayer
                .get_nssaccounts()
                .await
                .map(ClientResponse::NssAccounts)
                .unwrap_or_else(|_| {
                    error!("unable to enum accounts");
                    ClientResponse::NssAccounts(Vec::new())
                }),
            ClientRequest::NssAccountByUid(gid) => cachelayer
                .get_nssaccount_gid(gid)
                .await
                .map(ClientResponse::NssAccount)
                .unwrap_or_else(|_| {
                    error!("unable to load account, returning empty.");
                    ClientResponse::NssAccount(None)
                }),
            ClientRequest::NssAccountByName(account_id) => cachelayer
                .get_nssaccount_name(account_id.as_str())
                .await
                .map(ClientResponse::NssAccount)
                .unwrap_or_else(|_| {
                    error!("unable to load account, returning empty.");
                    ClientResponse::NssAccount(None)
                }),
            ClientRequest::NssGroups => cachelayer
                .get_nssgroups()
                .await
                .map(ClientResponse::NssGroups)
                .unwrap_or_else(|_| {
                    error!("unable to enum groups");
                    ClientResponse::NssGroups(Vec::new())
                }),
            ClientRequest::NssGroupByGid(gid) => cachelayer
                .get_nssgroup_gid(gid)
                .await
                .map(ClientResponse::NssGroup)
                .unwrap_or_else(|_| {
                    error!("unable to load group, returning empty.");
                    ClientResponse::NssGroup(None)
                }),
            ClientRequest::NssGroupByName(grp_id) => cachelayer
                .get_nssgroup_name(grp_id.as_str())
                .await
                .map(ClientResponse::NssGroup)
                .unwrap_or_else(|_| {
                    error!("unable to load group, returning empty.");
                    ClientResponse::NssGroup(None)
                }),
            ClientRequest::PamAuthenticateInit { account_id, info } => {
                match &pam_auth_session_state {
                    Some(_auth_session) => {
                        warn!("Attempt to init auth session while current session is active");
                        pam_auth_session_state = None;
                        ClientResponse::Error(OperationError::KU001InitWhileSessionActive)
                    }
                    None => {
                        let current_time = OffsetDateTime::now_utc();

                        match cachelayer
                            .pam_account_authenticate_init(
                                account_id.as_str(),
                                &info,
                                current_time,
                                shutdown_tx.subscribe(),
                            )
                            .await
                        {
                            Ok((auth_session, pam_auth_response)) => {
                                pam_auth_session_state = Some(auth_session);
                                pam_auth_response.into()
                            }
                            Err(_) => ClientResponse::Error(OperationError::KU004PamInitFailed),
                        }
                    }
                }
            }
            ClientRequest::PamAuthenticateStep(pam_next_req) => match &mut pam_auth_session_state {
                Some(auth_session) => cachelayer
                    .pam_account_authenticate_step(auth_session, pam_next_req)
                    .await
                    .map(|pam_auth_response| pam_auth_response.into())
                    .unwrap_or(ClientResponse::Error(OperationError::KU003PamAuthFailed)),
                None => {
                    warn!("Attempt to continue auth session while current session is inactive");
                    ClientResponse::Error(OperationError::KU002ContinueWhileSessionInActive)
                }
            },
            ClientRequest::PamAccountAllowed(account_id) => cachelayer
                .pam_account_allowed(account_id.as_str())
                .await
                .map(ClientResponse::PamStatus)
                .unwrap_or(ClientResponse::Error(
                    OperationError::KU005ErrorCheckingAccount,
                )),
            ClientRequest::PamAccountBeginSession(account_id) => {
                match cachelayer
                    .pam_account_beginsession(account_id.as_str())
                    .await
                {
                    Ok(Some(info)) => {
                        let (tx, rx) = oneshot::channel();

                        match task_channel_tx
                            .send_timeout(
                                AsyncTaskRequest {
                                    task_req: TaskRequest::HomeDirectory(info),
                                    task_chan: tx,
                                },
                                Duration::from_millis(100),
                            )
                            .await
                        {
                            Ok(()) => {
                                match tokio::time::timeout_at(
                                    tokio::time::Instant::now() + Duration::from_millis(1000),
                                    rx,
                                )
                                .await
                                {
                                    Ok(Ok(_)) => {
                                        debug!("Task completed, returning to pam ...");
                                        ClientResponse::Ok
                                    }
                                    _ => ClientResponse::Error(OperationError::KG001TaskTimeout),
                                }
                            }
                            Err(_) => ClientResponse::Error(OperationError::KG002TaskCommFailure),
                        }
                    }
                    Ok(None) => ClientResponse::Ok,
                    Err(_) => ClientResponse::Error(OperationError::KU005ErrorCheckingAccount),
                }
            }
            ClientRequest::InvalidateCache => cachelayer
                .invalidate()
                .await
                .map(|_| ClientResponse::Ok)
                .unwrap_or(ClientResponse::Error(OperationError::KG003CacheClearFailed)),
            ClientRequest::ClearCache => {
                if ucred.uid() == 0 {
                    cachelayer
                        .clear_cache()
                        .await
                        .map(|_| ClientResponse::Ok)
                        .unwrap_or(ClientResponse::Error(OperationError::KG003CacheClearFailed))
                } else {
                    error!("{}", OperationError::KU006OnlyRootAllowed);
                    ClientResponse::Error(OperationError::KU006OnlyRootAllowed)
                }
            }
            ClientRequest::Status => {
                let status = cachelayer.provider_status().await;
                ClientResponse::ProviderStatus(status)
            }
        };
        reqs.send(resp).await?;
        reqs.flush().await?;
        trace!("flushed response!");
    }

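    // The client has disconnected: signal any in-flight PAM auth sessions
    // (they hold receivers subscribed to shutdown_tx) so they stop waiting.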
    if let Err(shutdown_err) = shutdown_tx.send(()) {
        warn!(
            ?shutdown_err,
            "Unable to signal tasks to stop, they will naturally timeout instead."
        )
    }

    let span = span!(Level::DEBUG, "disconnecting client", uuid = %conn_id);
    let _enter = span.enter();
    debug!(uid = ?ucred.uid(), gid = ?ucred.gid(), pid = ?ucred.pid());

    Ok(())
}

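/// Read the HSM PIN from `hsm_pin_path`, erroring if the file is missing or
/// unreadable.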
async fn read_hsm_pin(hsm_pin_path: &str) -> Result<Vec<u8>, Box<dyn Error>> {
    if !PathBuf::from_str(hsm_pin_path)?.exists() {
        return Err(std::io::Error::new(
            std::io::ErrorKind::NotFound,
            format!("HSM PIN file '{hsm_pin_path}' not found"),
        )
        .into());
    }

    let mut file = File::open(hsm_pin_path).await?;
    let mut contents = vec![];
    file.read_to_end(&mut contents).await?;
    Ok(contents)
}

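/// Generate and persist a new HSM PIN at `hsm_pin_path` if one does not
/// already exist.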
async fn write_hsm_pin(hsm_pin_path: &str) -> Result<(), Box<dyn Error>> {
    if !PathBuf::from_str(hsm_pin_path)?.exists() {
        let new_pin = AuthValue::generate().map_err(|hsm_err| {
            error!(?hsm_err, "Unable to generate new pin");
            std::io::Error::other("Unable to generate new pin")
        })?;

        std::fs::write(hsm_pin_path, new_pin)?;

        info!("Generated new HSM pin");
    }

    Ok(())
}

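// Open the hardware TPM described by the TCTI name string. In builds without
// the "tpm" feature this always fails with an error.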
#[cfg(feature = "tpm")]
fn open_tpm(tcti_name: &str) -> Option<BoxedDynTpm> {
    use kanidm_hsm_crypto::provider::TssTpm;
    match TssTpm::new(tcti_name) {
        Ok(tpm) => {
            debug!("opened hw tpm");
            Some(BoxedDynTpm::new(tpm))
        }
        Err(tpm_err) => {
            error!(?tpm_err, "Unable to open requested tpm device");
            None
        }
    }
}

#[cfg(not(feature = "tpm"))]
fn open_tpm(_tcti_name: &str) -> Option<BoxedDynTpm> {
    error!("Hardware TPM support was not enabled in this build. Unable to proceed");
    None
}

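// As above, but fall back to the software TPM instead of failing when the
// hardware device is unavailable or the "tpm" feature is disabled.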
#[cfg(feature = "tpm")]
fn open_tpm_if_possible(tcti_name: &str) -> BoxedDynTpm {
    use kanidm_hsm_crypto::provider::TssTpm;
    match TssTpm::new(tcti_name) {
        Ok(tpm) => {
            debug!("opened hw tpm");
            BoxedDynTpm::new(tpm)
        }
        Err(tpm_err) => {
            warn!(
                ?tpm_err,
                "Unable to open requested tpm device, falling back to soft tpm"
            );
            BoxedDynTpm::new(SoftTpm::new())
        }
    }
}

#[cfg(not(feature = "tpm"))]
fn open_tpm_if_possible(_tcti_name: &str) -> BoxedDynTpm {
    debug!("opened soft tpm");
    BoxedDynTpm::new(SoftTpm::default())
}

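/// Daemon entry point: parse arguments, validate configuration and file
/// permissions, open the cache database and HSM, start the identity
/// providers, then serve the resolver and task sockets until a shutdown
/// signal is received.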
#[tokio::main(flavor = "current_thread")]
async fn main() -> ExitCode {
    #[cfg(all(target_os = "linux", not(debug_assertions)))]
    if let Err(code) = prctl::set_dumpable(false) {
        error!(?code, "CRITICAL: Unable to set prctl flags");
        return ExitCode::FAILURE;
    }

    #[cfg(feature = "dhat-heap")]
    let _profiler = dhat::Profiler::builder()
        .file_name(format!(
            "/var/cache/kanidm-unixd/heap-{}.json",
            std::process::id()
        ))
        .trim_backtraces(Some(40))
        .build();

    let cuid = get_current_uid();
    let ceuid = get_effective_uid();
    let cgid = get_current_gid();
    let cegid = get_effective_gid();

    let clap_args = Command::new("kanidm_unixd")
        .version(env!("CARGO_PKG_VERSION"))
        .about("Kanidm Unix daemon")
        .arg(
            Arg::new("skip-root-check")
                .help("Allow running as root. Don't use this in production as it is risky!")
                .short('r')
                .long("skip-root-check")
                .env("KANIDM_SKIP_ROOT_CHECK")
                .action(ArgAction::SetTrue),
        )
        .arg(
            Arg::new("debug")
                .help("Show extra debug information")
                .short('d')
                .long("debug")
                .env("KANIDM_DEBUG")
                .action(ArgAction::SetTrue),
        )
        .arg(
            Arg::new("configtest")
                .help("Display the configuration and exit")
                .short('t')
                .long("configtest")
                .action(ArgAction::SetTrue),
        )
        .arg(
            Arg::new("unixd-config")
                .help("Set the unixd config file path")
                .short('u')
                .long("unixd-config")
                .default_value(DEFAULT_CONFIG_PATH)
                .env("KANIDM_UNIX_CONFIG")
                .action(ArgAction::Set),
        )
        .arg(
            Arg::new("client-config")
                .help("Set the client config file path")
                .short('c')
                .long("client-config")
                .default_value(DEFAULT_CLIENT_CONFIG_PATH)
                .env("KANIDM_CLIENT_CONFIG")
                .action(ArgAction::Set),
        )
        .get_matches();

    if clap_args.get_flag("debug") {
        std::env::set_var("RUST_LOG", "debug");
    }

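    // Initialise logging via tracing_forest. The remainder of startup and the
    // daemon main loop run inside the `.on(async { ... })` block below.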
    #[allow(clippy::expect_used)]
    tracing_forest::worker_task()
        .set_global(true)
        .map_sender(|sender| sender.or_stderr())
        .build_on(|subscriber| subscriber
            .with(EnvFilter::try_from_default_env()
                .or_else(|_| EnvFilter::try_new("info"))
                .expect("Failed to init envfilter")
            )
        )
        .on(async {
            let span = span!(Level::DEBUG, "starting resolver");
            let _enter = span.enter();

            if clap_args.get_flag("skip-root-check") {
                warn!("Skipping root user check; if you're running this for testing, ensure you clean up temporary files.")
            } else if cuid == 0 || ceuid == 0 || cgid == 0 || cegid == 0 {
                error!("Refusing to run - this process must not operate as root.");
                return ExitCode::FAILURE
            };

            debug!("Profile -> {}", env!("KANIDM_PROFILE_NAME"));
            debug!("CPU Flags -> {}", env!("KANIDM_CPU_FLAGS"));

            let Some(cfg_path_str) = clap_args.get_one::<String>("client-config") else {
                error!("Failed to pull the client config path");
                return ExitCode::FAILURE
            };
            let cfg_path: PathBuf = PathBuf::from(cfg_path_str);

            if !cfg_path.exists() {
                error!(
                    "Client config missing from {} - cannot start up. Quitting.",
                    cfg_path_str
                );
                let diag = kanidm_lib_file_permissions::diagnose_path(cfg_path.as_ref());
                info!(%diag);
                return ExitCode::FAILURE
            } else {
                let cfg_meta = match metadata(&cfg_path) {
                    Ok(v) => v,
                    Err(e) => {
                        error!("Unable to read metadata for {} - {:?}", cfg_path_str, e);
                        let diag = kanidm_lib_file_permissions::diagnose_path(cfg_path.as_ref());
                        info!(%diag);
                        return ExitCode::FAILURE
                    }
                };
                if !kanidm_lib_file_permissions::readonly(&cfg_meta) {
                    warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...",
                        cfg_path_str
                    );
                }

                if cfg_meta.uid() == cuid || cfg_meta.uid() == ceuid {
                    warn!("WARNING: {} owned by the current uid, which may allow file permission changes. This could be a security risk ...",
                        cfg_path_str
                    );
                }
            }

            let Some(unixd_path_str) = clap_args.get_one::<String>("unixd-config") else {
                error!("Failed to pull the unixd config path");
                return ExitCode::FAILURE
            };
            let unixd_path = PathBuf::from(unixd_path_str);

            if !unixd_path.exists() {
                error!(
                    "unixd config missing from {} - cannot start up. Quitting.",
                    unixd_path_str
                );
                let diag = kanidm_lib_file_permissions::diagnose_path(unixd_path.as_ref());
                info!(%diag);
                return ExitCode::FAILURE
            } else {
                let unixd_meta = match metadata(&unixd_path) {
                    Ok(v) => v,
                    Err(e) => {
                        error!("Unable to read metadata for {} - {:?}", unixd_path_str, e);
                        let diag = kanidm_lib_file_permissions::diagnose_path(unixd_path.as_ref());
                        info!(%diag);
                        return ExitCode::FAILURE
                    }
                };
                if !kanidm_lib_file_permissions::readonly(&unixd_meta) {
                    warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...",
                        unixd_path_str);
                }

                if unixd_meta.uid() == cuid || unixd_meta.uid() == ceuid {
                    warn!("WARNING: {} owned by the current uid, which may allow file permission changes. This could be a security risk ...",
                        unixd_path_str
                    );
                }
            }

            let cfg = match UnixdConfig::new().read_options_from_optional_config(&unixd_path) {
                Ok(v) => v,
                Err(_) => {
                    error!("Failed to parse {}", unixd_path_str);
                    return ExitCode::FAILURE
                }
            };

            let client_builder = if let Some(kconfig) = &cfg.kanidm_config {
                let cb = match KanidmClientBuilder::new().read_options_from_optional_config(&cfg_path) {
                    Ok(v) => v,
                    Err(_) => {
                        error!("Failed to parse {}", cfg_path_str);
                        return ExitCode::FAILURE
                    }
                };

                Some((cb, kconfig))
            } else {
                None
            };

            if clap_args.get_flag("configtest") {
                eprintln!("###################################");
                eprintln!("Dumping configs:\n###################################");
                eprintln!("kanidm_unixd config (from {:#?})", &unixd_path);
                eprintln!("{cfg}");
                eprintln!("###################################");
                if let Some((cb, _)) = client_builder.as_ref() {
                    eprintln!("kanidm client config (from {:#?})", &cfg_path);
                    eprintln!("{cb}");
                } else {
                    eprintln!("kanidm client: disabled");
                }
                return ExitCode::SUCCESS;
            }

            debug!("🧹 Cleaning up sockets from previous invocations");
            rm_if_exist(cfg.sock_path.as_str());
            rm_if_exist(cfg.task_sock_path.as_str());

            if !cfg.cache_db_path.is_empty() {
                let cache_db_path = PathBuf::from(cfg.cache_db_path.as_str());
                if let Some(db_parent_path) = cache_db_path.parent() {
                    if !db_parent_path.exists() {
                        error!(
                            "Refusing to run, DB folder {} does not exist",
                            db_parent_path
                                .to_str()
                                .unwrap_or("<db_parent_path invalid>")
                        );
                        let diag = kanidm_lib_file_permissions::diagnose_path(cache_db_path.as_ref());
                        info!(%diag);
                        return ExitCode::FAILURE
                    }

                    let db_par_path_buf = db_parent_path.to_path_buf();

                    let i_meta = match metadata(&db_par_path_buf) {
                        Ok(v) => v,
                        Err(e) => {
                            error!(
                                "Unable to read metadata for {} - {:?}",
                                db_par_path_buf
                                    .to_str()
                                    .unwrap_or("<db_par_path_buf invalid>"),
                                e
                            );
                            return ExitCode::FAILURE
                        }
                    };

                    if !i_meta.is_dir() {
                        error!(
                            "Refusing to run - DB folder {} is not a directory",
                            db_par_path_buf
                                .to_str()
                                .unwrap_or("<db_par_path_buf invalid>")
                        );
                        return ExitCode::FAILURE
                    }
                    if kanidm_lib_file_permissions::readonly(&i_meta) {
                        warn!("WARNING: DB folder permissions on {} indicate it may not be RW. This could cause the server startup to fail!", db_par_path_buf.to_str()
                            .unwrap_or("<db_par_path_buf invalid>")
                        );
                    }

                    if i_meta.mode() & 0o007 != 0 {
                        warn!("WARNING: DB folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", db_par_path_buf.to_str()
                            .unwrap_or("<db_par_path_buf invalid>")
                        );
                    }
                }

                if cache_db_path.exists() {
                    if !cache_db_path.is_file() {
                        error!(
                            "Refusing to run - DB path {} already exists and is not a file.",
                            cache_db_path.to_str().unwrap_or("<cache_db_path invalid>")
                        );
                        let diag = kanidm_lib_file_permissions::diagnose_path(cache_db_path.as_ref());
                        info!(%diag);
                        return ExitCode::FAILURE
                    };

                    match metadata(&cache_db_path) {
                        Ok(v) => v,
                        Err(e) => {
                            error!(
                                "Unable to read metadata for {} - {:?}",
                                cache_db_path.to_str().unwrap_or("<cache_db_path invalid>"),
                                e
                            );
                            let diag = kanidm_lib_file_permissions::diagnose_path(cache_db_path.as_ref());
                            info!(%diag);
                            return ExitCode::FAILURE
                        }
                    };
                };
            }

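            // Open (or create) the cache database and apply any pending migrations.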
            let db = match Db::new(cfg.cache_db_path.as_str()) {
                Ok(db) => db,
                Err(_e) => {
                    error!("Failed to create database");
                    return ExitCode::FAILURE
                }
            };

            let mut dbtxn = db.write().await;
            if dbtxn.migrate().and_then(|_| dbtxn.commit()).is_err() {
                error!("Failed to migrate database");
                return ExitCode::FAILURE
            }

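            // Make sure an HSM PIN exists on disk, then read it back and parse it
            // into an AuthValue used to unlock the HSM keys.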
            if let Err(err) = write_hsm_pin(cfg.hsm_pin_path.as_str()).await {
                let diag = kanidm_lib_file_permissions::diagnose_path(cfg.hsm_pin_path.as_ref());
                info!(%diag);
                error!(?err, "Failed to create HSM PIN at {}", cfg.hsm_pin_path.as_str());
                return ExitCode::FAILURE
            };

            let hsm_pin = match read_hsm_pin(cfg.hsm_pin_path.as_str()).await {
                Ok(hp) => hp,
                Err(err) => {
                    let diag = kanidm_lib_file_permissions::diagnose_path(cfg.hsm_pin_path.as_ref());
                    info!(%diag);
                    error!(?err, "Failed to read HSM PIN from {}", cfg.hsm_pin_path.as_str());
                    return ExitCode::FAILURE
                }
            };

            let auth_value = match AuthValue::try_from(hsm_pin.as_slice()) {
                Ok(av) => av,
                Err(err) => {
                    error!(?err, "invalid hsm pin");
                    return ExitCode::FAILURE
                }
            };

            let mut hsm: BoxedDynTpm = match cfg.hsm_type {
                HsmType::Soft => BoxedDynTpm::new(SoftTpm::default()),
                HsmType::TpmIfPossible => open_tpm_if_possible(&cfg.tpm_tcti_name),
                HsmType::Tpm => match open_tpm(&cfg.tpm_tcti_name) {
                    Some(hsm) => hsm,
                    None => return ExitCode::FAILURE,
                },
            };

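            // Load (or create on first start) the loadable HSM root storage key,
            // then unseal the machine key with the HSM PIN. The machine key is
            // used when configuring the Kanidm provider below.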
            let mut db_txn = db.write().await;

            let loadable_machine_key = match db_txn.get_hsm_root_storage_key() {
                Ok(Some(lmk)) => lmk,
                Ok(None) => {
                    let loadable_machine_key = match hsm.root_storage_key_create(&auth_value) {
                        Ok(lmk) => lmk,
                        Err(err) => {
                            error!(?err, "Unable to create hsm loadable machine key");
                            return ExitCode::FAILURE
                        }
                    };

                    if let Err(err) = db_txn.insert_hsm_root_storage_key(&loadable_machine_key) {
                        error!(?err, "Unable to persist hsm loadable machine key");
                        return ExitCode::FAILURE
                    }

                    loadable_machine_key
                }
                Err(err) => {
                    error!(?err, "Unable to access hsm loadable machine key");
                    return ExitCode::FAILURE
                }
            };

            let machine_key = match hsm.root_storage_key_load(&auth_value, &loadable_machine_key) {
                Ok(mk) => mk,
                Err(err) => {
                    error!(?err, "Unable to load machine root key - This can occur if you have changed your HSM pin");
                    error!("To proceed you must remove the content of the cache db ({}) to reset all keys", cfg.cache_db_path.as_str());
                    return ExitCode::FAILURE
                }
            };

            let Ok(system_provider) = SystemProvider::new() else {
                error!("Failed to configure System Provider");
                return ExitCode::FAILURE
            };

            info!("Started system provider");

            let mut clients: Vec<Arc<dyn IdProvider + Send + Sync>> = Vec::with_capacity(1);

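            // If a kanidm client is configured, build it and register the Kanidm
            // identity provider alongside the system provider.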
            if let Some((cb, kconfig)) = client_builder {
                let cb = cb.connect_timeout(kconfig.conn_timeout);
                let cb = cb.request_timeout(kconfig.request_timeout);

                let rsclient = match cb.build() {
                    Ok(rsc) => rsc,
                    Err(_e) => {
                        error!("Failed to build async client");
                        return ExitCode::FAILURE
                    }
                };

                let Ok(idprovider) = KanidmProvider::new(
                    rsclient,
                    kconfig,
                    SystemTime::now(),
                    &mut (&mut db_txn).into(),
                    &mut hsm,
                    &machine_key,
                ) else {
                    error!("Failed to configure Kanidm Provider");
                    return ExitCode::FAILURE
                };

                clients.push(Arc::new(idprovider));
                info!("Started kanidm provider");
            }

            drop(machine_key);

            if let Err(err) = db_txn.commit() {
                error!(?err, "Failed to commit database transaction, unable to proceed");
                return ExitCode::FAILURE
            }

            if !cfg.default_shell.is_empty() {
                let shell_path = PathBuf::from_str(&cfg.default_shell)
                    .expect("Failed to build a representation of your default_shell path!");
                if !shell_path.exists() {
                    error!("Cannot find configured default shell at {}, this could cause login issues!", shell_path.display())
                }
            }

            let cl_inner = match Resolver::new(
                db,
                Arc::new(system_provider),
                clients,
                hsm,
                cfg.cache_timeout,
                cfg.default_shell.clone(),
                cfg.home_prefix.clone(),
                cfg.home_attr,
                cfg.home_alias,
                cfg.uid_attr_map,
                cfg.gid_attr_map,
            )
            .await
            {
                Ok(c) => c,
                Err(_e) => {
                    error!("Failed to build cache layer.");
                    return ExitCode::FAILURE
                }
            };

            let cachelayer = Arc::new(cl_inner);

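            // Bind the task socket with a restrictive umask so unprivileged users
            // cannot connect; the accept loop below additionally requires the peer
            // to be root.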
            let before = unsafe { umask(0o0077) };
            let task_listener = match UnixListener::bind(cfg.task_sock_path.as_str()) {
                Ok(l) => l,
                Err(_e) => {
                    let diag = kanidm_lib_file_permissions::diagnose_path(cfg.task_sock_path.as_ref());
                    info!(%diag);
                    error!("Failed to bind UNIX socket {}", cfg.task_sock_path.as_str());
                    return ExitCode::FAILURE
                }
            };
            let _ = unsafe { umask(before) };

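            // Channels wiring the pieces together: task requests flow from client
            // handlers to the task daemon connection, shadow change notifications
            // flow back to the reload task, and the broadcast channel signals
            // shutdown.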
            let (task_channel_tx, mut task_channel_rx) = channel(16);
            let task_channel_tx = Arc::new(task_channel_tx);
            let task_channel_tx_cln = task_channel_tx.clone();
            let (notify_shadow_channel_tx, mut notify_shadow_channel_rx) = channel(16);
            let notify_shadow_channel_tx = Arc::new(notify_shadow_channel_tx);

            let (broadcast_tx, mut broadcast_rx) = broadcast::channel(4);
            let mut c_broadcast_rx = broadcast_tx.subscribe();
            let mut d_broadcast_rx = broadcast_tx.subscribe();

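            // Accept loop for the task socket. Only connections whose peer is root
            // are accepted, and each one is driven by handle_task_client until the
            // shutdown broadcast fires.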
            let task_b = tokio::spawn(async move {
                loop {
                    tokio::select! {
                        _ = c_broadcast_rx.recv() => {
                            break;
                        }
                        accept_res = task_listener.accept() => {
                            match accept_res {
                                Ok((socket, _addr)) => {
                                    if let Ok(ucred) = socket.peer_cred() {
                                        if ucred.uid() != 0 {
                                            warn!("Task handler not running as root, ignoring ...");
                                            continue;
                                        }
                                    } else {
                                        warn!("Unable to determine socket peer cred, ignoring ...");
                                        continue;
                                    };
                                    debug!("A task handler has connected.");
                                    if let Err(err) = handle_task_client(
                                        socket,
                                        &notify_shadow_channel_tx,
                                        &mut task_channel_rx,
                                        &mut d_broadcast_rx,
                                    )
                                    .await
                                    {
                                        error!(?err, "Task client error occurred");
                                    }
                                }
                                Err(err) => {
                                    error!("Task Accept error -> {:?}", err);
                                }
                            }
                        }
                    }
                }
                info!("Stopped task connector");
            });

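            // When the task daemon reports that the system passwd/group/shadow files
            // changed, reload the system identities held by the resolver.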
            let shadow_notify_cachelayer = cachelayer.clone();
            let mut c_broadcast_rx = broadcast_tx.subscribe();

            let task_c = tokio::spawn(async move {
                debug!("Spawned shadow reload task handler");
                loop {
                    tokio::select! {
                        _ = c_broadcast_rx.recv() => {
                            break;
                        }
                        Some(EtcDb {
                            users, shadow, groups
                        }) = notify_shadow_channel_rx.recv() => {
                            shadow_notify_cachelayer
                                .reload_system_identities(users, shadow, groups)
                                .await;
                        }
                    }
                }
                info!("Stopped shadow reload task handler");
            });

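            // The main resolver socket is created world-accessible (umask 0) so that
            // any local process (NSS / PAM clients) can query the resolver.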
            let before = unsafe { umask(0) };
            let listener = match UnixListener::bind(cfg.sock_path.as_str()) {
                Ok(l) => l,
                Err(_e) => {
                    error!("Failed to bind UNIX socket at {}", cfg.sock_path.as_str());
                    return ExitCode::FAILURE
                }
            };
            let _ = unsafe { umask(before) };

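            // Accept loop for the resolver socket: every connection is served
            // concurrently by handle_client.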
            let task_a = tokio::spawn(async move {
                loop {
                    let tc_tx = task_channel_tx_cln.clone();

                    tokio::select! {
                        _ = broadcast_rx.recv() => {
                            break;
                        }
                        accept_res = listener.accept() => {
                            match accept_res {
                                Ok((socket, _addr)) => {
                                    let cachelayer_ref = cachelayer.clone();
                                    tokio::spawn(async move {
                                        if let Err(e) =
                                            handle_client(socket, cachelayer_ref.clone(), &tc_tx).await
                                        {
                                            error!("handle_client error occurred; error = {:?}", e);
                                        }
                                    });
                                }
                                Err(err) => {
                                    error!("Error while handling connection -> {:?}", err);
                                }
                            }
                        }
                    }
                }
                info!("Stopped resolver");
            });

            info!("Server started ...");

            drop(_enter);

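            // Notify systemd that startup is complete, then wait on signals until we
            // are asked to shut down.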
            #[cfg(target_os = "linux")]
            let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]);

            loop {
                tokio::select! {
                    Ok(()) = tokio::signal::ctrl_c() => {
                        break
                    }
                    Some(()) = async move {
                        let sigterm = tokio::signal::unix::SignalKind::terminate();
                        #[allow(clippy::unwrap_used)]
                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                    } => {
                        break
                    }
                    Some(()) = async move {
                        let sigterm = tokio::signal::unix::SignalKind::alarm();
                        #[allow(clippy::unwrap_used)]
                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                    } => {
                    }
                    Some(()) = async move {
                        let sigterm = tokio::signal::unix::SignalKind::hangup();
                        #[allow(clippy::unwrap_used)]
                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                    } => {
                    }
                    Some(()) = async move {
                        let sigterm = tokio::signal::unix::SignalKind::user_defined1();
                        #[allow(clippy::unwrap_used)]
                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                    } => {
                    }
                    Some(()) = async move {
                        let sigterm = tokio::signal::unix::SignalKind::user_defined2();
                        #[allow(clippy::unwrap_used)]
                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                    } => {
                    }
                }
            }
            info!("Signal received, sending shutdown signal to tasks");
            if let Err(e) = broadcast_tx.send(true) {
                error!("Unable to shutdown workers {:?}", e);
            }

            let _ = task_a.await;
            let _ = task_b.await;
            let _ = task_c.await;

            ExitCode::SUCCESS
        })
        .await
}