kanidmd_lib/server/
migrations.rs

1use crate::prelude::*;
2
3use crate::migration_data;
4use kanidm_proto::internal::{
5    DomainUpgradeCheckItem as ProtoDomainUpgradeCheckItem,
6    DomainUpgradeCheckReport as ProtoDomainUpgradeCheckReport,
7    DomainUpgradeCheckStatus as ProtoDomainUpgradeCheckStatus,
8};
9
10use super::ServerPhase;
11
12impl QueryServer {
13    #[instrument(level = "info", name = "system_initialisation", skip_all)]
14    pub async fn initialise_helper(
15        &self,
16        ts: Duration,
17        domain_target_level: DomainVersion,
18    ) -> Result<(), OperationError> {
19        // We need to perform this in a single transaction pass to prevent tainting
20        // databases during upgrades.
21        let mut write_txn = self.write(ts).await?;
22
23        // Check our database version - attempt to do an initial indexing
24        // based on the in memory configuration. This ONLY triggers ONCE on
25        // the very first run of the instance when the DB in newely created.
26        write_txn.upgrade_reindex(SYSTEM_INDEX_VERSION)?;
27
28        // Because we init the schema here, and commit, this reloads meaning
29        // that the on-disk index meta has been loaded, so our subsequent
30        // migrations will be correctly indexed.
31        //
32        // Remember, that this would normally mean that it's possible for schema
33        // to be mis-indexed (IE we index the new schemas here before we read
34        // the schema to tell us what's indexed), but because we have the in
35        // mem schema that defines how schema is structured, and this is all
36        // marked "system", then we won't have an issue here.
37        write_txn
38            .initialise_schema_core()
39            .and_then(|_| write_txn.reload())?;
40
41        // This is what tells us if the domain entry existed before or not. This
42        // is now the primary method of migrations and version detection.
43        let db_domain_version = match write_txn.internal_search_uuid(UUID_DOMAIN_INFO) {
44            Ok(e) => Ok(e.get_ava_single_uint32(Attribute::Version).unwrap_or(0)),
45            Err(OperationError::NoMatchingEntries) => Ok(0),
46            Err(r) => Err(r),
47        }?;
48
49        debug!(?db_domain_version, "Before setting internal domain info");
50
51        if db_domain_version == 0 {
52            // This is here to catch when we increase domain levels but didn't create the migration
53            // hooks. If this fails it probably means you need to add another migration hook
54            // in the above.
55            debug_assert!(domain_target_level <= DOMAIN_MAX_LEVEL);
56
57            // No domain info was present, so neither was the rest of the IDM. Bring up the
58            // full IDM here.
59            match domain_target_level {
60                DOMAIN_LEVEL_9 => write_txn.migrate_domain_8_to_9()?,
61                DOMAIN_LEVEL_10 => write_txn.migrate_domain_9_to_10()?,
62                DOMAIN_LEVEL_11 => write_txn.migrate_domain_10_to_11()?,
63                DOMAIN_LEVEL_12 => write_txn.migrate_domain_11_to_12()?,
64                DOMAIN_LEVEL_13 => write_txn.migrate_domain_12_to_13()?,
65                _ => {
66                    error!("Invalid requested domain target level for server bootstrap");
67                    debug_assert!(false);
68                    return Err(OperationError::MG0009InvalidTargetLevelForBootstrap);
69                }
70            }
71        } else {
72            // Domain info was present, so we need to reflect that in our server
73            // domain structures. If we don't do this, the in memory domain level
74            // is stuck at 0 which can confuse init domain info below.
75            //
76            // This also is where the former domain taint flag will be loaded to
77            // d_info so that if the *previous* execution of the database was
78            // a devel version, we'll still trigger the forced remigration in
79            // in the case that we are moving from dev -> stable.
80            write_txn.force_domain_reload();
81
82            write_txn.reload()?;
83
84            // Indicate the schema is now ready, which allows dyngroups to work when they
85            // are created in the next phase of migrations.
86            write_txn.set_phase(ServerPhase::SchemaReady);
87
88            // #2756 - if we *aren't* creating the base IDM entries, then we
89            // need to force dyn groups to reload since we're now at schema
90            // ready. This is done indirectly by ... reloading the schema again.
91            //
92            // This is because dyngroups don't load until server phase >= schemaready
93            // and the reload path for these is either a change in the dyngroup entry
94            // itself or a change to schema reloading. Since we aren't changing the
95            // dyngroup here, we have to go via the schema reload path.
96            write_txn.force_schema_reload();
97
98            // Reload as init idm affects access controls.
99            write_txn.reload()?;
100
101            // Domain info is now ready and reloaded, we can proceed.
102            write_txn.set_phase(ServerPhase::DomainInfoReady);
103        }
104
105        // This is the start of domain info related migrations which we will need in future
106        // to handle replication. Due to the access control rework, and the addition of "managed by"
107        // syntax, we need to ensure both nodes "fence" replication from each other. We do this
108        // by changing domain infos to be incompatible during this phase.
109
110        // The reloads will have populated this structure now.
111        let domain_info_version = write_txn.get_domain_version();
112        let domain_patch_level = write_txn.get_domain_patch_level();
113        let domain_development_taint = write_txn.get_domain_development_taint();
114        debug!(
115            ?db_domain_version,
116            ?domain_patch_level,
117            ?domain_development_taint,
118            "After setting internal domain info"
119        );
120
121        let mut reload_required = false;
122
123        // If the database domain info is a lower version than our target level, we reload.
124        if domain_info_version < domain_target_level {
125            write_txn
126                .internal_apply_domain_migration(domain_target_level)
127                .map(|()| {
128                    warn!("Domain level has been raised to {}", domain_target_level);
129                })?;
130            // Reload if anything in migrations requires it - this triggers the domain migrations
131            // which in turn can trigger schema reloads etc. If the server was just brought up
132            // then we don't need the extra reload since we are already at the correct
133            // version of the server, and this call to set the target level is just for persistance
134            // of the value.
135            if domain_info_version != 0 {
136                reload_required = true;
137            }
138        } else if domain_development_taint {
139            // This forces pre-release versions to re-migrate each start up. This solves
140            // the domain-version-sprawl issue so that during a development cycle we can
141            // do a single domain version bump, and continue to extend the migrations
142            // within that release cycle to contain what we require.
143            //
144            // If this is a pre-release build
145            // AND
146            // we are NOT in a test environment
147            // AND
148            // We did not already need a version migration as above
149            write_txn.domain_remigrate(DOMAIN_PREVIOUS_TGT_LEVEL)?;
150
151            reload_required = true;
152        }
153
154        // If we are new enough to support patches, and we are lower than the target patch level
155        // then a reload will be applied after we raise the patch level.
156        if domain_patch_level < DOMAIN_TGT_PATCH_LEVEL {
157            write_txn
158                .internal_modify_uuid(
159                    UUID_DOMAIN_INFO,
160                    &ModifyList::new_purge_and_set(
161                        Attribute::PatchLevel,
162                        Value::new_uint32(DOMAIN_TGT_PATCH_LEVEL),
163                    ),
164                )
165                .map(|()| {
166                    warn!(
167                        "Domain patch level has been raised to {}",
168                        domain_patch_level
169                    );
170                })?;
171
172            reload_required = true;
173        };
174
175        // Execute whatever operations we have batched up and ready to go. This is needed
176        // to preserve ordering of the operations - if we reloaded after a remigrate then
177        // we would have skipped the patch level fix which needs to have occurred *first*.
178        if reload_required {
179            write_txn.reload()?;
180        }
181
182        // Now set the db/domain devel taint flag to match our current release status
183        // if it changes. This is what breaks the cycle of db taint from dev -> stable
184        let current_devel_flag = option_env!("KANIDM_PRE_RELEASE").is_some();
185        if current_devel_flag {
186            warn!("Domain Development Taint mode is enabled");
187        }
188        if domain_development_taint != current_devel_flag {
189            write_txn.internal_modify_uuid(
190                UUID_DOMAIN_INFO,
191                &ModifyList::new_purge_and_set(
192                    Attribute::DomainDevelopmentTaint,
193                    Value::Bool(current_devel_flag),
194                ),
195            )?;
196        }
197
198        // We are ready to run
199        write_txn.set_phase(ServerPhase::Running);
200
201        // Commit all changes, this also triggers the final reload, this should be a no-op
202        // since we already did all the needed loads above.
203        write_txn.commit()?;
204
205        debug!("Database version check and migrations success! ☀️  ");
206        Ok(())
207    }
208}
209
210impl QueryServerWriteTransaction<'_> {
211    /// Apply a domain migration `to_level`. Panics if `to_level` is not greater than the active
212    /// level.
213    pub(crate) fn internal_apply_domain_migration(
214        &mut self,
215        to_level: u32,
216    ) -> Result<(), OperationError> {
217        assert!(to_level > self.get_domain_version());
218        self.internal_modify_uuid(
219            UUID_DOMAIN_INFO,
220            &ModifyList::new_purge_and_set(Attribute::Version, Value::new_uint32(to_level)),
221        )
222        .and_then(|()| self.reload())
223    }
224
225    fn internal_migrate_or_create_batch(
226        &mut self,
227        msg: &str,
228        entries: Vec<EntryInitNew>,
229    ) -> Result<(), OperationError> {
230        let r: Result<(), _> = entries
231            .into_iter()
232            .try_for_each(|entry| self.internal_migrate_or_create(entry));
233
234        if let Err(err) = r {
235            error!(?err, msg);
236            debug_assert!(false);
237        }
238
239        Ok(())
240    }
241
242    #[instrument(level = "debug", skip_all)]
243    /// - If the thing exists:
244    ///   - Ensure the set of attributes match and are present
245    ///     (but don't delete multivalue, or extended attributes in the situation.
246    /// - If not:
247    ///   - Create the entry
248    ///
249    /// This will extra classes an attributes alone!
250    ///
251    /// NOTE: `gen_modlist*` IS schema aware and will handle multivalue correctly!
252    fn internal_migrate_or_create(
253        &mut self,
254        e: Entry<EntryInit, EntryNew>,
255    ) -> Result<(), OperationError> {
256        self.internal_migrate_or_create_ignore_attrs(e, &[])
257    }
258
259    /// This is the same as [QueryServerWriteTransaction::internal_migrate_or_create] but it will ignore the specified
260    /// list of attributes, so that if an admin has modified those values then we don't
261    /// stomp them.
262    #[instrument(level = "trace", skip_all)]
263    fn internal_migrate_or_create_ignore_attrs(
264        &mut self,
265        mut e: Entry<EntryInit, EntryNew>,
266        attrs: &[Attribute],
267    ) -> Result<(), OperationError> {
268        trace!("operating on {:?}", e.get_uuid());
269
270        let Some(filt) = e.filter_from_attrs(&[Attribute::Uuid]) else {
271            return Err(OperationError::FilterGeneration);
272        };
273
274        trace!("search {:?}", filt);
275
276        let results = self.internal_search(filt.clone())?;
277
278        if results.is_empty() {
279            // It does not exist. Create it.
280            self.internal_create(vec![e])
281        } else if results.len() == 1 {
282            // For each ignored attr, we remove it from entry.
283            for attr in attrs.iter() {
284                e.remove_ava(attr);
285            }
286
287            // If the thing is subset, pass
288            match e.gen_modlist_assert(&self.schema) {
289                Ok(modlist) => {
290                    // Apply to &results[0]
291                    trace!(?modlist);
292                    self.internal_modify(&filt, &modlist)
293                }
294                Err(e) => Err(OperationError::SchemaViolation(e)),
295            }
296        } else {
297            admin_error!(
298                "Invalid Result Set - Expected One Entry for {:?} - {:?}",
299                filt,
300                results
301            );
302            Err(OperationError::InvalidDbState)
303        }
304    }
305
306    /// Migration domain level 8 to 9 (1.5.0)
307    #[instrument(level = "info", skip_all)]
308    pub(crate) fn migrate_domain_8_to_9(&mut self) -> Result<(), OperationError> {
309        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_9 {
310            error!("Unable to raise domain level from 8 to 9.");
311            return Err(OperationError::MG0004DomainLevelInDevelopment);
312        }
313
314        // =========== Apply changes ==============
315        self.internal_migrate_or_create_batch(
316            "phase 1 - schema attrs",
317            migration_data::dl9::phase_1_schema_attrs(),
318        )?;
319
320        self.internal_migrate_or_create_batch(
321            "phase 2 - schema classes",
322            migration_data::dl9::phase_2_schema_classes(),
323        )?;
324
325        // Reload for the new schema.
326        self.reload()?;
327
328        // Reindex?
329        self.reindex(false)?;
330
331        // Set Phase
332        self.set_phase(ServerPhase::SchemaReady);
333
334        self.internal_migrate_or_create_batch(
335            "phase 3 - key provider",
336            migration_data::dl9::phase_3_key_provider(),
337        )?;
338
339        // Reload for the new key providers
340        self.reload()?;
341
342        self.internal_migrate_or_create_batch(
343            "phase 4 - system entries",
344            migration_data::dl9::phase_4_system_entries(),
345        )?;
346
347        // Reload for the new system entries
348        self.reload()?;
349
350        // Domain info is now ready and reloaded, we can proceed.
351        self.set_phase(ServerPhase::DomainInfoReady);
352
353        // Bring up the IDM entries.
354        self.internal_migrate_or_create_batch(
355            "phase 5 - builtin admin entries",
356            migration_data::dl9::phase_5_builtin_admin_entries()?,
357        )?;
358
359        self.internal_migrate_or_create_batch(
360            "phase 6 - builtin not admin entries",
361            migration_data::dl9::phase_6_builtin_non_admin_entries()?,
362        )?;
363
364        self.internal_migrate_or_create_batch(
365            "phase 7 - builtin access control profiles",
366            migration_data::dl9::phase_7_builtin_access_control_profiles(),
367        )?;
368
369        // Reload for all new access controls.
370        self.reload()?;
371
372        Ok(())
373    }
374
375    /// Patch Application - This triggers a one-shot fixup task for issue #3178
376    /// to force access controls to re-migrate in existing databases so that they're
377    /// content matches expected values.
378    #[instrument(level = "info", skip_all)]
379    pub(crate) fn migrate_domain_patch_level_2(&mut self) -> Result<(), OperationError> {
380        admin_warn!("applying domain patch 2.");
381
382        debug_assert!(*self.phase >= ServerPhase::SchemaReady);
383
384        let idm_data = migration_data::dl9::phase_7_builtin_access_control_profiles();
385
386        idm_data
387            .into_iter()
388            .try_for_each(|entry| self.internal_migrate_or_create(entry))
389            .map_err(|err| {
390                error!(?err, "migrate_domain_patch_level_2 -> Error");
391                err
392            })?;
393
394        self.reload()?;
395
396        Ok(())
397    }
398
399    /// Migration domain level 9 to 10 (1.6.0)
400    #[instrument(level = "info", skip_all)]
401    pub(crate) fn migrate_domain_9_to_10(&mut self) -> Result<(), OperationError> {
402        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_9 {
403            error!("Unable to raise domain level from 9 to 10.");
404            return Err(OperationError::MG0004DomainLevelInDevelopment);
405        }
406
407        // =========== Apply changes ==============
408        self.internal_migrate_or_create_batch(
409            "phase 1 - schema attrs",
410            migration_data::dl10::phase_1_schema_attrs(),
411        )?;
412
413        self.internal_migrate_or_create_batch(
414            "phase 2 - schema classes",
415            migration_data::dl10::phase_2_schema_classes(),
416        )?;
417
418        // Reload for the new schema.
419        self.reload()?;
420
421        // Since we just loaded in a ton of schema, lets reindex it incase we added
422        // new indexes, or this is a bootstrap and we have no indexes yet.
423        self.reindex(false)?;
424
425        // Set Phase
426        // Indicate the schema is now ready, which allows dyngroups to work when they
427        // are created in the next phase of migrations.
428        self.set_phase(ServerPhase::SchemaReady);
429
430        self.internal_migrate_or_create_batch(
431            "phase 3 - key provider",
432            migration_data::dl10::phase_3_key_provider(),
433        )?;
434
435        // Reload for the new key providers
436        self.reload()?;
437
438        self.internal_migrate_or_create_batch(
439            "phase 4 - system entries",
440            migration_data::dl10::phase_4_system_entries(),
441        )?;
442
443        // Reload for the new system entries
444        self.reload()?;
445
446        // Domain info is now ready and reloaded, we can proceed.
447        self.set_phase(ServerPhase::DomainInfoReady);
448
449        // Bring up the IDM entries.
450        self.internal_migrate_or_create_batch(
451            "phase 5 - builtin admin entries",
452            migration_data::dl10::phase_5_builtin_admin_entries()?,
453        )?;
454
455        self.internal_migrate_or_create_batch(
456            "phase 6 - builtin not admin entries",
457            migration_data::dl10::phase_6_builtin_non_admin_entries()?,
458        )?;
459
460        self.internal_migrate_or_create_batch(
461            "phase 7 - builtin access control profiles",
462            migration_data::dl10::phase_7_builtin_access_control_profiles(),
463        )?;
464
465        self.reload()?;
466
467        // =========== OAuth2 Cryptography Migration ==============
468
469        debug!("START OAUTH2 MIGRATION");
470
471        // Load all the OAuth2 providers.
472        let all_oauth2_rs_entries = self.internal_search(filter!(f_eq(
473            Attribute::Class,
474            EntryClass::OAuth2ResourceServer.into()
475        )))?;
476
477        if !all_oauth2_rs_entries.is_empty() {
478            let entry_iter = all_oauth2_rs_entries.iter().map(|tgt_entry| {
479                let entry_uuid = tgt_entry.get_uuid();
480                let mut modlist = ModifyList::new_list(vec![
481                    Modify::Present(Attribute::Class, EntryClass::KeyObject.to_value()),
482                    Modify::Present(Attribute::Class, EntryClass::KeyObjectJwtEs256.to_value()),
483                    Modify::Present(Attribute::Class, EntryClass::KeyObjectJweA128GCM.to_value()),
484                    // Delete the fernet key, rs256 if any, and the es256 key
485                    Modify::Purged(Attribute::OAuth2RsTokenKey),
486                    Modify::Purged(Attribute::Es256PrivateKeyDer),
487                    Modify::Purged(Attribute::Rs256PrivateKeyDer),
488                ]);
489
490                trace!(?tgt_entry);
491
492                // Import the ES256 Key
493                if let Some(es256_private_der) =
494                    tgt_entry.get_ava_single_private_binary(Attribute::Es256PrivateKeyDer)
495                {
496                    modlist.push_mod(Modify::Present(
497                        Attribute::KeyActionImportJwsEs256,
498                        Value::PrivateBinary(es256_private_der.to_vec()),
499                    ))
500                } else {
501                    warn!("Unable to migrate es256 key");
502                }
503
504                let has_rs256 = tgt_entry
505                    .get_ava_single_bool(Attribute::OAuth2JwtLegacyCryptoEnable)
506                    .unwrap_or(false);
507
508                // If there is an rs256 key, import it.
509                // Import the RS256 Key
510                if has_rs256 {
511                    modlist.push_mod(Modify::Present(
512                        Attribute::Class,
513                        EntryClass::KeyObjectJwtEs256.to_value(),
514                    ));
515
516                    if let Some(rs256_private_der) =
517                        tgt_entry.get_ava_single_private_binary(Attribute::Rs256PrivateKeyDer)
518                    {
519                        modlist.push_mod(Modify::Present(
520                            Attribute::KeyActionImportJwsRs256,
521                            Value::PrivateBinary(rs256_private_der.to_vec()),
522                        ))
523                    } else {
524                        warn!("Unable to migrate rs256 key");
525                    }
526                }
527
528                (entry_uuid, modlist)
529            });
530
531            self.internal_batch_modify(entry_iter)?;
532        }
533
534        // Reload for new keys, and updated oauth2
535        self.reload()?;
536
537        // Done!
538
539        Ok(())
540    }
541
542    /// Migration domain level 10 to 11 (1.7.0)
543    #[instrument(level = "info", skip_all)]
544    pub(crate) fn migrate_domain_10_to_11(&mut self) -> Result<(), OperationError> {
545        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_10 {
546            error!("Unable to raise domain level from 10 to 11.");
547            return Err(OperationError::MG0004DomainLevelInDevelopment);
548        }
549
550        // =========== Apply changes ==============
551        self.internal_migrate_or_create_batch(
552            "phase 1 - schema attrs",
553            migration_data::dl11::phase_1_schema_attrs(),
554        )?;
555
556        self.internal_migrate_or_create_batch(
557            "phase 2 - schema classes",
558            migration_data::dl11::phase_2_schema_classes(),
559        )?;
560
561        // Reload for the new schema.
562        self.reload()?;
563
564        // Since we just loaded in a ton of schema, lets reindex it incase we added
565        // new indexes, or this is a bootstrap and we have no indexes yet.
566        self.reindex(false)?;
567
568        // Set Phase
569        // Indicate the schema is now ready, which allows dyngroups to work when they
570        // are created in the next phase of migrations.
571        self.set_phase(ServerPhase::SchemaReady);
572
573        self.internal_migrate_or_create_batch(
574            "phase 3 - key provider",
575            migration_data::dl11::phase_3_key_provider(),
576        )?;
577
578        // Reload for the new key providers
579        self.reload()?;
580
581        self.internal_migrate_or_create_batch(
582            "phase 4 - system entries",
583            migration_data::dl11::phase_4_system_entries(),
584        )?;
585
586        // Reload for the new system entries
587        self.reload()?;
588
589        // Domain info is now ready and reloaded, we can proceed.
590        self.set_phase(ServerPhase::DomainInfoReady);
591
592        // Bring up the IDM entries.
593        self.internal_migrate_or_create_batch(
594            "phase 5 - builtin admin entries",
595            migration_data::dl11::phase_5_builtin_admin_entries()?,
596        )?;
597
598        self.internal_migrate_or_create_batch(
599            "phase 6 - builtin not admin entries",
600            migration_data::dl11::phase_6_builtin_non_admin_entries()?,
601        )?;
602
603        self.internal_migrate_or_create_batch(
604            "phase 7 - builtin access control profiles",
605            migration_data::dl11::phase_7_builtin_access_control_profiles(),
606        )?;
607
608        self.reload()?;
609
610        Ok(())
611    }
612
613    /// Migration domain level 11 to 12 (1.8.0)
614    #[instrument(level = "info", skip_all)]
615    pub(crate) fn migrate_domain_11_to_12(&mut self) -> Result<(), OperationError> {
616        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_11 {
617            error!("Unable to raise domain level from 11 to 12.");
618            return Err(OperationError::MG0004DomainLevelInDevelopment);
619        }
620
621        // =========== Apply changes ==============
622        self.internal_migrate_or_create_batch(
623            "phase 1 - schema attrs",
624            migration_data::dl12::phase_1_schema_attrs(),
625        )?;
626
627        self.internal_migrate_or_create_batch(
628            "phase 2 - schema classes",
629            migration_data::dl12::phase_2_schema_classes(),
630        )?;
631
632        // Reload for the new schema.
633        self.reload()?;
634
635        // Since we just loaded in a ton of schema, lets reindex it incase we added
636        // new indexes, or this is a bootstrap and we have no indexes yet.
637        self.reindex(false)?;
638
639        // Set Phase
640        // Indicate the schema is now ready, which allows dyngroups to work when they
641        // are created in the next phase of migrations.
642        self.set_phase(ServerPhase::SchemaReady);
643
644        self.internal_migrate_or_create_batch(
645            "phase 3 - key provider",
646            migration_data::dl12::phase_3_key_provider(),
647        )?;
648
649        // Reload for the new key providers
650        self.reload()?;
651
652        self.internal_migrate_or_create_batch(
653            "phase 4 - system entries",
654            migration_data::dl12::phase_4_system_entries(),
655        )?;
656
657        // Reload for the new system entries
658        self.reload()?;
659
660        // Domain info is now ready and reloaded, we can proceed.
661        self.set_phase(ServerPhase::DomainInfoReady);
662
663        // Bring up the IDM entries.
664        self.internal_migrate_or_create_batch(
665            "phase 5 - builtin admin entries",
666            migration_data::dl12::phase_5_builtin_admin_entries()?,
667        )?;
668
669        self.internal_migrate_or_create_batch(
670            "phase 6 - builtin not admin entries",
671            migration_data::dl12::phase_6_builtin_non_admin_entries()?,
672        )?;
673
674        self.internal_migrate_or_create_batch(
675            "phase 7 - builtin access control profiles",
676            migration_data::dl12::phase_7_builtin_access_control_profiles(),
677        )?;
678
679        self.reload()?;
680
681        Ok(())
682    }
683
684    /// Migration domain level 12 to 13 (1.9.0)
685    #[instrument(level = "info", skip_all)]
686    pub(crate) fn migrate_domain_12_to_13(&mut self) -> Result<(), OperationError> {
687        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_12 {
688            error!("Unable to raise domain level from 12 to 13.");
689            return Err(OperationError::MG0004DomainLevelInDevelopment);
690        }
691
692        Ok(())
693    }
694
695    #[instrument(level = "info", skip_all)]
696    pub(crate) fn initialise_schema_core(&mut self) -> Result<(), OperationError> {
697        admin_debug!("initialise_schema_core -> start ...");
698        // Load in all the "core" schema, that we already have in "memory".
699        let entries = self.schema.to_entries();
700
701        // admin_debug!("Dumping schemas: {:?}", entries);
702
703        // internal_migrate_or_create.
704        let r: Result<_, _> = entries.into_iter().try_for_each(|e| {
705            trace!(?e, "init schema entry");
706            self.internal_migrate_or_create(e)
707        });
708        if r.is_ok() {
709            admin_debug!("initialise_schema_core -> Ok!");
710        } else {
711            admin_error!(?r, "initialise_schema_core -> Error");
712        }
713        // why do we have error handling if it's always supposed to be `Ok`?
714        debug_assert!(r.is_ok());
715        r
716    }
717}
718
719impl QueryServerReadTransaction<'_> {
720    /// Retrieve the domain info of this server
721    pub fn domain_upgrade_check(
722        &mut self,
723    ) -> Result<ProtoDomainUpgradeCheckReport, OperationError> {
724        let d_info = &self.d_info;
725
726        let name = d_info.d_name.clone();
727        let uuid = d_info.d_uuid;
728        let current_level = d_info.d_vers;
729        let upgrade_level = DOMAIN_TGT_NEXT_LEVEL;
730
731        let mut report_items = Vec::with_capacity(1);
732
733        if current_level <= DOMAIN_LEVEL_7 && upgrade_level >= DOMAIN_LEVEL_8 {
734            let item = self
735                .domain_upgrade_check_7_to_8_security_keys()
736                .map_err(|err| {
737                    error!(
738                        ?err,
739                        "Failed to perform domain upgrade check 7 to 8 - security-keys"
740                    );
741                    err
742                })?;
743            report_items.push(item);
744
745            let item = self
746                .domain_upgrade_check_7_to_8_oauth2_strict_redirect_uri()
747                .map_err(|err| {
748                    error!(
749                        ?err,
750                        "Failed to perform domain upgrade check 7 to 8 - oauth2-strict-redirect_uri"
751                    );
752                    err
753                })?;
754            report_items.push(item);
755        }
756
757        Ok(ProtoDomainUpgradeCheckReport {
758            name,
759            uuid,
760            current_level,
761            upgrade_level,
762            report_items,
763        })
764    }
765
766    pub(crate) fn domain_upgrade_check_7_to_8_security_keys(
767        &mut self,
768    ) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
769        let filter = filter!(f_and!([
770            f_eq(Attribute::Class, EntryClass::Account.into()),
771            f_pres(Attribute::PrimaryCredential),
772        ]));
773
774        let results = self.internal_search(filter)?;
775
776        let affected_entries = results
777            .into_iter()
778            .filter_map(|entry| {
779                if entry
780                    .get_ava_single_credential(Attribute::PrimaryCredential)
781                    .map(|cred| cred.has_securitykey())
782                    .unwrap_or_default()
783                {
784                    Some(entry.get_display_id())
785                } else {
786                    None
787                }
788            })
789            .collect::<Vec<_>>();
790
791        let status = if affected_entries.is_empty() {
792            ProtoDomainUpgradeCheckStatus::Pass7To8SecurityKeys
793        } else {
794            ProtoDomainUpgradeCheckStatus::Fail7To8SecurityKeys
795        };
796
797        Ok(ProtoDomainUpgradeCheckItem {
798            status,
799            from_level: DOMAIN_LEVEL_7,
800            to_level: DOMAIN_LEVEL_8,
801            affected_entries,
802        })
803    }
804
805    pub(crate) fn domain_upgrade_check_7_to_8_oauth2_strict_redirect_uri(
806        &mut self,
807    ) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
808        let filter = filter!(f_and!([
809            f_eq(Attribute::Class, EntryClass::OAuth2ResourceServer.into()),
810            f_andnot(f_pres(Attribute::OAuth2StrictRedirectUri)),
811        ]));
812
813        let results = self.internal_search(filter)?;
814
815        let affected_entries = results
816            .into_iter()
817            .map(|entry| entry.get_display_id())
818            .collect::<Vec<_>>();
819
820        let status = if affected_entries.is_empty() {
821            ProtoDomainUpgradeCheckStatus::Pass7To8Oauth2StrictRedirectUri
822        } else {
823            ProtoDomainUpgradeCheckStatus::Fail7To8Oauth2StrictRedirectUri
824        };
825
826        Ok(ProtoDomainUpgradeCheckItem {
827            status,
828            from_level: DOMAIN_LEVEL_7,
829            to_level: DOMAIN_LEVEL_8,
830            affected_entries,
831        })
832    }
833}
834
#[cfg(test)]
mod tests {
    use crate::prelude::*;

    #[qs_test]
    async fn test_init_idempotent_schema_core(server: &QueryServer) {
        // Initialise the core schema, then abort by dropping the txn
        // without committing.
        {
            let mut txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(txn.initialise_schema_core().is_ok());
        }
        // Initialising twice within one txn must also succeed, and this
        // time the result is committed.
        {
            let mut txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(txn.initialise_schema_core().is_ok());
            assert!(txn.initialise_schema_core().is_ok());
            assert!(txn.commit().is_ok());
        }
        // Re-initialise over the committed state, then abort again.
        {
            let mut txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(txn.initialise_schema_core().is_ok());
        }
        // Finally, re-initialise over the committed state and commit.
        {
            let mut txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(txn.initialise_schema_core().is_ok());
            assert!(txn.commit().is_ok());
        }
    }

    #[qs_test(domain_level=DOMAIN_LEVEL_9)]
    async fn test_migrations_dl9_dl10(server: &QueryServer) {
        // The instance must start at the level the test harness requested.
        let mut txn = server.write(duration_from_epoch_now()).await.unwrap();

        let version = txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");
        assert_eq!(version, DOMAIN_LEVEL_9);

        txn.commit().expect("Unable to commit");

        // == pre migration verification. ==
        // check we currently would fail a migration.

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues

        // == Increase the version ==
        txn.internal_apply_domain_migration(DOMAIN_LEVEL_10)
            .expect("Unable to set domain level to version 10");

        // post migration verification.

        txn.commit().expect("Unable to commit");
    }

    #[qs_test(domain_level=DOMAIN_LEVEL_10)]
    async fn test_migrations_dl10_dl11(server: &QueryServer) {
        // The instance must start at the level the test harness requested.
        let mut txn = server.write(duration_from_epoch_now()).await.unwrap();

        let version = txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");
        assert_eq!(version, DOMAIN_LEVEL_10);

        txn.commit().expect("Unable to commit");

        // == pre migration verification. ==
        // check we currently would fail a migration.

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues

        // == Increase the version ==
        txn.internal_apply_domain_migration(DOMAIN_LEVEL_11)
            .expect("Unable to set domain level to version 11");

        // post migration verification.

        txn.commit().expect("Unable to commit");
    }

    #[qs_test(domain_level=DOMAIN_LEVEL_11)]
    async fn test_migrations_dl11_dl12(server: &QueryServer) {
        // The instance must start at the level the test harness requested.
        let mut txn = server.write(duration_from_epoch_now()).await.unwrap();

        let version = txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");
        assert_eq!(version, DOMAIN_LEVEL_11);

        txn.commit().expect("Unable to commit");

        // == pre migration verification. ==
        // check we currently would fail a migration.

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues

        // == Increase the version ==
        txn.internal_apply_domain_migration(DOMAIN_LEVEL_12)
            .expect("Unable to set domain level to version 12");

        // post migration verification.

        txn.commit().expect("Unable to commit");
    }
}
967}