kanidmd_lib/server/
migrations.rs
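//! Server bootstrap and domain level migration logic. On startup the query
//! server initialises its core schema, detects the on-disk domain version,
//! and applies any domain level or patch level migrations needed to reach
//! this build's target level.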

use crate::prelude::*;

use crate::migration_data;
use kanidm_proto::internal::{
    DomainUpgradeCheckItem as ProtoDomainUpgradeCheckItem,
    DomainUpgradeCheckReport as ProtoDomainUpgradeCheckReport,
    DomainUpgradeCheckStatus as ProtoDomainUpgradeCheckStatus,
};

use super::ServerPhase;

impl QueryServer {
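    /// Bootstrap or upgrade the server state. On a fresh database this brings up
    /// the schema and the full IDM at `domain_target_level`; on an existing
    /// database it reloads the stored domain info and then applies any domain
    /// version, patch level, and development-taint migrations that are required.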
    #[instrument(level = "info", name = "system_initialisation", skip_all)]
    pub async fn initialise_helper(
        &self,
        ts: Duration,
        domain_target_level: DomainVersion,
    ) -> Result<(), OperationError> {
        // We need to perform this in a single transaction pass to prevent tainting
        // databases during upgrades.
        let mut write_txn = self.write(ts).await?;

        // Check our database version - attempt to do an initial indexing
        // based on the in memory configuration. This ONLY triggers ONCE on
        // the very first run of the instance when the DB is newly created.
        write_txn.upgrade_reindex(SYSTEM_INDEX_VERSION)?;

        // Because we init the schema here, and commit, this reloads, meaning
        // that the on-disk index meta has been loaded, so our subsequent
        // migrations will be correctly indexed.
        //
        // Remember, this would normally mean that it's possible for schema
        // to be mis-indexed (i.e. we index the new schemas here before we read
        // the schema to tell us what's indexed), but because we have the in
        // memory schema that defines how schema is structured, and this is all
        // marked "system", we won't have an issue here.
        write_txn
            .initialise_schema_core()
            .and_then(|_| write_txn.reload())?;

        // This is what tells us if the domain entry existed before or not. This
        // is now the primary method of migrations and version detection.
        let db_domain_version = match write_txn.internal_search_uuid(UUID_DOMAIN_INFO) {
            Ok(e) => Ok(e.get_ava_single_uint32(Attribute::Version).unwrap_or(0)),
            Err(OperationError::NoMatchingEntries) => Ok(0),
            Err(r) => Err(r),
        }?;

        debug!(?db_domain_version, "Before setting internal domain info");

        if db_domain_version == 0 {
            // This is here to catch when we increase domain levels but didn't create the
            // matching migration hook. If this fails it probably means you need to add
            // another migration hook to the match below.
            debug_assert!(domain_target_level <= DOMAIN_MAX_LEVEL);

            // No domain info was present, so neither was the rest of the IDM. Bring up the
            // full IDM here.
            match domain_target_level {
                DOMAIN_LEVEL_8 => write_txn.migrate_domain_7_to_8()?,
                DOMAIN_LEVEL_9 => write_txn.migrate_domain_8_to_9()?,
                DOMAIN_LEVEL_10 => write_txn.migrate_domain_9_to_10()?,
                DOMAIN_LEVEL_11 => write_txn.migrate_domain_10_to_11()?,
                DOMAIN_LEVEL_12 => write_txn.migrate_domain_11_to_12()?,
                _ => {
                    error!("Invalid requested domain target level for server bootstrap");
                    debug_assert!(false);
                    return Err(OperationError::MG0009InvalidTargetLevelForBootstrap);
                }
            }
        } else {
            // Domain info was present, so we need to reflect that in our server
            // domain structures. If we don't do this, the in memory domain level
            // is stuck at 0 which can confuse init domain info below.
            //
            // This also is where the former domain taint flag will be loaded to
            // d_info so that if the *previous* execution of the database was
            // a devel version, we'll still trigger the forced remigration in
            // the case that we are moving from dev -> stable.
            write_txn.force_domain_reload();

            write_txn.reload()?;

            // Indicate the schema is now ready, which allows dyngroups to work when they
            // are created in the next phase of migrations.
            write_txn.set_phase(ServerPhase::SchemaReady);

            // #2756 - if we *aren't* creating the base IDM entries, then we
            // need to force dyn groups to reload since we're now at schema
            // ready. This is done indirectly by ... reloading the schema again.
            //
            // This is because dyngroups don't load until server phase >= schemaready
            // and the reload path for these is either a change in the dyngroup entry
            // itself or a change to schema reloading. Since we aren't changing the
            // dyngroup here, we have to go via the schema reload path.
            write_txn.force_schema_reload();

            // Reload as init idm affects access controls.
            write_txn.reload()?;

            // Domain info is now ready and reloaded, we can proceed.
            write_txn.set_phase(ServerPhase::DomainInfoReady);
        }

        // This is the start of domain info related migrations which we will need in future
        // to handle replication. Due to the access control rework, and the addition of "managed by"
        // syntax, we need to ensure both nodes "fence" replication from each other. We do this
        // by changing domain infos to be incompatible during this phase.

        // The reloads will have populated this structure now.
        let domain_info_version = write_txn.get_domain_version();
        let domain_patch_level = write_txn.get_domain_patch_level();
        let domain_development_taint = write_txn.get_domain_development_taint();
        debug!(
            ?db_domain_version,
            ?domain_patch_level,
            ?domain_development_taint,
            "After setting internal domain info"
        );

        let mut reload_required = false;

        // If the database domain info is a lower version than our target level, we reload.
        if domain_info_version < domain_target_level {
            write_txn
                .internal_apply_domain_migration(domain_target_level)
                .map(|()| {
                    warn!("Domain level has been raised to {}", domain_target_level);
                })?;
            // Reload if anything in migrations requires it - this triggers the domain migrations
            // which in turn can trigger schema reloads etc. If the server was just brought up
            // then we don't need the extra reload since we are already at the correct
            // version of the server, and this call to set the target level is just for persistence
            // of the value.
            if domain_info_version != 0 {
                reload_required = true;
            }
        } else if domain_development_taint {
            // This forces pre-release versions to re-migrate each start up. This solves
            // the domain-version-sprawl issue so that during a development cycle we can
            // do a single domain version bump, and continue to extend the migrations
            // within that release cycle to contain what we require.
            //
            // This branch only applies when this is a pre-release build,
            // AND we are NOT in a test environment,
            // AND we did not already need a version migration as above.
            write_txn.domain_remigrate(DOMAIN_PREVIOUS_TGT_LEVEL)?;

            reload_required = true;
        }

        // If we are new enough to support patches, and we are lower than the target patch level
        // then a reload will be applied after we raise the patch level.
        if domain_patch_level < DOMAIN_TGT_PATCH_LEVEL {
            write_txn
                .internal_modify_uuid(
                    UUID_DOMAIN_INFO,
                    &ModifyList::new_purge_and_set(
                        Attribute::PatchLevel,
                        Value::new_uint32(DOMAIN_TGT_PATCH_LEVEL),
                    ),
                )
                .map(|()| {
                    warn!(
                        "Domain patch level has been raised to {}",
                        DOMAIN_TGT_PATCH_LEVEL
                    );
                })?;

            reload_required = true;
        }

        // Execute whatever operations we have batched up and ready to go. This is needed
        // to preserve ordering of the operations - if we reloaded after a remigrate then
        // we would have skipped the patch level fix which needs to have occurred *first*.
        if reload_required {
            write_txn.reload()?;
        }

        // Now set the db/domain devel taint flag to match our current release status
        // if it changes. This is what breaks the cycle of db taint from dev -> stable.
        let current_devel_flag = option_env!("KANIDM_PRE_RELEASE").is_some();
        if current_devel_flag {
            warn!("Domain Development Taint mode is enabled");
        }
        if domain_development_taint != current_devel_flag {
            write_txn.internal_modify_uuid(
                UUID_DOMAIN_INFO,
                &ModifyList::new_purge_and_set(
                    Attribute::DomainDevelopmentTaint,
                    Value::Bool(current_devel_flag),
                ),
            )?;
        }

        // We are ready to run.
        write_txn.set_phase(ServerPhase::Running);

        // Commit all changes. This also triggers the final reload, which should be a no-op
        // since we already did all the needed loads above.
        write_txn.commit()?;

        debug!("Database version check and migrations success! ☀️  ");
        Ok(())
    }
}

impl QueryServerWriteTransaction<'_> {
    /// Apply a domain migration `to_level`. Panics if `to_level` is not greater than the active
    /// level.
    pub(crate) fn internal_apply_domain_migration(
        &mut self,
        to_level: u32,
    ) -> Result<(), OperationError> {
        assert!(to_level > self.get_domain_version());
        self.internal_modify_uuid(
            UUID_DOMAIN_INFO,
            &ModifyList::new_purge_and_set(Attribute::Version, Value::new_uint32(to_level)),
        )
        .and_then(|()| self.reload())
    }

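    /// Apply [QueryServerWriteTransaction::internal_migrate_or_create] to every entry
    /// in `entries`, using `msg` for error context. A failure is logged and trips a
    /// debug assertion, but the batch still returns `Ok` so release builds continue.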
    fn internal_migrate_or_create_batch(
        &mut self,
        msg: &str,
        entries: Vec<EntryInitNew>,
    ) -> Result<(), OperationError> {
        let r: Result<(), _> = entries
            .into_iter()
            .try_for_each(|entry| self.internal_migrate_or_create(entry));

        if let Err(err) = r {
            error!(?err, msg);
            debug_assert!(false);
        }

        Ok(())
    }

    #[instrument(level = "debug", skip_all)]
    /// - If the entry exists:
    ///   - Ensure the set of attributes match and are present
    ///     (but don't delete multivalue or extended attributes in the process).
    /// - If not:
    ///   - Create the entry.
    ///
    /// This will leave extra classes and attributes alone!
    ///
    /// NOTE: `gen_modlist*` IS schema aware and will handle multivalue correctly!
    fn internal_migrate_or_create(
        &mut self,
        e: Entry<EntryInit, EntryNew>,
    ) -> Result<(), OperationError> {
        self.internal_migrate_or_create_ignore_attrs(e, &[])
    }

    /// This is the same as [QueryServerWriteTransaction::internal_migrate_or_create] but it will ignore the specified
    /// list of attributes, so that if an admin has modified those values then we don't
    /// stomp them.
    #[instrument(level = "trace", skip_all)]
    fn internal_migrate_or_create_ignore_attrs(
        &mut self,
        mut e: Entry<EntryInit, EntryNew>,
        attrs: &[Attribute],
    ) -> Result<(), OperationError> {
        trace!("operating on {:?}", e.get_uuid());

        let Some(filt) = e.filter_from_attrs(&[Attribute::Uuid]) else {
            return Err(OperationError::FilterGeneration);
        };

        trace!("search {:?}", filt);

        let results = self.internal_search(filt.clone())?;

        if results.is_empty() {
            // It does not exist. Create it.
            self.internal_create(vec![e])
        } else if results.len() == 1 {
            // For each ignored attr, we remove it from the entry.
            for attr in attrs.iter() {
                e.remove_ava(attr);
            }

            // If the entry is a subset, pass.
            match e.gen_modlist_assert(&self.schema) {
                Ok(modlist) => {
                    // Apply to &results[0]
                    trace!(?modlist);
                    self.internal_modify(&filt, &modlist)
                }
                Err(e) => Err(OperationError::SchemaViolation(e)),
            }
        } else {
            admin_error!(
                "Invalid Result Set - Expected One Entry for {:?} - {:?}",
                filt,
                results
            );
            Err(OperationError::InvalidDbState)
        }
    }

    /// Migration domain level 7 to 8 (1.4.0)
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_7_to_8(&mut self) -> Result<(), OperationError> {
        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_8 {
            error!("Unable to raise domain level from 7 to 8.");
            return Err(OperationError::MG0004DomainLevelInDevelopment);
        }

        // =========== Apply changes ==============
        self.internal_migrate_or_create_batch(
            "phase 1 - schema attrs",
            migration_data::dl8::phase_1_schema_attrs(),
        )?;

        self.internal_migrate_or_create_batch(
            "phase 2 - schema classes",
            migration_data::dl8::phase_2_schema_classes(),
        )?;

        // Reload for the new schema.
        self.reload()?;

        // Reindex for any newly added indexes.
        self.reindex(false)?;

        // Set Phase
        self.set_phase(ServerPhase::SchemaReady);

        self.internal_migrate_or_create_batch(
            "phase 3 - key provider",
            migration_data::dl8::phase_3_key_provider(),
        )?;

        // Reload for the new key providers.
        self.reload()?;

        self.internal_migrate_or_create_batch(
            "phase 4 - system entries",
            migration_data::dl8::phase_4_system_entries(),
        )?;

        // Reload for the new system entries.
        self.reload()?;

        // Domain info is now ready and reloaded, we can proceed.
        self.set_phase(ServerPhase::DomainInfoReady);

        // Bring up the IDM entries.
        self.internal_migrate_or_create_batch(
            "phase 5 - builtin admin entries",
            migration_data::dl8::phase_5_builtin_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 6 - builtin non admin entries",
            migration_data::dl8::phase_6_builtin_non_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 7 - builtin access control profiles",
            migration_data::dl8::phase_7_builtin_access_control_profiles(),
        )?;

        // Reload for all new access controls.
        self.reload()?;

        Ok(())
    }

    /// Migration domain level 8 to 9 (1.5.0)
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_8_to_9(&mut self) -> Result<(), OperationError> {
        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_9 {
            error!("Unable to raise domain level from 8 to 9.");
            return Err(OperationError::MG0004DomainLevelInDevelopment);
        }

        // =========== Apply changes ==============
        self.internal_migrate_or_create_batch(
            "phase 1 - schema attrs",
            migration_data::dl9::phase_1_schema_attrs(),
        )?;

        self.internal_migrate_or_create_batch(
            "phase 2 - schema classes",
            migration_data::dl9::phase_2_schema_classes(),
        )?;

        // Reload for the new schema.
        self.reload()?;

        // Reindex for any newly added indexes.
        self.reindex(false)?;

        // Set Phase
        self.set_phase(ServerPhase::SchemaReady);

        self.internal_migrate_or_create_batch(
            "phase 3 - key provider",
            migration_data::dl9::phase_3_key_provider(),
        )?;

        // Reload for the new key providers.
        self.reload()?;

        self.internal_migrate_or_create_batch(
            "phase 4 - system entries",
            migration_data::dl9::phase_4_system_entries(),
        )?;

        // Reload for the new system entries.
        self.reload()?;

        // Domain info is now ready and reloaded, we can proceed.
        self.set_phase(ServerPhase::DomainInfoReady);

        // Bring up the IDM entries.
        self.internal_migrate_or_create_batch(
            "phase 5 - builtin admin entries",
            migration_data::dl9::phase_5_builtin_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 6 - builtin non admin entries",
            migration_data::dl9::phase_6_builtin_non_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 7 - builtin access control profiles",
            migration_data::dl9::phase_7_builtin_access_control_profiles(),
        )?;

        // Reload for all new access controls.
        self.reload()?;

        Ok(())
    }

    /// Patch Application - This triggers a one-shot fixup task for issue #3178
    /// to force access controls to re-migrate in existing databases so that their
    /// content matches the expected values.
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_patch_level_2(&mut self) -> Result<(), OperationError> {
        admin_warn!("applying domain patch 2.");

        debug_assert!(*self.phase >= ServerPhase::SchemaReady);

        let idm_data = migration_data::dl9::phase_7_builtin_access_control_profiles();

        idm_data
            .into_iter()
            .try_for_each(|entry| self.internal_migrate_or_create(entry))
            .map_err(|err| {
                error!(?err, "migrate_domain_patch_level_2 -> Error");
                err
            })?;

        self.reload()?;

        Ok(())
    }

    /// Migration domain level 9 to 10 (1.6.0)
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_9_to_10(&mut self) -> Result<(), OperationError> {
        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_10 {
            error!("Unable to raise domain level from 9 to 10.");
            return Err(OperationError::MG0004DomainLevelInDevelopment);
        }

        // =========== Apply changes ==============
        self.internal_migrate_or_create_batch(
            "phase 1 - schema attrs",
            migration_data::dl10::phase_1_schema_attrs(),
        )?;

        self.internal_migrate_or_create_batch(
            "phase 2 - schema classes",
            migration_data::dl10::phase_2_schema_classes(),
        )?;

        // Reload for the new schema.
        self.reload()?;

        // Since we just loaded in a ton of schema, let's reindex it in case we added
        // new indexes, or this is a bootstrap and we have no indexes yet.
        self.reindex(false)?;

        // Set Phase
        // Indicate the schema is now ready, which allows dyngroups to work when they
        // are created in the next phase of migrations.
        self.set_phase(ServerPhase::SchemaReady);

        self.internal_migrate_or_create_batch(
            "phase 3 - key provider",
            migration_data::dl10::phase_3_key_provider(),
        )?;

        // Reload for the new key providers.
        self.reload()?;

        self.internal_migrate_or_create_batch(
            "phase 4 - system entries",
            migration_data::dl10::phase_4_system_entries(),
        )?;

        // Reload for the new system entries.
        self.reload()?;

        // Domain info is now ready and reloaded, we can proceed.
        self.set_phase(ServerPhase::DomainInfoReady);

        // Bring up the IDM entries.
        self.internal_migrate_or_create_batch(
            "phase 5 - builtin admin entries",
            migration_data::dl10::phase_5_builtin_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 6 - builtin non admin entries",
            migration_data::dl10::phase_6_builtin_non_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 7 - builtin access control profiles",
            migration_data::dl10::phase_7_builtin_access_control_profiles(),
        )?;

        self.reload()?;

        // =========== OAuth2 Cryptography Migration ==============

        debug!("START OAUTH2 MIGRATION");
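
        // Convert each OAuth2 resource server into a key object: add the key object
        // classes, import any existing ES256/RS256 private keys through the key
        // provider import actions, and purge the legacy fernet token key and the
        // raw DER key attributes.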

        // Load all the OAuth2 providers.
        let all_oauth2_rs_entries = self.internal_search(filter!(f_eq(
            Attribute::Class,
            EntryClass::OAuth2ResourceServer.into()
        )))?;

        if !all_oauth2_rs_entries.is_empty() {
            let entry_iter = all_oauth2_rs_entries.iter().map(|tgt_entry| {
                let entry_uuid = tgt_entry.get_uuid();
                let mut modlist = ModifyList::new_list(vec![
                    Modify::Present(Attribute::Class, EntryClass::KeyObject.to_value()),
                    Modify::Present(Attribute::Class, EntryClass::KeyObjectJwtEs256.to_value()),
                    Modify::Present(Attribute::Class, EntryClass::KeyObjectJweA128GCM.to_value()),
                    // Delete the fernet key, the rs256 key if any, and the es256 key.
                    Modify::Purged(Attribute::OAuth2RsTokenKey),
                    Modify::Purged(Attribute::Es256PrivateKeyDer),
                    Modify::Purged(Attribute::Rs256PrivateKeyDer),
                ]);

                trace!(?tgt_entry);

                // Import the ES256 key.
                if let Some(es256_private_der) =
                    tgt_entry.get_ava_single_private_binary(Attribute::Es256PrivateKeyDer)
                {
                    modlist.push_mod(Modify::Present(
                        Attribute::KeyActionImportJwsEs256,
                        Value::PrivateBinary(es256_private_der.to_vec()),
                    ))
                } else {
                    warn!("Unable to migrate es256 key");
                }

                let has_rs256 = tgt_entry
                    .get_ava_single_bool(Attribute::OAuth2JwtLegacyCryptoEnable)
                    .unwrap_or(false);

                // If there is an rs256 key, import it.
                if has_rs256 {
                    modlist.push_mod(Modify::Present(
                        Attribute::Class,
                        EntryClass::KeyObjectJwtRs256.to_value(),
                    ));

                    if let Some(rs256_private_der) =
                        tgt_entry.get_ava_single_private_binary(Attribute::Rs256PrivateKeyDer)
                    {
                        modlist.push_mod(Modify::Present(
                            Attribute::KeyActionImportJwsRs256,
                            Value::PrivateBinary(rs256_private_der.to_vec()),
                        ))
                    } else {
                        warn!("Unable to migrate rs256 key");
                    }
                }

                (entry_uuid, modlist)
            });

            self.internal_batch_modify(entry_iter)?;
        }

        // Reload for new keys, and updated oauth2.
        self.reload()?;

        // Done!

        Ok(())
    }

    /// Migration domain level 10 to 11 (1.7.0)
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_10_to_11(&mut self) -> Result<(), OperationError> {
        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_11 {
            error!("Unable to raise domain level from 10 to 11.");
            return Err(OperationError::MG0004DomainLevelInDevelopment);
        }

        // =========== Apply changes ==============
        self.internal_migrate_or_create_batch(
            "phase 1 - schema attrs",
            migration_data::dl11::phase_1_schema_attrs(),
        )?;

        self.internal_migrate_or_create_batch(
            "phase 2 - schema classes",
            migration_data::dl11::phase_2_schema_classes(),
        )?;

        // Reload for the new schema.
        self.reload()?;

        // Since we just loaded in a ton of schema, let's reindex it in case we added
        // new indexes, or this is a bootstrap and we have no indexes yet.
        self.reindex(false)?;

        // Set Phase
        // Indicate the schema is now ready, which allows dyngroups to work when they
        // are created in the next phase of migrations.
        self.set_phase(ServerPhase::SchemaReady);

        self.internal_migrate_or_create_batch(
            "phase 3 - key provider",
            migration_data::dl11::phase_3_key_provider(),
        )?;

        // Reload for the new key providers.
        self.reload()?;

        self.internal_migrate_or_create_batch(
            "phase 4 - system entries",
            migration_data::dl11::phase_4_system_entries(),
        )?;

        // Reload for the new system entries.
        self.reload()?;

        // Domain info is now ready and reloaded, we can proceed.
        self.set_phase(ServerPhase::DomainInfoReady);

        // Bring up the IDM entries.
        self.internal_migrate_or_create_batch(
            "phase 5 - builtin admin entries",
            migration_data::dl11::phase_5_builtin_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 6 - builtin non admin entries",
            migration_data::dl11::phase_6_builtin_non_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 7 - builtin access control profiles",
            migration_data::dl11::phase_7_builtin_access_control_profiles(),
        )?;

        self.reload()?;

        Ok(())
    }

    /// Migration domain level 11 to 12 (1.8.0)
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_11_to_12(&mut self) -> Result<(), OperationError> {
        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_12 {
            error!("Unable to raise domain level from 11 to 12.");
            return Err(OperationError::MG0004DomainLevelInDevelopment);
        }

        Ok(())
    }

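    /// Seed the in-memory "core" schema entries into the database so that later
    /// migrations operate against a consistent, indexed schema baseline.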
    #[instrument(level = "info", skip_all)]
    pub(crate) fn initialise_schema_core(&mut self) -> Result<(), OperationError> {
        admin_debug!("initialise_schema_core -> start ...");
        // Load in all the "core" schema that we already have in memory.
        let entries = self.schema.to_entries();

        // admin_debug!("Dumping schemas: {:?}", entries);

        // internal_migrate_or_create.
        let r: Result<_, _> = entries.into_iter().try_for_each(|e| {
            trace!(?e, "init schema entry");
            self.internal_migrate_or_create(e)
        });
        if r.is_ok() {
            admin_debug!("initialise_schema_core -> Ok!");
        } else {
            admin_error!(?r, "initialise_schema_core -> Error");
        }
        // Why do we have error handling if it's always supposed to be `Ok`?
        debug_assert!(r.is_ok());
        r
    }
}

impl QueryServerReadTransaction<'_> {
    /// Check whether this domain is ready to move to the next domain level, and
    /// report the entries that would be affected by the upgrade.
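    ///
    /// A minimal usage sketch (illustrative only, not a doc-test - it assumes a
    /// running `server` handle):
    /// ```ignore
    /// let mut read_txn = server.read().await?;
    /// let report = read_txn.domain_upgrade_check()?;
    /// for item in report.report_items {
    ///     println!("{} -> {}: {:?}", item.from_level, item.to_level, item.status);
    /// }
    /// ```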
    pub fn domain_upgrade_check(
        &mut self,
    ) -> Result<ProtoDomainUpgradeCheckReport, OperationError> {
        let d_info = &self.d_info;

        let name = d_info.d_name.clone();
        let uuid = d_info.d_uuid;
        let current_level = d_info.d_vers;
        let upgrade_level = DOMAIN_TGT_NEXT_LEVEL;

        let mut report_items = Vec::with_capacity(2);

        if current_level <= DOMAIN_LEVEL_7 && upgrade_level >= DOMAIN_LEVEL_8 {
            let item = self
                .domain_upgrade_check_7_to_8_security_keys()
                .map_err(|err| {
                    error!(
                        ?err,
                        "Failed to perform domain upgrade check 7 to 8 - security-keys"
                    );
                    err
                })?;
            report_items.push(item);

            let item = self
                .domain_upgrade_check_7_to_8_oauth2_strict_redirect_uri()
                .map_err(|err| {
                    error!(
                        ?err,
                        "Failed to perform domain upgrade check 7 to 8 - oauth2-strict-redirect-uri"
                    );
                    err
                })?;
            report_items.push(item);
        }

        Ok(ProtoDomainUpgradeCheckReport {
            name,
            uuid,
            current_level,
            upgrade_level,
            report_items,
        })
    }

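    /// Report accounts whose primary credential still includes a security key,
    /// as these credentials are affected by the domain level 7 to 8 upgrade.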
    pub(crate) fn domain_upgrade_check_7_to_8_security_keys(
        &mut self,
    ) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
        let filter = filter!(f_and!([
            f_eq(Attribute::Class, EntryClass::Account.into()),
            f_pres(Attribute::PrimaryCredential),
        ]));

        let results = self.internal_search(filter)?;

        let affected_entries = results
            .into_iter()
            .filter_map(|entry| {
                if entry
                    .get_ava_single_credential(Attribute::PrimaryCredential)
                    .map(|cred| cred.has_securitykey())
                    .unwrap_or_default()
                {
                    Some(entry.get_display_id())
                } else {
                    None
                }
            })
            .collect::<Vec<_>>();

        let status = if affected_entries.is_empty() {
            ProtoDomainUpgradeCheckStatus::Pass7To8SecurityKeys
        } else {
            ProtoDomainUpgradeCheckStatus::Fail7To8SecurityKeys
        };

        Ok(ProtoDomainUpgradeCheckItem {
            status,
            from_level: DOMAIN_LEVEL_7,
            to_level: DOMAIN_LEVEL_8,
            affected_entries,
        })
    }

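    /// Report OAuth2 resource servers that have not yet enabled strict redirect
    /// URI enforcement, as these are affected by the domain level 7 to 8 upgrade.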
    pub(crate) fn domain_upgrade_check_7_to_8_oauth2_strict_redirect_uri(
        &mut self,
    ) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
        let filter = filter!(f_and!([
            f_eq(Attribute::Class, EntryClass::OAuth2ResourceServer.into()),
            f_andnot(f_pres(Attribute::OAuth2StrictRedirectUri)),
        ]));

        let results = self.internal_search(filter)?;

        let affected_entries = results
            .into_iter()
            .map(|entry| entry.get_display_id())
            .collect::<Vec<_>>();

        let status = if affected_entries.is_empty() {
            ProtoDomainUpgradeCheckStatus::Pass7To8Oauth2StrictRedirectUri
        } else {
            ProtoDomainUpgradeCheckStatus::Fail7To8Oauth2StrictRedirectUri
        };

        Ok(ProtoDomainUpgradeCheckItem {
            status,
            from_level: DOMAIN_LEVEL_7,
            to_level: DOMAIN_LEVEL_8,
            affected_entries,
        })
    }
}

#[cfg(test)]
mod tests {
    // use super::{ProtoDomainUpgradeCheckItem, ProtoDomainUpgradeCheckStatus};
    use crate::prelude::*;

    #[qs_test]
    async fn test_init_idempotent_schema_core(server: &QueryServer) {
        {
            // Setup and abort.
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
        }
        {
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.commit().is_ok());
        }
        {
            // Now do it again in a new txn, but abort.
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
        }
        {
            // Now do it again in a new txn.
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.commit().is_ok());
        }
    }

    #[qs_test(domain_level=DOMAIN_LEVEL_8)]
    async fn test_migrations_dl8_dl9(server: &QueryServer) {
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_LEVEL_8);

        write_txn.commit().expect("Unable to commit");

        // == pre migration verification. ==
        // check we currently would fail a migration.

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_LEVEL_9)
            .expect("Unable to set domain level to version 9");

        // post migration verification.

        write_txn.commit().expect("Unable to commit");
    }

    #[qs_test(domain_level=DOMAIN_LEVEL_9)]
    async fn test_migrations_dl9_dl10(server: &QueryServer) {
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_LEVEL_9);

        write_txn.commit().expect("Unable to commit");

        // == pre migration verification. ==
        // check we currently would fail a migration.

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_LEVEL_10)
            .expect("Unable to set domain level to version 10");

        // post migration verification.

        write_txn.commit().expect("Unable to commit");
    }

    #[qs_test(domain_level=DOMAIN_LEVEL_10)]
    async fn test_migrations_dl10_dl11(server: &QueryServer) {
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_LEVEL_10);

        write_txn.commit().expect("Unable to commit");

        // == pre migration verification. ==
        // check we currently would fail a migration.

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_LEVEL_11)
            .expect("Unable to set domain level to version 11");

        // post migration verification.

        write_txn.commit().expect("Unable to commit");
    }
}