kanidmd_lib/server/
migrations.rs

1use crate::prelude::*;
2
3use crate::migration_data;
4use kanidm_proto::internal::{
5    DomainUpgradeCheckItem as ProtoDomainUpgradeCheckItem,
6    DomainUpgradeCheckReport as ProtoDomainUpgradeCheckReport,
7    DomainUpgradeCheckStatus as ProtoDomainUpgradeCheckStatus,
8};
9
10use super::ServerPhase;
11
12impl QueryServer {
13    #[instrument(level = "info", name = "system_initialisation", skip_all)]
14    pub async fn initialise_helper(
15        &self,
16        ts: Duration,
17        domain_target_level: DomainVersion,
18    ) -> Result<(), OperationError> {
19        // We need to perform this in a single transaction pass to prevent tainting
20        // databases during upgrades.
21        let mut write_txn = self.write(ts).await?;
22
23        // Check our database version - attempt to do an initial indexing
24        // based on the in memory configuration. This ONLY triggers ONCE on
25        // the very first run of the instance when the DB in newely created.
26        write_txn.upgrade_reindex(SYSTEM_INDEX_VERSION)?;
27
28        // Because we init the schema here, and commit, this reloads meaning
29        // that the on-disk index meta has been loaded, so our subsequent
30        // migrations will be correctly indexed.
31        //
32        // Remember, that this would normally mean that it's possible for schema
33        // to be mis-indexed (IE we index the new schemas here before we read
34        // the schema to tell us what's indexed), but because we have the in
35        // mem schema that defines how schema is structured, and this is all
36        // marked "system", then we won't have an issue here.
37        write_txn
38            .initialise_schema_core()
39            .and_then(|_| write_txn.reload())?;
40
41        // This is what tells us if the domain entry existed before or not. This
42        // is now the primary method of migrations and version detection.
43        let db_domain_version = match write_txn.internal_search_uuid(UUID_DOMAIN_INFO) {
44            Ok(e) => Ok(e.get_ava_single_uint32(Attribute::Version).unwrap_or(0)),
45            Err(OperationError::NoMatchingEntries) => Ok(0),
46            Err(r) => Err(r),
47        }?;
48
49        debug!(?db_domain_version, "Before setting internal domain info");
50
51        if db_domain_version == 0 {
52            // This is here to catch when we increase domain levels but didn't create the migration
53            // hooks. If this fails it probably means you need to add another migration hook
54            // in the above.
55            debug_assert!(domain_target_level <= DOMAIN_MAX_LEVEL);
56
57            // No domain info was present, so neither was the rest of the IDM. Bring up the
58            // full IDM here.
59            match domain_target_level {
60                DOMAIN_LEVEL_10 => write_txn.migrate_domain_9_to_10()?,
61                DOMAIN_LEVEL_11 => write_txn.migrate_domain_10_to_11()?,
62                DOMAIN_LEVEL_12 => write_txn.migrate_domain_11_to_12()?,
63                DOMAIN_LEVEL_13 => write_txn.migrate_domain_12_to_13()?,
64                DOMAIN_LEVEL_14 => write_txn.migrate_domain_13_to_14()?,
65                _ => {
66                    error!("Invalid requested domain target level for server bootstrap");
67                    debug_assert!(false);
68                    return Err(OperationError::MG0009InvalidTargetLevelForBootstrap);
69                }
70            }
71        } else {
72            // Domain info was present, so we need to reflect that in our server
73            // domain structures. If we don't do this, the in memory domain level
74            // is stuck at 0 which can confuse init domain info below.
75            //
76            // This also is where the former domain taint flag will be loaded to
77            // d_info so that if the *previous* execution of the database was
78            // a devel version, we'll still trigger the forced remigration in
79            // in the case that we are moving from dev -> stable.
80            write_txn.force_domain_reload();
81
82            write_txn.reload()?;
83
84            // Indicate the schema is now ready, which allows dyngroups to work when they
85            // are created in the next phase of migrations.
86            write_txn.set_phase(ServerPhase::SchemaReady);
87
88            // #2756 - if we *aren't* creating the base IDM entries, then we
89            // need to force dyn groups to reload since we're now at schema
90            // ready. This is done indirectly by ... reloading the schema again.
91            //
92            // This is because dyngroups don't load until server phase >= schemaready
93            // and the reload path for these is either a change in the dyngroup entry
94            // itself or a change to schema reloading. Since we aren't changing the
95            // dyngroup here, we have to go via the schema reload path.
96            write_txn.force_schema_reload();
97
98            // Reload as init idm affects access controls.
99            write_txn.reload()?;
100
101            // Domain info is now ready and reloaded, we can proceed.
102            write_txn.set_phase(ServerPhase::DomainInfoReady);
103        }
104
105        // This is the start of domain info related migrations which we will need in future
106        // to handle replication. Due to the access control rework, and the addition of "managed by"
107        // syntax, we need to ensure both nodes "fence" replication from each other. We do this
108        // by changing domain infos to be incompatible during this phase.
109
110        // The reloads will have populated this structure now.
111        let domain_info_version = write_txn.get_domain_version();
112        let domain_patch_level = write_txn.get_domain_patch_level();
113        let domain_development_taint = write_txn.get_domain_development_taint();
114        debug!(
115            ?db_domain_version,
116            ?domain_patch_level,
117            ?domain_development_taint,
118            "After setting internal domain info"
119        );
120
121        let mut reload_required = false;
122
123        // If the database domain info is a lower version than our target level, we reload.
124        if domain_info_version < domain_target_level {
125            write_txn
126                .internal_apply_domain_migration(domain_target_level)
127                .map(|()| {
128                    warn!("Domain level has been raised to {}", domain_target_level);
129                })?;
130            // Reload if anything in migrations requires it - this triggers the domain migrations
131            // which in turn can trigger schema reloads etc. If the server was just brought up
132            // then we don't need the extra reload since we are already at the correct
133            // version of the server, and this call to set the target level is just for persistence
134            // of the value.
135            if domain_info_version != 0 {
136                reload_required = true;
137            }
138        } else if domain_development_taint {
139            // This forces pre-release versions to re-migrate each start up. This solves
140            // the domain-version-sprawl issue so that during a development cycle we can
141            // do a single domain version bump, and continue to extend the migrations
142            // within that release cycle to contain what we require.
143            //
144            // If this is a pre-release build
145            // AND
146            // we are NOT in a test environment
147            // AND
148            // We did not already need a version migration as above
149            write_txn.domain_remigrate(DOMAIN_PREVIOUS_TGT_LEVEL)?;
150
151            reload_required = true;
152        }
153
154        // If we are new enough to support patches, and we are lower than the target patch level
155        // then a reload will be applied after we raise the patch level.
156        if domain_patch_level < DOMAIN_TGT_PATCH_LEVEL {
157            write_txn
158                .internal_modify_uuid(
159                    UUID_DOMAIN_INFO,
160                    &ModifyList::new_purge_and_set(
161                        Attribute::PatchLevel,
162                        Value::new_uint32(DOMAIN_TGT_PATCH_LEVEL),
163                    ),
164                )
165                .map(|()| {
166                    warn!(
167                        "Domain patch level has been raised to {}",
168                        domain_patch_level
169                    );
170                })?;
171
172            reload_required = true;
173        };
174
175        // Execute whatever operations we have batched up and ready to go. This is needed
176        // to preserve ordering of the operations - if we reloaded after a remigrate then
177        // we would have skipped the patch level fix which needs to have occurred *first*.
178        if reload_required {
179            write_txn.reload()?;
180        }
181
182        // Now set the db/domain devel taint flag to match our current release status
183        // if it changes. This is what breaks the cycle of db taint from dev -> stable
184        let current_devel_flag = option_env!("KANIDM_PRE_RELEASE").is_some();
185        if current_devel_flag {
186            warn!("Domain Development Taint mode is enabled");
187        }
188        if domain_development_taint != current_devel_flag {
189            write_txn.internal_modify_uuid(
190                UUID_DOMAIN_INFO,
191                &ModifyList::new_purge_and_set(
192                    Attribute::DomainDevelopmentTaint,
193                    Value::Bool(current_devel_flag),
194                ),
195            )?;
196        }
197
198        // We are ready to run
199        write_txn.set_phase(ServerPhase::Running);
200
201        // Commit all changes, this also triggers the final reload, this should be a no-op
202        // since we already did all the needed loads above.
203        write_txn.commit()?;
204
205        debug!("Database version check and migrations success! ☀️  ");
206        Ok(())
207    }
208}
209
210impl QueryServerWriteTransaction<'_> {
211    /// Apply a domain migration `to_level`. Panics if `to_level` is not greater than the active
212    /// level.
213    pub(crate) fn internal_apply_domain_migration(
214        &mut self,
215        to_level: u32,
216    ) -> Result<(), OperationError> {
217        assert!(to_level > self.get_domain_version());
218        self.internal_modify_uuid(
219            UUID_DOMAIN_INFO,
220            &ModifyList::new_purge_and_set(Attribute::Version, Value::new_uint32(to_level)),
221        )
222        .and_then(|()| self.reload())
223    }
224
225    fn internal_migrate_or_create_batch(
226        &mut self,
227        msg: &str,
228        entries: Vec<EntryInitNew>,
229    ) -> Result<(), OperationError> {
230        let r: Result<(), _> = entries
231            .into_iter()
232            .try_for_each(|entry| self.internal_migrate_or_create(entry));
233
234        if let Err(err) = r {
235            error!(?err, msg);
236            debug_assert!(false);
237        }
238
239        Ok(())
240    }
241
    #[instrument(level = "debug", skip_all)]
    /// Assert the state of a single entry, creating it if missing:
    ///
    /// - If the entry exists:
    ///   - Ensure the set of attributes in `e` match and are present
    ///     (but don't delete multivalue, or extended attributes in the situation).
    /// - If not:
    ///   - Create the entry
    ///
    /// This will leave extra classes and attributes alone!
    ///
    /// NOTE: `gen_modlist*` IS schema aware and will handle multivalue correctly!
    fn internal_migrate_or_create(
        &mut self,
        e: Entry<EntryInit, EntryNew>,
    ) -> Result<(), OperationError> {
        // Empty ignore-list: every attribute present in `e` is asserted.
        self.internal_migrate_or_create_ignore_attrs(e, &[])
    }
258
259    #[instrument(level = "debug", skip_all)]
260    fn internal_delete_batch(
261        &mut self,
262        msg: &str,
263        entries: Vec<Uuid>,
264    ) -> Result<(), OperationError> {
265        let filter = entries
266            .into_iter()
267            .map(|uuid| f_eq(Attribute::Uuid, PartialValue::Uuid(uuid)))
268            .collect();
269
270        let filter = filter_all!(f_or(filter));
271
272        let result = self.internal_delete(&filter);
273
274        match result {
275            Ok(_) | Err(OperationError::NoMatchingEntries) => Ok(()),
276            Err(err) => {
277                error!(?err, msg);
278                Err(err)
279            }
280        }
281    }
282
283    /// This is the same as [QueryServerWriteTransaction::internal_migrate_or_create] but it will ignore the specified
284    /// list of attributes, so that if an admin has modified those values then we don't
285    /// stomp them.
286    #[instrument(level = "trace", skip_all)]
287    fn internal_migrate_or_create_ignore_attrs(
288        &mut self,
289        mut e: Entry<EntryInit, EntryNew>,
290        attrs: &[Attribute],
291    ) -> Result<(), OperationError> {
292        trace!("operating on {:?}", e.get_uuid());
293
294        let Some(filt) = e.filter_from_attrs(&[Attribute::Uuid]) else {
295            return Err(OperationError::FilterGeneration);
296        };
297
298        trace!("search {:?}", filt);
299
300        let results = self.internal_search(filt.clone())?;
301
302        if results.is_empty() {
303            // It does not exist. Create it.
304            self.internal_create(vec![e])
305        } else if results.len() == 1 {
306            // For each ignored attr, we remove it from entry.
307            for attr in attrs.iter() {
308                e.remove_ava(attr);
309            }
310
311            // If the thing is subset, pass
312            match e.gen_modlist_assert(&self.schema) {
313                Ok(modlist) => {
314                    // Apply to &results[0]
315                    trace!(?modlist);
316                    self.internal_modify(&filt, &modlist)
317                }
318                Err(e) => Err(OperationError::SchemaViolation(e)),
319            }
320        } else {
321            admin_error!(
322                "Invalid Result Set - Expected One Entry for {:?} - {:?}",
323                filt,
324                results
325            );
326            Err(OperationError::InvalidDbState)
327        }
328    }
329
330    // Commented as an example of patch application
331    /*
332    /// Patch Application - This triggers a one-shot fixup task for issue #3178
333    /// to force access controls to re-migrate in existing databases so that they're
334    /// content matches expected values.
335    #[instrument(level = "info", skip_all)]
336    pub(crate) fn migrate_domain_patch_level_2(&mut self) -> Result<(), OperationError> {
337        admin_warn!("applying domain patch 2.");
338
339        debug_assert!(*self.phase >= ServerPhase::SchemaReady);
340
341        let idm_data = migration_data::dl9::phase_7_builtin_access_control_profiles();
342
343        idm_data
344            .into_iter()
345            .try_for_each(|entry| self.internal_migrate_or_create(entry))
346            .map_err(|err| {
347                error!(?err, "migrate_domain_patch_level_2 -> Error");
348                err
349            })?;
350
351        self.reload()?;
352
353        Ok(())
354    }
355    */
356
357    /// Migration domain level 9 to 10 (1.6.0)
358    #[instrument(level = "info", skip_all)]
359    pub(crate) fn migrate_domain_9_to_10(&mut self) -> Result<(), OperationError> {
360        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_9 {
361            error!("Unable to raise domain level from 9 to 10.");
362            return Err(OperationError::MG0004DomainLevelInDevelopment);
363        }
364
365        // =========== Apply changes ==============
366        self.internal_migrate_or_create_batch(
367            "phase 1 - schema attrs",
368            migration_data::dl10::phase_1_schema_attrs(),
369        )?;
370
371        self.internal_migrate_or_create_batch(
372            "phase 2 - schema classes",
373            migration_data::dl10::phase_2_schema_classes(),
374        )?;
375
376        // Reload for the new schema.
377        self.reload()?;
378
379        // Since we just loaded in a ton of schema, lets reindex it in case we added
380        // new indexes, or this is a bootstrap and we have no indexes yet.
381        self.reindex(false)?;
382
383        // Set Phase
384        // Indicate the schema is now ready, which allows dyngroups to work when they
385        // are created in the next phase of migrations.
386        self.set_phase(ServerPhase::SchemaReady);
387
388        self.internal_migrate_or_create_batch(
389            "phase 3 - key provider",
390            migration_data::dl10::phase_3_key_provider(),
391        )?;
392
393        // Reload for the new key providers
394        self.reload()?;
395
396        self.internal_migrate_or_create_batch(
397            "phase 4 - system entries",
398            migration_data::dl10::phase_4_system_entries(),
399        )?;
400
401        // Reload for the new system entries
402        self.reload()?;
403
404        // Domain info is now ready and reloaded, we can proceed.
405        self.set_phase(ServerPhase::DomainInfoReady);
406
407        // Bring up the IDM entries.
408        self.internal_migrate_or_create_batch(
409            "phase 5 - builtin admin entries",
410            migration_data::dl10::phase_5_builtin_admin_entries()?,
411        )?;
412
413        self.internal_migrate_or_create_batch(
414            "phase 6 - builtin not admin entries",
415            migration_data::dl10::phase_6_builtin_non_admin_entries()?,
416        )?;
417
418        self.internal_migrate_or_create_batch(
419            "phase 7 - builtin access control profiles",
420            migration_data::dl10::phase_7_builtin_access_control_profiles(),
421        )?;
422
423        self.reload()?;
424
425        // =========== OAuth2 Cryptography Migration ==============
426
427        debug!("START OAUTH2 MIGRATION");
428
429        // Load all the OAuth2 providers.
430        let all_oauth2_rs_entries = self.internal_search(filter!(f_eq(
431            Attribute::Class,
432            EntryClass::OAuth2ResourceServer.into()
433        )))?;
434
435        if !all_oauth2_rs_entries.is_empty() {
436            let entry_iter = all_oauth2_rs_entries.iter().map(|tgt_entry| {
437                let entry_uuid = tgt_entry.get_uuid();
438                let mut modlist = ModifyList::new_list(vec![
439                    Modify::Present(Attribute::Class, EntryClass::KeyObject.to_value()),
440                    Modify::Present(Attribute::Class, EntryClass::KeyObjectJwtEs256.to_value()),
441                    Modify::Present(Attribute::Class, EntryClass::KeyObjectJweA128GCM.to_value()),
442                    // Delete the fernet key, rs256 if any, and the es256 key
443                    Modify::Purged(Attribute::OAuth2RsTokenKey),
444                    Modify::Purged(Attribute::Es256PrivateKeyDer),
445                    Modify::Purged(Attribute::Rs256PrivateKeyDer),
446                ]);
447
448                trace!(?tgt_entry);
449
450                // Import the ES256 Key
451                if let Some(es256_private_der) =
452                    tgt_entry.get_ava_single_private_binary(Attribute::Es256PrivateKeyDer)
453                {
454                    modlist.push_mod(Modify::Present(
455                        Attribute::KeyActionImportJwsEs256,
456                        Value::PrivateBinary(es256_private_der.to_vec()),
457                    ))
458                } else {
459                    warn!("Unable to migrate es256 key");
460                }
461
462                let has_rs256 = tgt_entry
463                    .get_ava_single_bool(Attribute::OAuth2JwtLegacyCryptoEnable)
464                    .unwrap_or(false);
465
466                // If there is an rs256 key, import it.
467                // Import the RS256 Key
468                if has_rs256 {
469                    modlist.push_mod(Modify::Present(
470                        Attribute::Class,
471                        EntryClass::KeyObjectJwtEs256.to_value(),
472                    ));
473
474                    if let Some(rs256_private_der) =
475                        tgt_entry.get_ava_single_private_binary(Attribute::Rs256PrivateKeyDer)
476                    {
477                        modlist.push_mod(Modify::Present(
478                            Attribute::KeyActionImportJwsRs256,
479                            Value::PrivateBinary(rs256_private_der.to_vec()),
480                        ))
481                    } else {
482                        warn!("Unable to migrate rs256 key");
483                    }
484                }
485
486                (entry_uuid, modlist)
487            });
488
489            self.internal_batch_modify(entry_iter)?;
490        }
491
492        // Reload for new keys, and updated oauth2
493        self.reload()?;
494
495        // Done!
496
497        Ok(())
498    }
499
500    /// Migration domain level 10 to 11 (1.7.0)
501    #[instrument(level = "info", skip_all)]
502    pub(crate) fn migrate_domain_10_to_11(&mut self) -> Result<(), OperationError> {
503        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_10 {
504            error!("Unable to raise domain level from 10 to 11.");
505            return Err(OperationError::MG0004DomainLevelInDevelopment);
506        }
507
508        // =========== Apply changes ==============
509        self.internal_migrate_or_create_batch(
510            "phase 1 - schema attrs",
511            migration_data::dl11::phase_1_schema_attrs(),
512        )?;
513
514        self.internal_migrate_or_create_batch(
515            "phase 2 - schema classes",
516            migration_data::dl11::phase_2_schema_classes(),
517        )?;
518
519        // Reload for the new schema.
520        self.reload()?;
521
522        // Since we just loaded in a ton of schema, lets reindex it in case we added
523        // new indexes, or this is a bootstrap and we have no indexes yet.
524        self.reindex(false)?;
525
526        // Set Phase
527        // Indicate the schema is now ready, which allows dyngroups to work when they
528        // are created in the next phase of migrations.
529        self.set_phase(ServerPhase::SchemaReady);
530
531        self.internal_migrate_or_create_batch(
532            "phase 3 - key provider",
533            migration_data::dl11::phase_3_key_provider(),
534        )?;
535
536        // Reload for the new key providers
537        self.reload()?;
538
539        self.internal_migrate_or_create_batch(
540            "phase 4 - system entries",
541            migration_data::dl11::phase_4_system_entries(),
542        )?;
543
544        // Reload for the new system entries
545        self.reload()?;
546
547        // Domain info is now ready and reloaded, we can proceed.
548        self.set_phase(ServerPhase::DomainInfoReady);
549
550        // Bring up the IDM entries.
551        self.internal_migrate_or_create_batch(
552            "phase 5 - builtin admin entries",
553            migration_data::dl11::phase_5_builtin_admin_entries()?,
554        )?;
555
556        self.internal_migrate_or_create_batch(
557            "phase 6 - builtin not admin entries",
558            migration_data::dl11::phase_6_builtin_non_admin_entries()?,
559        )?;
560
561        self.internal_migrate_or_create_batch(
562            "phase 7 - builtin access control profiles",
563            migration_data::dl11::phase_7_builtin_access_control_profiles(),
564        )?;
565
566        self.reload()?;
567
568        Ok(())
569    }
570
571    /// Migration domain level 11 to 12 (1.8.0)
572    #[instrument(level = "info", skip_all)]
573    pub(crate) fn migrate_domain_11_to_12(&mut self) -> Result<(), OperationError> {
574        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_11 {
575            error!("Unable to raise domain level from 11 to 12.");
576            return Err(OperationError::MG0004DomainLevelInDevelopment);
577        }
578
579        // =========== Apply changes ==============
580        self.internal_migrate_or_create_batch(
581            "phase 1 - schema attrs",
582            migration_data::dl12::phase_1_schema_attrs(),
583        )?;
584
585        self.internal_migrate_or_create_batch(
586            "phase 2 - schema classes",
587            migration_data::dl12::phase_2_schema_classes(),
588        )?;
589
590        // Reload for the new schema.
591        self.reload()?;
592
593        // Since we just loaded in a ton of schema, lets reindex it in case we added
594        // new indexes, or this is a bootstrap and we have no indexes yet.
595        self.reindex(false)?;
596
597        // Set Phase
598        // Indicate the schema is now ready, which allows dyngroups to work when they
599        // are created in the next phase of migrations.
600        self.set_phase(ServerPhase::SchemaReady);
601
602        self.internal_migrate_or_create_batch(
603            "phase 3 - key provider",
604            migration_data::dl12::phase_3_key_provider(),
605        )?;
606
607        // Reload for the new key providers
608        self.reload()?;
609
610        self.internal_migrate_or_create_batch(
611            "phase 4 - system entries",
612            migration_data::dl12::phase_4_system_entries(),
613        )?;
614
615        // Reload for the new system entries
616        self.reload()?;
617
618        // Domain info is now ready and reloaded, we can proceed.
619        self.set_phase(ServerPhase::DomainInfoReady);
620
621        // Bring up the IDM entries.
622        self.internal_migrate_or_create_batch(
623            "phase 5 - builtin admin entries",
624            migration_data::dl12::phase_5_builtin_admin_entries()?,
625        )?;
626
627        self.internal_migrate_or_create_batch(
628            "phase 6 - builtin not admin entries",
629            migration_data::dl12::phase_6_builtin_non_admin_entries()?,
630        )?;
631
632        self.internal_migrate_or_create_batch(
633            "phase 7 - builtin access control profiles",
634            migration_data::dl12::phase_7_builtin_access_control_profiles(),
635        )?;
636
637        self.reload()?;
638
639        // Cleanup any leftover id keys
640        let modlist = ModifyList::new_purge(Attribute::IdVerificationEcKey);
641        let filter = filter_all!(f_pres(Attribute::IdVerificationEcKey));
642
643        self.internal_modify(&filter, &modlist)?;
644
645        Ok(())
646    }
647
648    /// Migration domain level 12 to 13 (1.9.0)
649    #[instrument(level = "info", skip_all)]
650    pub(crate) fn migrate_domain_12_to_13(&mut self) -> Result<(), OperationError> {
651        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_12 {
652            error!("Unable to raise domain level from 12 to 13.");
653            return Err(OperationError::MG0004DomainLevelInDevelopment);
654        }
655
656        // =========== Apply changes ==============
657        self.internal_migrate_or_create_batch(
658            "phase 1 - schema attrs",
659            migration_data::dl13::phase_1_schema_attrs(),
660        )?;
661
662        self.internal_migrate_or_create_batch(
663            "phase 2 - schema classes",
664            migration_data::dl13::phase_2_schema_classes(),
665        )?;
666
667        // Reload for the new schema.
668        self.reload()?;
669
670        // Since we just loaded in a ton of schema, lets reindex it in case we added
671        // new indexes, or this is a bootstrap and we have no indexes yet.
672        self.reindex(false)?;
673
674        // Set Phase
675        // Indicate the schema is now ready, which allows dyngroups to work when they
676        // are created in the next phase of migrations.
677        self.set_phase(ServerPhase::SchemaReady);
678
679        self.internal_migrate_or_create_batch(
680            "phase 3 - key provider",
681            migration_data::dl13::phase_3_key_provider(),
682        )?;
683
684        // Reload for the new key providers
685        self.reload()?;
686
687        self.internal_migrate_or_create_batch(
688            "phase 4 - system entries",
689            migration_data::dl13::phase_4_system_entries(),
690        )?;
691
692        // Reload for the new system entries
693        self.reload()?;
694
695        // Domain info is now ready and reloaded, we can proceed.
696        self.set_phase(ServerPhase::DomainInfoReady);
697
698        // Bring up the IDM entries.
699        self.internal_migrate_or_create_batch(
700            "phase 5 - builtin admin entries",
701            migration_data::dl13::phase_5_builtin_admin_entries()?,
702        )?;
703
704        self.internal_migrate_or_create_batch(
705            "phase 6 - builtin not admin entries",
706            migration_data::dl13::phase_6_builtin_non_admin_entries()?,
707        )?;
708
709        self.internal_migrate_or_create_batch(
710            "phase 7 - builtin access control profiles",
711            migration_data::dl13::phase_7_builtin_access_control_profiles(),
712        )?;
713
714        self.internal_delete_batch(
715            "phase 8 - delete UUIDS",
716            migration_data::dl13::phase_8_delete_uuids(),
717        )?;
718
719        self.reload()?;
720
721        Ok(())
722    }
723
724    /// Migration domain level 13 to 14 (1.10.0)
725    #[instrument(level = "info", skip_all)]
726    pub(crate) fn migrate_domain_13_to_14(&mut self) -> Result<(), OperationError> {
727        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_13 {
728            error!("Unable to raise domain level from 13 to 14.");
729            return Err(OperationError::MG0004DomainLevelInDevelopment);
730        }
731
732        Ok(())
733    }
734
735    #[instrument(level = "info", skip_all)]
736    pub(crate) fn initialise_schema_core(&mut self) -> Result<(), OperationError> {
737        admin_debug!("initialise_schema_core -> start ...");
738        // Load in all the "core" schema, that we already have in "memory".
739        let entries = self.schema.to_entries();
740
741        // admin_debug!("Dumping schemas: {:?}", entries);
742
743        // internal_migrate_or_create.
744        let r: Result<_, _> = entries.into_iter().try_for_each(|e| {
745            trace!(?e, "init schema entry");
746            self.internal_migrate_or_create(e)
747        });
748        if r.is_ok() {
749            admin_debug!("initialise_schema_core -> Ok!");
750        } else {
751            admin_error!(?r, "initialise_schema_core -> Error");
752        }
753        // why do we have error handling if it's always supposed to be `Ok`?
754        debug_assert!(r.is_ok());
755        r
756    }
757}
758
759impl QueryServerReadTransaction<'_> {
760    /// Retrieve the domain info of this server
761    pub fn domain_upgrade_check(
762        &mut self,
763    ) -> Result<ProtoDomainUpgradeCheckReport, OperationError> {
764        let d_info = &self.d_info;
765
766        let name = d_info.d_name.clone();
767        let uuid = d_info.d_uuid;
768        let current_level = d_info.d_vers;
769        let upgrade_level = DOMAIN_TGT_NEXT_LEVEL;
770
771        let mut report_items = Vec::with_capacity(1);
772
773        if current_level <= DOMAIN_LEVEL_7 && upgrade_level >= DOMAIN_LEVEL_8 {
774            let item = self
775                .domain_upgrade_check_7_to_8_security_keys()
776                .map_err(|err| {
777                    error!(
778                        ?err,
779                        "Failed to perform domain upgrade check 7 to 8 - security-keys"
780                    );
781                    err
782                })?;
783            report_items.push(item);
784
785            let item = self
786                .domain_upgrade_check_7_to_8_oauth2_strict_redirect_uri()
787                .map_err(|err| {
788                    error!(
789                        ?err,
790                        "Failed to perform domain upgrade check 7 to 8 - oauth2-strict-redirect_uri"
791                    );
792                    err
793                })?;
794            report_items.push(item);
795        }
796
797        Ok(ProtoDomainUpgradeCheckReport {
798            name,
799            uuid,
800            current_level,
801            upgrade_level,
802            report_items,
803        })
804    }
805
806    pub(crate) fn domain_upgrade_check_7_to_8_security_keys(
807        &mut self,
808    ) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
809        let filter = filter!(f_and!([
810            f_eq(Attribute::Class, EntryClass::Account.into()),
811            f_pres(Attribute::PrimaryCredential),
812        ]));
813
814        let results = self.internal_search(filter)?;
815
816        let affected_entries = results
817            .into_iter()
818            .filter_map(|entry| {
819                if entry
820                    .get_ava_single_credential(Attribute::PrimaryCredential)
821                    .map(|cred| cred.has_securitykey())
822                    .unwrap_or_default()
823                {
824                    Some(entry.get_display_id())
825                } else {
826                    None
827                }
828            })
829            .collect::<Vec<_>>();
830
831        let status = if affected_entries.is_empty() {
832            ProtoDomainUpgradeCheckStatus::Pass7To8SecurityKeys
833        } else {
834            ProtoDomainUpgradeCheckStatus::Fail7To8SecurityKeys
835        };
836
837        Ok(ProtoDomainUpgradeCheckItem {
838            status,
839            from_level: DOMAIN_LEVEL_7,
840            to_level: DOMAIN_LEVEL_8,
841            affected_entries,
842        })
843    }
844
845    pub(crate) fn domain_upgrade_check_7_to_8_oauth2_strict_redirect_uri(
846        &mut self,
847    ) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
848        let filter = filter!(f_and!([
849            f_eq(Attribute::Class, EntryClass::OAuth2ResourceServer.into()),
850            f_andnot(f_pres(Attribute::OAuth2StrictRedirectUri)),
851        ]));
852
853        let results = self.internal_search(filter)?;
854
855        let affected_entries = results
856            .into_iter()
857            .map(|entry| entry.get_display_id())
858            .collect::<Vec<_>>();
859
860        let status = if affected_entries.is_empty() {
861            ProtoDomainUpgradeCheckStatus::Pass7To8Oauth2StrictRedirectUri
862        } else {
863            ProtoDomainUpgradeCheckStatus::Fail7To8Oauth2StrictRedirectUri
864        };
865
866        Ok(ProtoDomainUpgradeCheckItem {
867            status,
868            from_level: DOMAIN_LEVEL_7,
869            to_level: DOMAIN_LEVEL_8,
870            affected_entries,
871        })
872    }
873}
874
#[cfg(test)]
mod tests {
    // use super::{ProtoDomainUpgradeCheckItem, ProtoDomainUpgradeCheckStatus};
    use crate::prelude::*;

    /// The core schema load must be idempotent: it can be applied multiple
    /// times, in the same transaction or across aborted and committed
    /// transactions, without error.
    #[qs_test]
    async fn test_init_idempotent_schema_core(server: &QueryServer) {
        {
            // Setup and abort.
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
        }
        {
            // Apply twice within a single transaction, then commit.
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.commit().is_ok());
        }
        {
            // Now do it again in a new txn, but abort
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
        }
        {
            // Now do it again in a new txn.
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.commit().is_ok());
        }
    }

    /// Exercise the domain level 10 -> 11 migration. Currently this only
    /// verifies the starting level and that the level bump itself succeeds.
    #[qs_test(domain_level=DOMAIN_LEVEL_10)]
    async fn test_migrations_dl10_dl11(server: &QueryServer) {
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Confirm the server was initialised at the expected starting level.
        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_LEVEL_10);

        write_txn.commit().expect("Unable to commit");

        // == pre migration verification. ==
        // check we currently would fail a migration.
        // (placeholder - no pre-migration checks exist for this level yet)

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_LEVEL_11)
            .expect("Unable to set domain level to version 11");

        // post migration verification.

        write_txn.commit().expect("Unable to commit");
    }

    /// Exercise the domain level 11 -> 12 migration. At level 12 the id
    /// verification EC key is removed from existing accounts, and newly
    /// created accounts no longer receive one.
    #[qs_test(domain_level=DOMAIN_LEVEL_11)]
    async fn test_migrations_dl11_dl12(server: &QueryServer) {
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Confirm the server was initialised at the expected starting level.
        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_LEVEL_11);

        // Make a new person.
        let tuuid = Uuid::new_v4();
        let e1 = entry_init!(
            (Attribute::Class, EntryClass::Object.to_value()),
            (Attribute::Class, EntryClass::Person.to_value()),
            (Attribute::Class, EntryClass::Account.to_value()),
            (Attribute::Name, Value::new_iname("testperson1")),
            (Attribute::Uuid, Value::Uuid(tuuid)),
            (Attribute::Description, Value::new_utf8s("testperson1")),
            (Attribute::DisplayName, Value::new_utf8s("testperson1"))
        );

        write_txn
            .internal_create(vec![e1])
            .expect("Unable to create user");

        let user = write_txn
            .internal_search_uuid(tuuid)
            .expect("Unable to load user");

        // They still have an id verification key
        assert!(user.get_ava_set(Attribute::IdVerificationEcKey).is_some());

        write_txn.commit().expect("Unable to commit");

        // == pre migration verification. ==
        // check we currently would fail a migration.
        // (placeholder - no pre-migration checks exist for this level yet)

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_LEVEL_12)
            .expect("Unable to set domain level to version 12");

        // post migration verification.
        let user = write_txn
            .internal_search_uuid(tuuid)
            .expect("Unable to load user");

        // The key has been removed.
        assert!(user.get_ava_set(Attribute::IdVerificationEcKey).is_none());

        // New users don't get a key
        let t2uuid = Uuid::new_v4();
        let e2 = entry_init!(
            (Attribute::Class, EntryClass::Object.to_value()),
            (Attribute::Class, EntryClass::Person.to_value()),
            (Attribute::Class, EntryClass::Account.to_value()),
            (Attribute::Name, Value::new_iname("testperson2")),
            (Attribute::Uuid, Value::Uuid(t2uuid)),
            (Attribute::Description, Value::new_utf8s("testperson2")),
            (Attribute::DisplayName, Value::new_utf8s("testperson2"))
        );

        write_txn
            .internal_create(vec![e2])
            .expect("Unable to create user");

        let user = write_txn
            .internal_search_uuid(t2uuid)
            .expect("Unable to load user");

        // No key!
        assert!(user.get_ava_set(Attribute::IdVerificationEcKey).is_none());

        write_txn.commit().expect("Unable to commit");
    }

    /// Exercise the domain level 12 -> 13 migration. Currently this only
    /// verifies the starting level and that the level bump itself succeeds.
    #[qs_test(domain_level=DOMAIN_LEVEL_12)]
    async fn test_migrations_dl12_dl13(server: &QueryServer) {
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Confirm the server was initialised at the expected starting level.
        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_LEVEL_12);

        write_txn.commit().expect("Unable to commit");

        // == pre migration verification. ==
        // check we currently would fail a migration.
        // (placeholder - no pre-migration checks exist for this level yet)

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_LEVEL_13)
            .expect("Unable to set domain level to version 13");

        // post migration verification.

        write_txn.commit().expect("Unable to commit");
    }
}