kanidmd_lib/server/
migrations.rs

1use crate::prelude::*;
2
3use crate::migration_data;
4use kanidm_proto::internal::{
5    DomainUpgradeCheckItem as ProtoDomainUpgradeCheckItem,
6    DomainUpgradeCheckReport as ProtoDomainUpgradeCheckReport,
7    DomainUpgradeCheckStatus as ProtoDomainUpgradeCheckStatus,
8};
9
10use super::ServerPhase;
11
12impl QueryServer {
13    #[instrument(level = "info", name = "system_initialisation", skip_all)]
14    pub async fn initialise_helper(
15        &self,
16        ts: Duration,
17        domain_target_level: DomainVersion,
18    ) -> Result<(), OperationError> {
19        // We need to perform this in a single transaction pass to prevent tainting
20        // databases during upgrades.
21        let mut write_txn = self.write(ts).await?;
22
23        // Check our database version - attempt to do an initial indexing
24        // based on the in memory configuration. This ONLY triggers ONCE on
25        // the very first run of the instance when the DB in newely created.
26        write_txn.upgrade_reindex(SYSTEM_INDEX_VERSION)?;
27
28        // Because we init the schema here, and commit, this reloads meaning
29        // that the on-disk index meta has been loaded, so our subsequent
30        // migrations will be correctly indexed.
31        //
32        // Remember, that this would normally mean that it's possible for schema
33        // to be mis-indexed (IE we index the new schemas here before we read
34        // the schema to tell us what's indexed), but because we have the in
35        // mem schema that defines how schema is structured, and this is all
36        // marked "system", then we won't have an issue here.
37        write_txn
38            .initialise_schema_core()
39            .and_then(|_| write_txn.reload())?;
40
41        // This is what tells us if the domain entry existed before or not. This
42        // is now the primary method of migrations and version detection.
43        let db_domain_version = match write_txn.internal_search_uuid(UUID_DOMAIN_INFO) {
44            Ok(e) => Ok(e.get_ava_single_uint32(Attribute::Version).unwrap_or(0)),
45            Err(OperationError::NoMatchingEntries) => Ok(0),
46            Err(r) => Err(r),
47        }?;
48
49        debug!(?db_domain_version, "Before setting internal domain info");
50
51        if db_domain_version == 0 {
52            // This is here to catch when we increase domain levels but didn't create the migration
53            // hooks. If this fails it probably means you need to add another migration hook
54            // in the above.
55            debug_assert!(domain_target_level <= DOMAIN_MAX_LEVEL);
56
57            // No domain info was present, so neither was the rest of the IDM. Bring up the
58            // full IDM here.
59            match domain_target_level {
60                DOMAIN_LEVEL_8 => write_txn.migrate_domain_7_to_8()?,
61                DOMAIN_LEVEL_9 => write_txn.migrate_domain_8_to_9()?,
62                DOMAIN_LEVEL_10 => write_txn.migrate_domain_9_to_10()?,
63                DOMAIN_LEVEL_11 => write_txn.migrate_domain_10_to_11()?,
64                _ => {
65                    error!("Invalid requested domain target level for server bootstrap");
66                    debug_assert!(false);
67                    return Err(OperationError::MG0009InvalidTargetLevelForBootstrap);
68                }
69            }
70        } else {
71            // Domain info was present, so we need to reflect that in our server
72            // domain structures. If we don't do this, the in memory domain level
73            // is stuck at 0 which can confuse init domain info below.
74            //
75            // This also is where the former domain taint flag will be loaded to
76            // d_info so that if the *previous* execution of the database was
77            // a devel version, we'll still trigger the forced remigration in
78            // in the case that we are moving from dev -> stable.
79            write_txn.force_domain_reload();
80
81            write_txn.reload()?;
82
83            // Indicate the schema is now ready, which allows dyngroups to work when they
84            // are created in the next phase of migrations.
85            write_txn.set_phase(ServerPhase::SchemaReady);
86
87            // #2756 - if we *aren't* creating the base IDM entries, then we
88            // need to force dyn groups to reload since we're now at schema
89            // ready. This is done indirectly by ... reloading the schema again.
90            //
91            // This is because dyngroups don't load until server phase >= schemaready
92            // and the reload path for these is either a change in the dyngroup entry
93            // itself or a change to schema reloading. Since we aren't changing the
94            // dyngroup here, we have to go via the schema reload path.
95            write_txn.force_schema_reload();
96
97            // Reload as init idm affects access controls.
98            write_txn.reload()?;
99
100            // Domain info is now ready and reloaded, we can proceed.
101            write_txn.set_phase(ServerPhase::DomainInfoReady);
102        }
103
104        // This is the start of domain info related migrations which we will need in future
105        // to handle replication. Due to the access control rework, and the addition of "managed by"
106        // syntax, we need to ensure both nodes "fence" replication from each other. We do this
107        // by changing domain infos to be incompatible during this phase.
108
109        // The reloads will have populated this structure now.
110        let domain_info_version = write_txn.get_domain_version();
111        let domain_patch_level = write_txn.get_domain_patch_level();
112        let domain_development_taint = write_txn.get_domain_development_taint();
113        debug!(
114            ?db_domain_version,
115            ?domain_patch_level,
116            ?domain_development_taint,
117            "After setting internal domain info"
118        );
119
120        let mut reload_required = false;
121
122        // If the database domain info is a lower version than our target level, we reload.
123        if domain_info_version < domain_target_level {
124            write_txn
125                .internal_apply_domain_migration(domain_target_level)
126                .map(|()| {
127                    warn!("Domain level has been raised to {}", domain_target_level);
128                })?;
129            // Reload if anything in migrations requires it - this triggers the domain migrations
130            // which in turn can trigger schema reloads etc. If the server was just brought up
131            // then we don't need the extra reload since we are already at the correct
132            // version of the server, and this call to set the target level is just for persistance
133            // of the value.
134            if domain_info_version != 0 {
135                reload_required = true;
136            }
137        } else if domain_development_taint {
138            // This forces pre-release versions to re-migrate each start up. This solves
139            // the domain-version-sprawl issue so that during a development cycle we can
140            // do a single domain version bump, and continue to extend the migrations
141            // within that release cycle to contain what we require.
142            //
143            // If this is a pre-release build
144            // AND
145            // we are NOT in a test environment
146            // AND
147            // We did not already need a version migration as above
148            write_txn.domain_remigrate(DOMAIN_PREVIOUS_TGT_LEVEL)?;
149
150            reload_required = true;
151        }
152
153        // If we are new enough to support patches, and we are lower than the target patch level
154        // then a reload will be applied after we raise the patch level.
155        if domain_patch_level < DOMAIN_TGT_PATCH_LEVEL {
156            write_txn
157                .internal_modify_uuid(
158                    UUID_DOMAIN_INFO,
159                    &ModifyList::new_purge_and_set(
160                        Attribute::PatchLevel,
161                        Value::new_uint32(DOMAIN_TGT_PATCH_LEVEL),
162                    ),
163                )
164                .map(|()| {
165                    warn!(
166                        "Domain patch level has been raised to {}",
167                        domain_patch_level
168                    );
169                })?;
170
171            reload_required = true;
172        };
173
174        // Execute whatever operations we have batched up and ready to go. This is needed
175        // to preserve ordering of the operations - if we reloaded after a remigrate then
176        // we would have skipped the patch level fix which needs to have occurred *first*.
177        if reload_required {
178            write_txn.reload()?;
179        }
180
181        // Now set the db/domain devel taint flag to match our current release status
182        // if it changes. This is what breaks the cycle of db taint from dev -> stable
183        let current_devel_flag = option_env!("KANIDM_PRE_RELEASE").is_some();
184        if current_devel_flag {
185            warn!("Domain Development Taint mode is enabled");
186        }
187        if domain_development_taint != current_devel_flag {
188            write_txn.internal_modify_uuid(
189                UUID_DOMAIN_INFO,
190                &ModifyList::new_purge_and_set(
191                    Attribute::DomainDevelopmentTaint,
192                    Value::Bool(current_devel_flag),
193                ),
194            )?;
195        }
196
197        // We are ready to run
198        write_txn.set_phase(ServerPhase::Running);
199
200        // Commit all changes, this also triggers the final reload, this should be a no-op
201        // since we already did all the needed loads above.
202        write_txn.commit()?;
203
204        debug!("Database version check and migrations success! ☀️  ");
205        Ok(())
206    }
207}
208
209impl QueryServerWriteTransaction<'_> {
210    /// Apply a domain migration `to_level`. Panics if `to_level` is not greater than the active
211    /// level.
212    pub(crate) fn internal_apply_domain_migration(
213        &mut self,
214        to_level: u32,
215    ) -> Result<(), OperationError> {
216        assert!(to_level > self.get_domain_version());
217        self.internal_modify_uuid(
218            UUID_DOMAIN_INFO,
219            &ModifyList::new_purge_and_set(Attribute::Version, Value::new_uint32(to_level)),
220        )
221        .and_then(|()| self.reload())
222    }
223
224    fn internal_migrate_or_create_batch(
225        &mut self,
226        msg: &str,
227        entries: Vec<EntryInitNew>,
228    ) -> Result<(), OperationError> {
229        let r: Result<(), _> = entries
230            .into_iter()
231            .try_for_each(|entry| self.internal_migrate_or_create(entry));
232
233        if let Err(err) = r {
234            error!(?err, msg);
235            debug_assert!(false);
236        }
237
238        Ok(())
239    }
240
    #[instrument(level = "debug", skip_all)]
    /// - If the thing exists:
    ///   - Ensure the set of attributes match and are present
    ///     (but don't delete multivalue, or extended attributes in the situation).
    /// - If not:
    ///   - Create the entry
    ///
    /// This will leave extra classes and attributes alone!
    ///
    /// NOTE: `gen_modlist*` IS schema aware and will handle multivalue correctly!
    fn internal_migrate_or_create(
        &mut self,
        e: Entry<EntryInit, EntryNew>,
    ) -> Result<(), OperationError> {
        // Delegate with an empty ignore-list: every attribute is asserted.
        self.internal_migrate_or_create_ignore_attrs(e, &[])
    }
257
258    /// This is the same as [QueryServerWriteTransaction::internal_migrate_or_create] but it will ignore the specified
259    /// list of attributes, so that if an admin has modified those values then we don't
260    /// stomp them.
261    #[instrument(level = "trace", skip_all)]
262    fn internal_migrate_or_create_ignore_attrs(
263        &mut self,
264        mut e: Entry<EntryInit, EntryNew>,
265        attrs: &[Attribute],
266    ) -> Result<(), OperationError> {
267        trace!("operating on {:?}", e.get_uuid());
268
269        let Some(filt) = e.filter_from_attrs(&[Attribute::Uuid]) else {
270            return Err(OperationError::FilterGeneration);
271        };
272
273        trace!("search {:?}", filt);
274
275        let results = self.internal_search(filt.clone())?;
276
277        if results.is_empty() {
278            // It does not exist. Create it.
279            self.internal_create(vec![e])
280        } else if results.len() == 1 {
281            // For each ignored attr, we remove it from entry.
282            for attr in attrs.iter() {
283                e.remove_ava(attr);
284            }
285
286            // If the thing is subset, pass
287            match e.gen_modlist_assert(&self.schema) {
288                Ok(modlist) => {
289                    // Apply to &results[0]
290                    trace!(?modlist);
291                    self.internal_modify(&filt, &modlist)
292                }
293                Err(e) => Err(OperationError::SchemaViolation(e)),
294            }
295        } else {
296            admin_error!(
297                "Invalid Result Set - Expected One Entry for {:?} - {:?}",
298                filt,
299                results
300            );
301            Err(OperationError::InvalidDbState)
302        }
303    }
304
305    /// Migration domain level 7 to 8 (1.4.0)
306    #[instrument(level = "info", skip_all)]
307    pub(crate) fn migrate_domain_7_to_8(&mut self) -> Result<(), OperationError> {
308        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_9 {
309            error!("Unable to raise domain level from 8 to 9.");
310            return Err(OperationError::MG0004DomainLevelInDevelopment);
311        }
312
313        // =========== Apply changes ==============
314        self.internal_migrate_or_create_batch(
315            "phase 1 - schema attrs",
316            migration_data::dl8::phase_1_schema_attrs(),
317        )?;
318
319        self.internal_migrate_or_create_batch(
320            "phase 2 - schema classes",
321            migration_data::dl8::phase_2_schema_classes(),
322        )?;
323
324        // Reload for the new schema.
325        self.reload()?;
326
327        // Reindex?
328        self.reindex(false)?;
329
330        // Set Phase
331        self.set_phase(ServerPhase::SchemaReady);
332
333        self.internal_migrate_or_create_batch(
334            "phase 3 - key provider",
335            migration_data::dl8::phase_3_key_provider(),
336        )?;
337
338        // Reload for the new key providers
339        self.reload()?;
340
341        self.internal_migrate_or_create_batch(
342            "phase 4 - system entries",
343            migration_data::dl8::phase_4_system_entries(),
344        )?;
345
346        // Reload for the new system entries
347        self.reload()?;
348
349        // Domain info is now ready and reloaded, we can proceed.
350        self.set_phase(ServerPhase::DomainInfoReady);
351
352        // Bring up the IDM entries.
353        self.internal_migrate_or_create_batch(
354            "phase 5 - builtin admin entries",
355            migration_data::dl8::phase_5_builtin_admin_entries()?,
356        )?;
357
358        self.internal_migrate_or_create_batch(
359            "phase 6 - builtin not admin entries",
360            migration_data::dl8::phase_6_builtin_non_admin_entries()?,
361        )?;
362
363        self.internal_migrate_or_create_batch(
364            "phase 7 - builtin access control profiles",
365            migration_data::dl8::phase_7_builtin_access_control_profiles(),
366        )?;
367
368        // Reload for all new access controls.
369        self.reload()?;
370
371        Ok(())
372    }
373
    /// Migration domain level 8 to 9 (1.5.0)
    ///
    /// Installs the dl9 schema, key provider, system and IDM entries in
    /// strictly ordered phases. The reloads between phases are load-bearing:
    /// each phase depends on the previous phase's entries being live.
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_8_to_9(&mut self) -> Result<(), OperationError> {
        // Refuse to run if this build does not yet target domain level 9.
        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_9 {
            error!("Unable to raise domain level from 8 to 9.");
            return Err(OperationError::MG0004DomainLevelInDevelopment);
        }

        // =========== Apply changes ==============
        self.internal_migrate_or_create_batch(
            "phase 1 - schema attrs",
            migration_data::dl9::phase_1_schema_attrs(),
        )?;

        self.internal_migrate_or_create_batch(
            "phase 2 - schema classes",
            migration_data::dl9::phase_2_schema_classes(),
        )?;

        // Reload for the new schema.
        self.reload()?;

        // Reindex? Covers any new indexes the schema may have introduced.
        self.reindex(false)?;

        // Set Phase - schema is now live, dyngroups can operate.
        self.set_phase(ServerPhase::SchemaReady);

        self.internal_migrate_or_create_batch(
            "phase 3 - key provider",
            migration_data::dl9::phase_3_key_provider(),
        )?;

        // Reload for the new key providers
        self.reload()?;

        self.internal_migrate_or_create_batch(
            "phase 4 - system entries",
            migration_data::dl9::phase_4_system_entries(),
        )?;

        // Reload for the new system entries
        self.reload()?;

        // Domain info is now ready and reloaded, we can proceed.
        self.set_phase(ServerPhase::DomainInfoReady);

        // Bring up the IDM entries.
        self.internal_migrate_or_create_batch(
            "phase 5 - builtin admin entries",
            migration_data::dl9::phase_5_builtin_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 6 - builtin not admin entries",
            migration_data::dl9::phase_6_builtin_non_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 7 - builtin access control profiles",
            migration_data::dl9::phase_7_builtin_access_control_profiles(),
        )?;

        // Reload for all new access controls.
        self.reload()?;

        Ok(())
    }
442
443    /// Patch Application - This triggers a one-shot fixup task for issue #3178
444    /// to force access controls to re-migrate in existing databases so that they're
445    /// content matches expected values.
446    #[instrument(level = "info", skip_all)]
447    pub(crate) fn migrate_domain_patch_level_2(&mut self) -> Result<(), OperationError> {
448        admin_warn!("applying domain patch 2.");
449
450        debug_assert!(*self.phase >= ServerPhase::SchemaReady);
451
452        let idm_data = migration_data::dl9::phase_7_builtin_access_control_profiles();
453
454        idm_data
455            .into_iter()
456            .try_for_each(|entry| self.internal_migrate_or_create(entry))
457            .map_err(|err| {
458                error!(?err, "migrate_domain_patch_level_2 -> Error");
459                err
460            })?;
461
462        self.reload()?;
463
464        Ok(())
465    }
466
467    /// Migration domain level 9 to 10 (1.6.0)
468    #[instrument(level = "info", skip_all)]
469    pub(crate) fn migrate_domain_9_to_10(&mut self) -> Result<(), OperationError> {
470        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_9 {
471            error!("Unable to raise domain level from 9 to 10.");
472            return Err(OperationError::MG0004DomainLevelInDevelopment);
473        }
474
475        // =========== Apply changes ==============
476        self.internal_migrate_or_create_batch(
477            "phase 1 - schema attrs",
478            migration_data::dl10::phase_1_schema_attrs(),
479        )?;
480
481        self.internal_migrate_or_create_batch(
482            "phase 2 - schema classes",
483            migration_data::dl10::phase_2_schema_classes(),
484        )?;
485
486        // Reload for the new schema.
487        self.reload()?;
488
489        // Since we just loaded in a ton of schema, lets reindex it incase we added
490        // new indexes, or this is a bootstrap and we have no indexes yet.
491        self.reindex(false)?;
492
493        // Set Phase
494        // Indicate the schema is now ready, which allows dyngroups to work when they
495        // are created in the next phase of migrations.
496        self.set_phase(ServerPhase::SchemaReady);
497
498        self.internal_migrate_or_create_batch(
499            "phase 3 - key provider",
500            migration_data::dl10::phase_3_key_provider(),
501        )?;
502
503        // Reload for the new key providers
504        self.reload()?;
505
506        self.internal_migrate_or_create_batch(
507            "phase 4 - system entries",
508            migration_data::dl10::phase_4_system_entries(),
509        )?;
510
511        // Reload for the new system entries
512        self.reload()?;
513
514        // Domain info is now ready and reloaded, we can proceed.
515        self.set_phase(ServerPhase::DomainInfoReady);
516
517        // Bring up the IDM entries.
518        self.internal_migrate_or_create_batch(
519            "phase 5 - builtin admin entries",
520            migration_data::dl10::phase_5_builtin_admin_entries()?,
521        )?;
522
523        self.internal_migrate_or_create_batch(
524            "phase 6 - builtin not admin entries",
525            migration_data::dl10::phase_6_builtin_non_admin_entries()?,
526        )?;
527
528        self.internal_migrate_or_create_batch(
529            "phase 7 - builtin access control profiles",
530            migration_data::dl10::phase_7_builtin_access_control_profiles(),
531        )?;
532
533        self.reload()?;
534
535        // =========== OAuth2 Cryptography Migration ==============
536
537        debug!("START OAUTH2 MIGRATION");
538
539        // Load all the OAuth2 providers.
540        let all_oauth2_rs_entries = self.internal_search(filter!(f_eq(
541            Attribute::Class,
542            EntryClass::OAuth2ResourceServer.into()
543        )))?;
544
545        if !all_oauth2_rs_entries.is_empty() {
546            let entry_iter = all_oauth2_rs_entries.iter().map(|tgt_entry| {
547                let entry_uuid = tgt_entry.get_uuid();
548                let mut modlist = ModifyList::new_list(vec![
549                    Modify::Present(Attribute::Class, EntryClass::KeyObject.to_value()),
550                    Modify::Present(Attribute::Class, EntryClass::KeyObjectJwtEs256.to_value()),
551                    Modify::Present(Attribute::Class, EntryClass::KeyObjectJweA128GCM.to_value()),
552                    // Delete the fernet key, rs256 if any, and the es256 key
553                    Modify::Purged(Attribute::OAuth2RsTokenKey),
554                    Modify::Purged(Attribute::Es256PrivateKeyDer),
555                    Modify::Purged(Attribute::Rs256PrivateKeyDer),
556                ]);
557
558                trace!(?tgt_entry);
559
560                // Import the ES256 Key
561                if let Some(es256_private_der) =
562                    tgt_entry.get_ava_single_private_binary(Attribute::Es256PrivateKeyDer)
563                {
564                    modlist.push_mod(Modify::Present(
565                        Attribute::KeyActionImportJwsEs256,
566                        Value::PrivateBinary(es256_private_der.to_vec()),
567                    ))
568                } else {
569                    warn!("Unable to migrate es256 key");
570                }
571
572                let has_rs256 = tgt_entry
573                    .get_ava_single_bool(Attribute::OAuth2JwtLegacyCryptoEnable)
574                    .unwrap_or(false);
575
576                // If there is an rs256 key, import it.
577                // Import the RS256 Key
578                if has_rs256 {
579                    modlist.push_mod(Modify::Present(
580                        Attribute::Class,
581                        EntryClass::KeyObjectJwtEs256.to_value(),
582                    ));
583
584                    if let Some(rs256_private_der) =
585                        tgt_entry.get_ava_single_private_binary(Attribute::Rs256PrivateKeyDer)
586                    {
587                        modlist.push_mod(Modify::Present(
588                            Attribute::KeyActionImportJwsRs256,
589                            Value::PrivateBinary(rs256_private_der.to_vec()),
590                        ))
591                    } else {
592                        warn!("Unable to migrate rs256 key");
593                    }
594                }
595
596                (entry_uuid, modlist)
597            });
598
599            self.internal_batch_modify(entry_iter)?;
600        }
601
602        // Reload for new keys, and updated oauth2
603        self.reload()?;
604
605        // Done!
606
607        Ok(())
608    }
609
610    /// Migration domain level 10 to 11 (1.7.0)
611    #[instrument(level = "info", skip_all)]
612    pub(crate) fn migrate_domain_10_to_11(&mut self) -> Result<(), OperationError> {
613        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_10 {
614            error!("Unable to raise domain level from 10 to 11.");
615            return Err(OperationError::MG0004DomainLevelInDevelopment);
616        }
617
618        Ok(())
619    }
620
621    #[instrument(level = "info", skip_all)]
622    pub(crate) fn initialise_schema_core(&mut self) -> Result<(), OperationError> {
623        admin_debug!("initialise_schema_core -> start ...");
624        // Load in all the "core" schema, that we already have in "memory".
625        let entries = self.schema.to_entries();
626
627        // admin_debug!("Dumping schemas: {:?}", entries);
628
629        // internal_migrate_or_create.
630        let r: Result<_, _> = entries.into_iter().try_for_each(|e| {
631            trace!(?e, "init schema entry");
632            self.internal_migrate_or_create(e)
633        });
634        if r.is_ok() {
635            admin_debug!("initialise_schema_core -> Ok!");
636        } else {
637            admin_error!(?r, "initialise_schema_core -> Error");
638        }
639        // why do we have error handling if it's always supposed to be `Ok`?
640        debug_assert!(r.is_ok());
641        r
642    }
643}
644
645impl QueryServerReadTransaction<'_> {
646    /// Retrieve the domain info of this server
647    pub fn domain_upgrade_check(
648        &mut self,
649    ) -> Result<ProtoDomainUpgradeCheckReport, OperationError> {
650        let d_info = &self.d_info;
651
652        let name = d_info.d_name.clone();
653        let uuid = d_info.d_uuid;
654        let current_level = d_info.d_vers;
655        let upgrade_level = DOMAIN_TGT_NEXT_LEVEL;
656
657        let mut report_items = Vec::with_capacity(1);
658
659        if current_level <= DOMAIN_LEVEL_7 && upgrade_level >= DOMAIN_LEVEL_8 {
660            let item = self
661                .domain_upgrade_check_7_to_8_security_keys()
662                .map_err(|err| {
663                    error!(
664                        ?err,
665                        "Failed to perform domain upgrade check 7 to 8 - security-keys"
666                    );
667                    err
668                })?;
669            report_items.push(item);
670
671            let item = self
672                .domain_upgrade_check_7_to_8_oauth2_strict_redirect_uri()
673                .map_err(|err| {
674                    error!(
675                        ?err,
676                        "Failed to perform domain upgrade check 7 to 8 - oauth2-strict-redirect_uri"
677                    );
678                    err
679                })?;
680            report_items.push(item);
681        }
682
683        Ok(ProtoDomainUpgradeCheckReport {
684            name,
685            uuid,
686            current_level,
687            upgrade_level,
688            report_items,
689        })
690    }
691
692    pub(crate) fn domain_upgrade_check_7_to_8_security_keys(
693        &mut self,
694    ) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
695        let filter = filter!(f_and!([
696            f_eq(Attribute::Class, EntryClass::Account.into()),
697            f_pres(Attribute::PrimaryCredential),
698        ]));
699
700        let results = self.internal_search(filter)?;
701
702        let affected_entries = results
703            .into_iter()
704            .filter_map(|entry| {
705                if entry
706                    .get_ava_single_credential(Attribute::PrimaryCredential)
707                    .map(|cred| cred.has_securitykey())
708                    .unwrap_or_default()
709                {
710                    Some(entry.get_display_id())
711                } else {
712                    None
713                }
714            })
715            .collect::<Vec<_>>();
716
717        let status = if affected_entries.is_empty() {
718            ProtoDomainUpgradeCheckStatus::Pass7To8SecurityKeys
719        } else {
720            ProtoDomainUpgradeCheckStatus::Fail7To8SecurityKeys
721        };
722
723        Ok(ProtoDomainUpgradeCheckItem {
724            status,
725            from_level: DOMAIN_LEVEL_7,
726            to_level: DOMAIN_LEVEL_8,
727            affected_entries,
728        })
729    }
730
731    pub(crate) fn domain_upgrade_check_7_to_8_oauth2_strict_redirect_uri(
732        &mut self,
733    ) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
734        let filter = filter!(f_and!([
735            f_eq(Attribute::Class, EntryClass::OAuth2ResourceServer.into()),
736            f_andnot(f_pres(Attribute::OAuth2StrictRedirectUri)),
737        ]));
738
739        let results = self.internal_search(filter)?;
740
741        let affected_entries = results
742            .into_iter()
743            .map(|entry| entry.get_display_id())
744            .collect::<Vec<_>>();
745
746        let status = if affected_entries.is_empty() {
747            ProtoDomainUpgradeCheckStatus::Pass7To8Oauth2StrictRedirectUri
748        } else {
749            ProtoDomainUpgradeCheckStatus::Fail7To8Oauth2StrictRedirectUri
750        };
751
752        Ok(ProtoDomainUpgradeCheckItem {
753            status,
754            from_level: DOMAIN_LEVEL_7,
755            to_level: DOMAIN_LEVEL_8,
756            affected_entries,
757        })
758    }
759}
760
761#[cfg(test)]
762mod tests {
763    // use super::{ProtoDomainUpgradeCheckItem, ProtoDomainUpgradeCheckStatus};
764    use crate::prelude::*;
765
    #[qs_test]
    async fn test_init_idempotent_schema_core(server: &QueryServer) {
        // initialise_schema_core must be idempotent: repeated invocations,
        // with or without an intervening commit or abort, must all succeed.
        {
            // Setup and abort.
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
        }
        {
            // Run it twice within a single txn, then commit.
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.commit().is_ok());
        }
        {
            // Now do it again in a new txn, but abort
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
        }
        {
            // Now do it again in a new txn.
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.commit().is_ok());
        }
    }
791
    #[qs_test(domain_level=DOMAIN_LEVEL_8)]
    async fn test_migrations_dl8_dl9(server: &QueryServer) {
        // The server was bootstrapped at DL8 - confirm the persisted version
        // before attempting the upgrade.
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_LEVEL_8);

        write_txn.commit().expect("Unable to commit");

        // == pre migration verification. ==
        // check we currently would fail a migration.

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_LEVEL_9)
            .expect("Unable to set domain level to version 9");

        // post migration verification.

        write_txn.commit().expect("Unable to commit");
    }
825
    #[qs_test(domain_level=DOMAIN_LEVEL_9)]
    async fn test_migrations_dl9_dl10(server: &QueryServer) {
        // The server was bootstrapped at DL9 - confirm the persisted version
        // before attempting the upgrade.
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_LEVEL_9);

        write_txn.commit().expect("Unable to commit");

        // == pre migration verification. ==
        // check we currently would fail a migration.

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_LEVEL_10)
            .expect("Unable to set domain level to version 10");

        // post migration verification.

        write_txn.commit().expect("Unable to commit");
    }
859
    #[qs_test(domain_level=DOMAIN_LEVEL_10)]
    async fn test_migrations_dl10_dl11(server: &QueryServer) {
        // The server was bootstrapped at DL10 - confirm the persisted version
        // before attempting the upgrade.
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_LEVEL_10);

        write_txn.commit().expect("Unable to commit");

        // == pre migration verification. ==
        // check we currently would fail a migration.

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_LEVEL_11)
            .expect("Unable to set domain level to version 11");

        // post migration verification.

        write_txn.commit().expect("Unable to commit");
    }
893}