@article{KroisGraetzHoltfreteretal.2019,
  author    = {Joachim Krois and Christian Graetz and Birte Holtfreter and Paul-Georg Jost-Brinkmann and Thomas Kocher and Falk Schwendicke},
  title     = {Evaluating Modeling and Validation Strategies for Tooth Loss},
  journal   = {Journal of Dental Research},
  volume    = {98},
  number    = {10},
  publisher = {SAGE Publications},
  address   = {Los Angeles, CA},
  issn      = {0022-0345},
  doi       = {10.1177/0022034519864889},
  url       = {https://nbn-resolving.org/urn:nbn:de:gbv:9-opus-38160},
  pages     = {1088--1095},
  year      = {2019},
  abstract  = {Prediction models learn patterns from available data (training) and are then validated on new data (testing). Prediction modeling is increasingly common in dental research. We aimed to evaluate how different model development and validation steps affect the predictive performance of tooth loss prediction models of patients with periodontitis. Two independent cohorts (627 patients, 11,651 teeth) were followed over a mean ± SD of 18.2 ± 5.6 y (Kiel cohort) and 6.6 ± 2.9 y (Greifswald cohort). Tooth loss and 10 patient- and tooth-level predictors were recorded. The impact of different model development and validation steps was evaluated: 1) model complexity (logistic regression, recursive partitioning, random forest, extreme gradient boosting), 2) sample size (full data set or 10\%, 25\%, or 75\% of cases dropped at random), 3) prediction periods (maximum 10, 15, or 20 y or uncensored), and 4) validation schemes (internal or external by centers/time). Tooth loss was generally a rare event (880 teeth were lost). All models showed limited sensitivity but high specificity. Patients' age and tooth loss at baseline as well as probing pocket depths showed high variable importance. More complex models (random forest, extreme gradient boosting) had no consistent advantages over simpler ones (logistic regression, recursive partitioning). Internal validation (in sample) overestimated the predictive power (area under the curve up to 0.90), while external validation (out of sample) yielded lower areas under the curve (range 0.62 to 0.82). Reducing the sample size decreased the predictive power, particularly for more complex models. When the model was trained in one period and tested in another, model outcomes were similar to the base case, indicating that temporal validation is a valid option; censoring the prediction period had only limited impact. No model showed higher accuracy than the no-information rate. In conclusion, none of the developed models would be useful in a clinical setting, despite high accuracy. During modeling, rigorous development and external validation should be applied and reported accordingly.},
  language  = {en}
}