diff --git a/src/backends/onnx.js b/src/backends/onnx.js
index b5923a596..b15a3e8a8 100644
--- a/src/backends/onnx.js
+++ b/src/backends/onnx.js
@@ -46,6 +46,9 @@ const DEVICE_TO_EXECUTION_PROVIDER_MAPPING = Object.freeze({
     'webnn-cpu': { name: 'webnn', deviceType: 'cpu' }, // WebNN CPU
 });
 
+/** @type {Array<'verbose' | 'info' | 'warning' | 'error' | 'fatal'>} */
+const LOG_LEVELS = ['verbose', 'info', 'warning', 'error', 'fatal'];
+
 /**
  * The list of supported devices, sorted by priority/performance.
  * @type {import("../utils/devices.js").DeviceType[]}
@@ -149,6 +152,20 @@ let webInitChain = Promise.resolve();
  * @returns {Promise} The ONNX inference session.
  */
 export async function createInferenceSession(buffer_or_path, session_options, session_config) {
+
+    // Clamp `logSeverityLevel` to the 0-4 range onnxruntime expects; non-numeric or out-of-range values fall back to 4 (fatal).
+    /** @type {0|1|2|3|4} */
+    const logSeverityLevel =
+        typeof session_options.logSeverityLevel !== 'number' ||
+        session_options.logSeverityLevel < 0 ||
+        session_options.logSeverityLevel > 4
+            ? 4
+            : session_options.logSeverityLevel;
+
+    ONNX_WEB.env.logLevel = LOG_LEVELS[logSeverityLevel];
+
+    session_options = { ...session_options, logSeverityLevel };
+
     const load = () => InferenceSession.create(buffer_or_path, session_options);
     const session = await (IS_WEB_ENV ? (webInitChain = webInitChain.then(load)) : load());
     session.config = session_config;
diff --git a/src/models.js b/src/models.js
index fc3eed8ef..c77c488ec 100644
--- a/src/models.js
+++ b/src/models.js
@@ -241,6 +241,8 @@ async function getSession(pretrained_model_name_or_path, fileName, options, is_d
 
     // Overwrite `executionProviders` if not specified
     session_options.executionProviders ??= executionProviders;
+    // Set `logSeverityLevel` to 4 (fatal) if not specified
+    session_options.logSeverityLevel ??= 4;
 
     // Overwrite `freeDimensionOverrides` if specified in config and not set in session options
     const free_dimension_overrides = custom_config.free_dimension_overrides;
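
Usage note (commentary, not part of the patch): below is a minimal sketch of how the new option could be exercised from user code. It assumes the `@huggingface/transformers` entry point and that `session_options` supplied at load time reaches `getSession()`, as the models.js hunk implies; the task and model id are placeholders.

import { pipeline } from '@huggingface/transformers';

// Request fatal-only logging (severity 4). Per createInferenceSession above,
// a non-numeric or out-of-range value also falls back to 4, and the matching
// string level ('verbose' | 'info' | 'warning' | 'error' | 'fatal') is
// mirrored onto ONNX_WEB.env.logLevel for onnxruntime-web.
const extractor = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2', {
    session_options: { logSeverityLevel: 4 },
});

Passing 0 instead would surface verbose runtime logs; the `??= 4` in getSession() only applies when the caller leaves the option unset.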