- email verification when the address has not yet been verified (component)

- further PAGERIS UI updates.
- OLLAMA AI
This commit is contained in:
Surya Paolo
2025-12-11 18:34:39 +01:00
parent 6fdb101092
commit 89a8d10eae
44 changed files with 7915 additions and 3565 deletions


@@ -0,0 +1,178 @@
/**
* useOllama - Vue 3 composable for using Ollama in your Quasar applications
*
* Usage example:
*
* import { useOllama } from './useOllama';
*
* const { generate, chat, isLoading, error } = useOllama({
* baseUrl: 'http://localhost:11434',
* model: 'llama3.2'
* });
*
* const result = await generate('Write a poem');
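*
* The streaming variants fill the reactive `streamingContent` ref as chunks
* arrive, so a template can bind to it directly (sketch):
*
* await generateStream('Tell me a story');
* // template: <div>{{ streamingContent }}</div>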
*/
import { ref, reactive } from 'vue';
import OllamaService from './OllamaService.js';
export function useOllama(options = {}) {
const {
baseUrl = 'http://localhost:11434',
model = 'llama3.2',
temperature = 0.7,
} = options;
// State
const isLoading = ref(false);
const error = ref(null);
const streamingContent = ref('');
const models = ref([]);
// Service instance
const service = new OllamaService(baseUrl);
service.setDefaultModel(model);
/**
* Generate text
*/
const generate = async (prompt, opts = {}) => {
isLoading.value = true;
error.value = null;
streamingContent.value = '';
try {
const result = await service.generate({
prompt,
temperature,
...opts,
});
return result;
} catch (e) {
error.value = e.message;
throw e;
} finally {
isLoading.value = false;
}
};
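// Usage sketch (names and option values are illustrative; opts pass through
// to OllamaService.generate):
//   const { generate, isLoading, error } = useOllama({ model: 'llama3.2' });
//   const answer = await generate('Summarize this release note', { temperature: 0.2 });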
/**
* Generate text with streaming
*/
const generateStream = async (prompt, opts = {}) => {
isLoading.value = true;
error.value = null;
streamingContent.value = '';
try {
const result = await service.streamGenerate(
{ prompt, temperature, ...opts },
(chunk, full) => {
streamingContent.value = full;
}
);
return result;
} catch (e) {
error.value = e.message;
throw e;
} finally {
isLoading.value = false;
}
};
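// Streaming sketch: streamingContent updates on every chunk, so the UI can
// render partial output while the request is still in flight (illustrative):
//   await generateStream('Explain Vue composables in one paragraph');
//   // or react to partial text: watch(streamingContent, (text) => render(text));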
/**
* Chat with a message history
*/
const chat = async (messages, opts = {}) => {
isLoading.value = true;
error.value = null;
streamingContent.value = '';
try {
const result = await service.chat({
messages,
temperature,
...opts,
});
return result;
} catch (e) {
error.value = e.message;
throw e;
} finally {
isLoading.value = false;
}
};
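// Chat sketch: messages follow Ollama's { role, content } shape, with roles
// 'system', 'user', and 'assistant' (example values are illustrative):
//   const reply = await chat([
//     { role: 'system', content: 'You are a concise assistant.' },
//     { role: 'user', content: 'What does this composable do?' },
//   ]);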
/**
* Chat with streaming
*/
const chatStream = async (messages, opts = {}) => {
isLoading.value = true;
error.value = null;
streamingContent.value = '';
try {
const result = await service.streamChat(
{ messages, temperature, ...opts },
(chunk, full) => {
streamingContent.value = full;
}
);
return result;
} catch (e) {
error.value = e.message;
throw e;
} finally {
isLoading.value = false;
}
};
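// Multi-turn sketch (assumes streamChat resolves with the full assistant
// reply once streaming completes): append it to keep conversational context:
//   const reply = await chatStream(history);
//   history.push({ role: 'assistant', content: reply });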
/**
* Load available models
*/
const loadModels = async () => {
try {
models.value = await service.listModels();
return models.value;
} catch (e) {
error.value = e.message;
return [];
}
};
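// Typical use (sketch): fetch the model list once the component mounts
// (import onMounted from 'vue'); the entry shape depends on what
// OllamaService.listModels() returns:
//   onMounted(loadModels);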
// Helper methods
const generateText = (prompt, opts) => service.generateText(prompt, opts);
const generateCode = (prompt, lang, opts) => service.generateCode(prompt, lang, opts);
const translate = (text, lang, opts) => service.translate(text, lang, opts);
const summarize = (text, opts) => service.summarize(text, opts);
const extractJSON = (text, schema, opts) => service.extractJSON(text, schema, opts);
const analyzeSentiment = (text, opts) => service.analyzeSentiment(text, opts);
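// Helper sketch: these delegate to OllamaService, so argument shapes are
// assumed from the wrappers above (example values are illustrative):
//   const italian = await translate('Good morning', 'Italian');
//   const mood = await analyzeSentiment('The new dashboard looks great!');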
return {
// State
isLoading,
error,
streamingContent,
models,
// Core methods
generate,
generateStream,
chat,
chatStream,
loadModels,
// Helper methods
generateText,
generateCode,
translate,
summarize,
extractJSON,
analyzeSentiment,
// Service access
service,
};
}
export default useOllama;