Browse Source

move prompt templates out of python bindings

go-opts
Jeffrey Morgan 3 years ago
parent
commit
9164981d72
  1. 8
      cmd/cmd.go
  2. 1
      llama/CMakeLists.txt
  3. 13
      server/routes.go
  4. 0
      templates/alpaca.prompt
  5. 0
      templates/falcon.prompt
  6. 0
      templates/gpt4.prompt
  7. 0
      templates/hermes.prompt
  8. 0
      templates/mpt.prompt
  9. 0
      templates/oasst.prompt
  10. 0
      templates/orca.prompt
  11. 0
      templates/qlora.prompt
  12. 0
      templates/tulu.prompt
  13. 0
      templates/ultralm.prompt
  14. 0
      templates/vicuna.prompt
  15. 0
      templates/wizardcoder.prompt
  16. 0
      templates/wizardlm.prompt

8
cmd/cmd.go

@@ -59,7 +59,7 @@ func NewCLI() *cobra.Command {
rootCmd := &cobra.Command{
Use: "ollama",
Short: "Run any large language model on any machine.",
Short: "Large language model runner",
CompletionOptions: cobra.CompletionOptions{
DisableDefaultCmd: true,
},
@@ -88,14 +88,13 @@ func NewCLI() *cobra.Command {
},
}
/*
runCmd := &cobra.Command{
Use: "run",
Short: "Run a model and submit prompts.",
RunE: func(cmd *cobra.Command. args []string) error {
RunE: func(cmd *cobra.Command, args []string) error {
return nil
},
}
*/
serveCmd := &cobra.Command{
Use: "serve",
@@ -132,6 +131,7 @@ func NewCLI() *cobra.Command {
rootCmd.AddCommand(
modelsCmd,
serveCmd,
runCmd,
)
return rootCmd

1
llama/CMakeLists.txt

@@ -23,5 +23,6 @@ target_include_directories(binding PRIVATE ${llama_cpp_SOURCE_DIR})
target_include_directories(binding PRIVATE ${llama_cpp_SOURCE_DIR}/examples)
target_link_libraries(binding llama ggml_static)
configure_file(${llama_cpp_SOURCE_DIR}/ggml-metal.metal ${CMAKE_CURRENT_BINARY_DIR}/ggml-metal.metal COPYONLY)
configure_file(${llama_cpp_BINARY_DIR}/libllama.a ${CMAKE_CURRENT_BINARY_DIR}/libllama.a COPYONLY)
configure_file(${llama_cpp_BINARY_DIR}/libggml_static.a ${CMAKE_CURRENT_BINARY_DIR}/libggml_static.a COPYONLY)

13
server/routes.go

@@ -36,6 +36,8 @@ func Serve(ln net.Listener) error {
})
r.POST("/api/generate", func(c *gin.Context) {
// TODO: set prompt from template
var req api.GenerateRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
@@ -64,13 +66,10 @@ func Serve(ln net.Listener) error {
return true
})
/*
embeds, err := l.Embeddings(text)
if err != nil {
fmt.Printf("Embeddings: error %s \n", err.Error())
}
*/
// embeds, err := l.Embeddings(text)
// if err != nil {
// fmt.Printf("Embeddings: error %s \n", err.Error())
// }
})
log.Printf("Listening on %s", ln.Addr())

0
python/ollama/templates/alpaca.prompt → templates/alpaca.prompt

0
python/ollama/templates/falcon.prompt → templates/falcon.prompt

0
python/ollama/templates/gpt4.prompt → templates/gpt4.prompt

0
python/ollama/templates/hermes.prompt → templates/hermes.prompt

0
python/ollama/templates/mpt.prompt → templates/mpt.prompt

0
python/ollama/templates/oasst.prompt → templates/oasst.prompt

0
python/ollama/templates/orca.prompt → templates/orca.prompt

0
python/ollama/templates/qlora.prompt → templates/qlora.prompt

0
python/ollama/templates/tulu.prompt → templates/tulu.prompt

0
python/ollama/templates/ultralm.prompt → templates/ultralm.prompt

0
python/ollama/templates/vicuna.prompt → templates/vicuna.prompt

0
python/ollama/templates/wizardcoder.prompt → templates/wizardcoder.prompt

0
python/ollama/templates/wizardlm.prompt → templates/wizardlm.prompt

Loading…
Cancel
Save