mirror of https://github.com/hiyouga/LLaMA-Factory.git
synced 2025-08-23 22:32:54 +08:00
fix typo
Former-commit-id: 92fa515e977889bf1003f174785fe600db14f70e
This commit is contained in:
parent b064d205c6
commit 4ec1bff116
@@ -9,7 +9,7 @@

 ## Changelog

-[23/06/29] We provide a reproducible example of training a chat model using instruction-following datasets, see this [HuggingFace Repo](https://huggingface.co/baichuan-inc/baichuan-7B) for details.
+[23/06/29] We provide a reproducible example of training a chat model using instruction-following datasets, see this [HuggingFace Repo](https://huggingface.co/hiyouga/baichuan-7b-sft) for details.

 [23/06/22] Now we align the [demo API](src/api_demo.py) with the [OpenAI's](https://platform.openai.com/docs/api-reference/chat) format where you can insert the fine-tuned model in arbitrary ChatGPT-based applications.
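Because the demo API mirrors OpenAI's chat-completions schema (see the [23/06/22] entry above), a model served by src/api_demo.py can be queried with an ordinary OpenAI-style request. The sketch below illustrates this with plain `requests`; the host, port, and route (`http://localhost:8000/v1/chat/completions`) and the placeholder `model` name are assumptions about the demo's defaults, not something this commit specifies.

```python
import requests

# Assumed default endpoint of src/api_demo.py; adjust host/port/route to your launch settings.
API_URL = "http://localhost:8000/v1/chat/completions"

payload = {
    "model": "llama-sft",  # hypothetical name; the demo serves whatever checkpoint it was started with
    "messages": [{"role": "user", "content": "Hello! Who are you?"}],
    "stream": False,
}

resp = requests.post(API_URL, json=payload, timeout=60)
resp.raise_for_status()
# OpenAI-compatible response: the reply lives in choices[0].message.content
print(resp.json()["choices"][0]["message"]["content"])
```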
@@ -10,6 +10,7 @@ import uvicorn

 from threading import Thread
 from pydantic import BaseModel, Field
 from fastapi import FastAPI, HTTPException
+from fastapi.middleware.cors import CORSMiddleware
 from contextlib import asynccontextmanager
 from transformers import TextIteratorStreamer
 from starlette.responses import StreamingResponse
@@ -34,6 +35,15 @@ async def lifespan(app: FastAPI): # collects GPU memory

 app = FastAPI(lifespan=lifespan)

+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+
 class ModelCard(BaseModel):
     id: str
     object: str = "model"
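The new `CORSMiddleware` block is what lets browser-based front ends hosted on other origins (for example a ChatGPT-style web UI) call the demo API: it answers CORS preflight requests and attaches the `Access-Control-*` headers to responses. A minimal, self-contained sketch of the same configuration, exercised with FastAPI's `TestClient`, might look like the following; the `/ping` route and the example origin are illustrative and not part of api_demo.py.

```python
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.testclient import TestClient

app = FastAPI()

# Same permissive CORS configuration as the one added in this commit.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],      # accept requests from any origin
    allow_credentials=True,   # allow cookies / auth headers to be sent
    allow_methods=["*"],      # allow every HTTP method in preflight checks
    allow_headers=["*"],      # allow every request header in preflight checks
)

@app.get("/ping")
def ping():
    return {"status": "ok"}

# Simulate the preflight request a browser sends before a cross-origin call.
client = TestClient(app)
preflight = client.options(
    "/ping",
    headers={
        "Origin": "http://example.com",
        "Access-Control-Request-Method": "GET",
    },
)
print(preflight.status_code)  # 200: the preflight is accepted
print(preflight.headers.get("access-control-allow-origin"))  # origin the browser is told is allowed
```

Depending on the installed Starlette version, the allow-origin header may come back as `*` or as the echoed request origin (wildcard origins combined with `allow_credentials=True` are handled specially); either way the preflight succeeds and the cross-origin call can proceed.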