From e0fa9e22872deaf0a3ea88a6430caae58faa81ac Mon Sep 17 00:00:00 2001
From: kaede10
Date: Fri, 2 Feb 2024 14:10:41 +0800
Subject: [PATCH] fix bug

---
 .../backend/models/{ChatCPT.py => ChatGPT.py} | 152 +++++++++---------
 plugins/src/backend/models/model_interface.py |   2 +-
 plugins/src/plugins/pr_review/pr_review.py    |  12 +-
 3 files changed, 85 insertions(+), 81 deletions(-)
 rename plugins/src/backend/models/{ChatCPT.py => ChatGPT.py} (96%)

diff --git a/plugins/src/backend/models/ChatCPT.py b/plugins/src/backend/models/ChatGPT.py
similarity index 96%
rename from plugins/src/backend/models/ChatCPT.py
rename to plugins/src/backend/models/ChatGPT.py
index 978f454..48f6a6f 100644
--- a/plugins/src/backend/models/ChatCPT.py
+++ b/plugins/src/backend/models/ChatGPT.py
@@ -1,76 +1,76 @@
-import json
-import requests
-from loguru import logger
-import tiktoken
-from backend.models.model_adaptor import BaseModel
-
-
-class ChatGPTModel(BaseModel):
-    def __init__(self, model_args, gen_args) -> None:
-        super().__init__(model_args, gen_args)
-        self.model_args = model_args
-        self.gen_args = gen_args
-
-    def chat(self, prompt):
-        data = {
-            "model": "gpt-4",
-            "temperature": 0.05,
-            "top_p": 1,
-            "messages": [
-                {
-                    "role": "system",
-                    "content": self.get_system_message()
-                },
-                {
-                    "role": "user",
-                    "content": prompt
-                }
-            ]
-        }
-        response = requests.post(
-            self.get_url(), json=data
-        )
-        if response.status_code != 200:
-            logger.info("get answer error")
-            logger.info(response.text)
-
-        return response
-
-    def stream_chat(self, prompt):
-        token = self.get_token()
-        if not token:
-            logger.error(f"Failed to get token")
-            return
-        data = {
-            "model": "gpt-4",
-            "temperature": 0.05,
-            "top_p": 1,
-            "messages": [
-                {
-                    "role": "system",
-                    "content": self.get_system_message()
-                },
-                {
-                    "role": "user",
-                    "content": prompt
-                }
-            ]
-        }
-        header = {'Authorization': token}
-        response = requests.post(
-            self.get_url(), json=data, headers=header
-        )
-        if response.status_code != 200:
-            logger.info("get answer error")
-            logger.info(response.text)
-
-        response = requests.post(
-            self.get_url(), json=data, headers=header
-        )
-        resp = ''
-        for res in response.iter_lines():
-            item = res.decode('utf-8')
-            answer = json.loads(item.split('data:')[-1]).get('answer')
-            resp += answer
-        return resp
-
+import json
+import requests
+from loguru import logger
+import tiktoken
+from backend.models.model_adaptor import BaseModel
+
+
+class ChatGPTModel(BaseModel):
+    def __init__(self, model_args, gen_args) -> None:
+        super().__init__(model_args, gen_args)
+        self.model_args = model_args
+        self.gen_args = gen_args
+
+    def chat(self, prompt):
+        data = {
+            "model": "gpt-4",
+            "temperature": 0.05,
+            "top_p": 1,
+            "messages": [
+                {
+                    "role": "system",
+                    "content": self.get_system_message()
+                },
+                {
+                    "role": "user",
+                    "content": prompt
+                }
+            ]
+        }
+        response = requests.post(
+            self.get_url(), json=data
+        )
+        if response.status_code != 200:
+            logger.info("get answer error")
+            logger.info(response.text)
+
+        return response
+
+    def stream_chat(self, prompt):
+        token = self.get_token()
+        if not token:
+            logger.error(f"Failed to get token")
+            return
+        data = {
+            "model": "gpt-4",
+            "temperature": 0.05,
+            "top_p": 1,
+            "messages": [
+                {
+                    "role": "system",
+                    "content": self.get_system_message()
+                },
+                {
+                    "role": "user",
+                    "content": prompt
+                }
+            ]
+        }
+        header = {'Authorization': token}
+        response = requests.post(
+            self.get_url(), json=data, headers=header
+        )
+        if response.status_code != 200:
+            logger.info("get answer error")
+            logger.info(response.text)
+
+        response = requests.post(
+            self.get_url(), json=data, headers=header
+        )
+        resp = ''
+        for res in response.iter_lines():
+            item = res.decode('utf-8')
+            answer = json.loads(item.split('data:')[-1]).get('answer')
+            resp += answer
+        return resp
+
diff --git a/plugins/src/backend/models/model_interface.py b/plugins/src/backend/models/model_interface.py
index 15ed3c1..127498a 100644
--- a/plugins/src/backend/models/model_interface.py
+++ b/plugins/src/backend/models/model_interface.py
@@ -1,7 +1,7 @@
 from abc import abstractmethod
 
-from backend.models.ChatCPT import ChatGPTModel
+from backend.models.ChatGPT import ChatGPTModel
 
 
 class ModelInterface():
     def __init__(self, model_args, gen_args) -> None:
diff --git a/plugins/src/plugins/pr_review/pr_review.py b/plugins/src/plugins/pr_review/pr_review.py
index 617c5e1..d1bbe5c 100644
--- a/plugins/src/plugins/pr_review/pr_review.py
+++ b/plugins/src/plugins/pr_review/pr_review.py
@@ -24,7 +24,7 @@ class CodeReviewPlugin(BasePlugin):
         # Get the comments that already exist in this PR
         existing_commit_ids = self.get_existing_comments()
         self.reviewed_commit_ids = existing_commit_ids
-        
+
         # Get commit ids which will be compared to generate patches
         head_sha, base_sha, highest_reviewed_commit_id = self.get_commit_ids(existing_commit_ids)
 
@@ -203,7 +203,7 @@ class CodeReviewPlugin(BasePlugin):
         filter_ignored_files = []
 
         for a_file in patch_files:
-            if not self.driver_obj.check_path(a_file.get('filename', None)):
+            if self.driver_obj.check_path(a_file.get('filename', None)):
                 filter_selected_files.append(a_file)
             else:
                 logger.info('skip for excluded path: %s'%(a_file.get('filename', None)))
@@ -288,7 +288,8 @@ class CodeReviewPlugin(BasePlugin):
             review_answers =self._do_review(filename, patches, file_comment_line[filename])
             for review_answer in review_answers:
                 hunk_answer = self.parse_result(review_answer)
-                hunk_answers.append(hunk_answer)
+                if hunk_answer:
+                    hunk_answers.append(hunk_answer)
         return hunk_answers
 
     def _split_patch(self, patch):
@@ -419,6 +420,9 @@
 
     def parse_result(self, review_answer):
         diff_new_line_dic, filename, answer = review_answer
+        if len(diff_new_line_dic) == 0:
+            logger.info(f'There is no code to review in filename: {filename}')
+            return
         lgtm = True
         result = re.split('(\d+-\d+:)', answer)
         result = [x.strip() for x in result if x.strip()]
@@ -429,6 +433,6 @@
             comment_line = diff_new_line_dic[int(line)]
             lgtm = ('LGTM' in result[i + 1])
 
-            hunk_answer.append([lgtm, result[i+1], filename, comment_line])
+            hunk_answer.append([lgtm, result[i + 1], filename, comment_line])
 
         return hunk_answer
\ No newline at end of file
--
Gitee
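
Review notes:

The renamed ChatGPT.py keeps two pre-existing issues: stream_chat() POSTs the identical request twice, once for the status check and once more to iterate the response lines, and chat() sends the same payload with no Authorization header at all, which looks inconsistent with stream_chat() even if a separate unauthenticated endpoint is intended. Below is a minimal single-request sketch, not the project's actual code, assuming the same BaseModel helpers shown in the patch (get_token, get_url, get_system_message) and the "data:"-prefixed JSON line format that the original parsing implies:

    import json

    import requests
    from loguru import logger

    def stream_chat(model, prompt):
        # Sketch only: `model` stands in for ChatGPTModel and supplies the
        # BaseModel helpers from the patch (get_token/get_url/get_system_message).
        token = model.get_token()
        if not token:
            logger.error("Failed to get token")  # plain string; no f-string needed
            return None
        data = {
            "model": "gpt-4",
            "temperature": 0.05,
            "top_p": 1,
            "messages": [
                {"role": "system", "content": model.get_system_message()},
                {"role": "user", "content": prompt},
            ],
        }
        # stream=True keeps the connection open, so one POST serves both the
        # status check and the line iteration; the patched code issues two
        # full requests for the same prompt.
        response = requests.post(model.get_url(), json=data,
                                 headers={"Authorization": token}, stream=True)
        if response.status_code != 200:
            logger.info("get answer error")
            logger.info(response.text)
            return None
        resp = ""
        for res in response.iter_lines():
            if not res:
                continue  # iter_lines() yields b"" for keep-alive newlines
            answer = json.loads(res.decode("utf-8").split("data:")[-1]).get("answer")
            if answer:
                resp += answer  # guard: .get("answer") may return None
        return resp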
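
The central fix is the flipped condition in the pr_review.py file filter: before this patch, every file that check_path() accepted was routed to the "skip for excluded path" branch and vice versa, so the plugin reviewed exactly the files it was configured to ignore. check_path() itself is not part of this patch; the toy version below is a labeled assumption that it returns True for paths that should be reviewed:

    from fnmatch import fnmatch

    # Hypothetical stand-in for driver_obj.check_path(); the real exclusion
    # rules are not shown in the patch. True means "review this path".
    EXCLUDED_PATTERNS = ["docs/*", "*.md"]

    def check_path(filename):
        if filename is None:
            return False
        return not any(fnmatch(filename, pattern) for pattern in EXCLUDED_PATTERNS)

    patch_files = [{"filename": "src/app.py"}, {"filename": "docs/guide.md"}]

    # Fixed condition: keep the files check_path() accepts, skip the rest.
    filter_selected_files = [f for f in patch_files if check_path(f.get("filename", None))]
    filter_ignored_files = [f for f in patch_files if not check_path(f.get("filename", None))]

    assert [f["filename"] for f in filter_selected_files] == ["src/app.py"]
    assert [f["filename"] for f in filter_ignored_files] == ["docs/guide.md"]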
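
The other two pr_review.py changes work as a pair: parse_result() now returns early when diff_new_line_dic is empty, and the caller only appends truthy results, so empty hunks no longer leave None entries in hunk_answers. A sketch of the guarded parsing path, assuming the model's answer marks each comment with a "start-end:" range as the patch's regex implies; how the real code derives `line` from that marker falls outside the quoted hunk, so taking the end of the range here is a labeled assumption:

    import re

    from loguru import logger

    def parse_result(review_answer):
        diff_new_line_dic, filename, answer = review_answer
        if len(diff_new_line_dic) == 0:
            logger.info(f'There is no code to review in filename: {filename}')
            return None  # the caller's new "if hunk_answer:" check drops this
        hunk_answer = []
        result = [x.strip() for x in re.split(r'(\d+-\d+:)', answer) if x.strip()]
        for i in range(0, len(result) - 1, 2):
            # Assumption: map the end of the "start-end:" marker to a file line.
            line = result[i].rstrip(':').split('-')[-1]
            comment_line = diff_new_line_dic[int(line)]
            lgtm = 'LGTM' in result[i + 1]
            hunk_answer.append([lgtm, result[i + 1], filename, comment_line])
        return hunk_answer

    # Hypothetical data: hunk-relative end line -> line number in the new file.
    answer = "1-3: LGTM\n5-7: possible None dereference"
    print(parse_result(({3: 42, 7: 46}, 'src/app.py', answer)))
    # [[True, 'LGTM', 'src/app.py', 42],
    #  [False, 'possible None dereference', 'src/app.py', 46]]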