System prompts define the behavior mode of the AI assistant, including character settings, language styles, task modes, and specific behaviors. Qwen-1.8B-Chat and Qwen-72B-Chat have been specifically trained with enhanced system prompt capabilities.
Model Support: System prompt enhancement is currently available in:
✅ Qwen-72B-Chat
✅ Qwen-1.8B-Chat
Other models have basic system prompt support but not the enhanced training.
# Steer the model with a one-line system prompt, then ask for a compliment.
response, _ = model.chat(
    tokenizer,
    "My colleague works diligently",
    history=None,
    system="You will write beautiful compliments according to needs",
)
print(response)
# Output: Your colleague is an outstanding worker! Their dedication and hard work
# are truly inspiring. They always go above and beyond to ensure that their tasks
# are completed on time and to the highest standard...
# Persona prompt (Chinese): an experienced traditional-medicine doctor who
# speaks gently, explains with metaphors, and asks about symptoms first.
system_prompt = """你是一位经验丰富的中医,精通中医理论和实践。你说话温和,喜欢用比喻来解释医学概念。你总是先了解病人的症状,再给出建议。"""

response, history = model.chat(
    tokenizer,
    "我最近总是感觉疲劳",
    history=None,
    system=system_prompt,
)
print(response)
# Model responds as an experienced Chinese medicine doctor
# Task-mode prompt: a strict translator persona with explicit output rules.
system_prompt = """You are a professional translator specializing in English-Chinese translation.
Rules:
1. Preserve the original meaning accurately
2. Use natural, fluent target language
3. Maintain the tone and style of the source
4. Only output the translation, no explanations"""

response, _ = model.chat(
    tokenizer,
    "Translate: The quick brown fox jumps over the lazy dog.",
    history=None,
    system=system_prompt,
)
# Safety-rule system prompt: enumerates hard constraints on assistant behavior.
system_prompt = """You are a helpful assistant with the following rules:
1. Never discuss or provide information about illegal activities
2. Decline requests for medical diagnosis (suggest consulting doctors)
3. Refuse to generate harmful or offensive content
4. Always cite sources when providing factual information
5. Admit when you don't know something"""
2. Test Boundaries
# Test rule enforcement: a query the rules require the model to refuse.
response, _ = model.chat(
    tokenizer,
    "How do I hack a website?",
    history=None,
    system=system_prompt,
)
# Should decline and explain why
3. Verify Consistency
# Test across multiple turns: the system prompt is re-supplied every call,
# and the growing history is threaded through so rules persist.
history = None
for query in test_queries:
    response, history = model.chat(
        tokenizer,
        query,
        history=history,
        system=system_prompt,
    )
    # Verify rules are maintained
# Structured persona prompt using markdown-style sections
# (role, personality, approach, constraints).
system_prompt = """# Role
You are an AI tutor specializing in mathematics education.

# Personality
- Patient and encouraging
- Enthusiastic about teaching
- Uses analogies and real-world examples

# Teaching Approach
1. Assess student's understanding first
2. Break complex problems into steps
3. Ask guiding questions instead of giving direct answers
4. Provide positive reinforcement

# Constraints
- Don't solve homework directly
- Focus on teaching concepts, not just answers
- Use age-appropriate language
- Encourage practice and exploration"""

response, history = model.chat(
    tokenizer,
    "I don't understand quadratic equations",
    history=None,
    system=system_prompt,
)
def get_system_prompt(task_type, user_preferences):
    """Build a system prompt tailored to the task type and user preferences.

    Args:
        task_type: "creative" or "analytical" adds a matching style
            instruction; any other value leaves the base prompt unchanged.
        user_preferences: Mapping with optional keys:
            - "concise": truthy value adds a brevity instruction.
            - "language": the value "zh" adds a respond-in-Chinese instruction.

    Returns:
        The assembled system prompt string.
    """
    base = "You are a helpful assistant."
    if task_type == "creative":
        base += " You are creative and think outside the box."
    elif task_type == "analytical":
        base += " You are logical and detail-oriented."
    if user_preferences.get("concise"):
        base += " Keep responses concise and to the point."
    if user_preferences.get("language") == "zh":
        base += " 用中文回答。"
    return base


# Usage. Guarded under __main__ so importing this module has no side effects:
# the original demo ran at import time and referenced `query` (and `model`,
# `tokenizer`), which are not defined here, guaranteeing a NameError.
if __name__ == "__main__":
    system = get_system_prompt(
        task_type="creative",
        user_preferences={"concise": True, "language": "zh"},
    )
    response, _ = model.chat(tokenizer, query, history=None, system=system)
class ContextualSystemPrompt:
    """Manage a base system prompt plus key/value context lines.

    Context entries are appended to the base prompt as a "Context:" section,
    one "- key: value" line per entry, in insertion order.
    """

    def __init__(self, base_prompt: str):
        self.base = base_prompt
        # Insertion-ordered mapping of contextual key -> value.
        self.context = {}

    def add_context(self, key: str, value: str):
        """Add (or overwrite) one piece of contextual information."""
        self.context[key] = value

    def build(self) -> str:
        """Return the complete system prompt (base + optional Context block)."""
        prompt = self.base
        if self.context:
            prompt += "\n\nContext:\n"
            for key, value in self.context.items():
                prompt += f"- {key}: {value}\n"
        return prompt


# Usage. Guarded under __main__: the original demo executed at import time and
# called `model.chat` with `model`/`tokenizer` that are not defined here.
if __name__ == "__main__":
    system_builder = ContextualSystemPrompt(
        "You are a customer service representative."
    )
    system_builder.add_context("company", "TechCorp Inc.")
    system_builder.add_context("product", "Smart Home Hub")
    system_builder.add_context("user_tier", "Premium")
    response, _ = model.chat(
        tokenizer,
        "I need help with my device",
        history=None,
        system=system_builder.build(),
    )
# Few-shot style system prompt: teach a JSON output format by example.
system_prompt = """You are a helpful assistant that formats data.
Example input: Name: John, Age: 30, City: NYC
Example output:
{
  "name": "John",
  "age": 30,
  "city": "NYC"
}
Follow this format for all inputs."""
# Moderation prompt: fixed flagging rules plus a machine-parsable output format.
system_prompt = """You are a content moderator.
Rules:
1. Flag content containing profanity
2. Flag content with personal information
3. Flag spam or promotional content
4. Provide reason code for each flag
5. Use format: [STATUS: FLAGGED/APPROVED] Reason: ..."""
# Roleplay prompt: the model speaks as Ada Lovelace in first person,
# with background facts and a style guide to keep the voice consistent.
system_prompt = """You are Ada Lovelace, speaking in first person.
Background:
- Born 1815, daughter of Lord Byron
- Mathematician and writer
- Worked on Charles Babbage's Analytical Engine
- Considered the first computer programmer
Style:
- Formal Victorian English
- Passionate about mathematics
- Forward-thinking about computing"""
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load a chat model with enhanced system prompt support.
tokenizer = AutoTokenizer.from_pretrained(
    "Qwen/Qwen-72B-Chat",
    trust_remote_code=True,
)
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen-72B-Chat",
    device_map="auto",
    trust_remote_code=True,
).eval()

# Comprehensive system prompt: persona, format, and constraints in one place.
system_prompt = """You are an expert Python programming tutor.
Teaching Style:
- Patient and encouraging
- Use simple language for complex concepts
- Provide working code examples
- Explain not just 'how' but 'why'
Response Format:
1. Brief explanation of the concept
2. Code example with comments
3. Expected output
4. Common pitfalls to avoid
Constraints:
- Don't just give solutions; teach the approach
- Encourage best practices
- Mention relevant documentation"""

# Interactive REPL: same system prompt every turn, history threaded through.
history = None
while True:
    user_input = input("You: ")
    if user_input.lower() == 'quit':
        break
    response, history = model.chat(
        tokenizer,
        user_input,
        history=history,
        system=system_prompt,
    )
    print(f"Tutor: {response}\n")