This was generated with ChatGPT, but the problem is that it does not register looking at the top, right, or left. It also does not stop scrolling when I blink: it just briefly prints that the blink was detected and that scrolling was stopped, but the scrolling does not actually stop.
import cv2
import mediapipe as mp
import time
import numpy as np
import psutil
import pyautogui
import json
# Initialize Mediapipe face mesh
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5, refine_landmarks=True)
# Open camera
cap = cv2.VideoCapture(0)
# Variables for tracking
blink_count = 0
last_blink_time = 0
look_start_time = None # Initialize as None to avoid errors
blink_duration = 0
gaze_fixation_time = 3 # Time required to fix gaze before allowing blinks
gaze_confirmed = False # Flag to ensure gaze is locked before registering blinks
scrolling = False # Flag to track scrolling state
gaze_data = {}
# Load saved gaze data if available
def load_saved_gaze():
    global gaze_data
    try:
        with open("eye_gaze_log.txt", "r") as file:
            gaze_data = json.load(file)
    except FileNotFoundError:
        gaze_data = {}
# Save gaze data
def save_log():
    with open("eye_gaze_log.txt", "w") as file:
        json.dump(gaze_data, file)
# Check if Chrome is running
def is_chrome_open():
    for process in psutil.process_iter(attrs=['name']):
        if "chrome" in process.info['name'].lower():
            return True
    return False
# Detect blinks
def detect_blink(face_landmarks):
    global blink_count, last_blink_time, blink_duration
    # 159/145 and 386/374 are the upper/lower eyelid landmarks of the left and right eye
    left_eye_blink = face_landmarks.landmark[159].y - face_landmarks.landmark[145].y
    right_eye_blink = face_landmarks.landmark[386].y - face_landmarks.landmark[374].y
    if left_eye_blink < 0.02 and right_eye_blink < 0.02:
        current_time = time.time()
        if blink_count == 0:
            blink_count = 1
            last_blink_time = current_time
        elif current_time - last_blink_time < 1:
            blink_count += 1
            blink_duration = round(current_time - last_blink_time, 2)
            last_blink_time = current_time
            if blink_count == 2:
                blink_count = 0
                return True
    else:
        blink_count = 0
    return False
# Wait for initial double blink
def wait_for_double_blink():
    print("Blink twice continuously to start recording data...")
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = face_mesh.process(frame_rgb)
        if results.multi_face_landmarks:
            for face_landmarks in results.multi_face_landmarks:
                if detect_blink(face_landmarks):
                    print("Double blink detected! Starting gaze recording...")
                    return
        cv2.imshow("Eye Tracker", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
def record_gaze(direction):
    global gaze_confirmed, gaze_data, look_start_time
    print(f"Look at {direction} for {gaze_fixation_time} seconds and then blink twice to confirm...")
    look_start_time = time.time()
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = face_mesh.process(frame_rgb)
        if results.multi_face_landmarks:
            for face_landmarks in results.multi_face_landmarks:
                if look_start_time and time.time() - look_start_time >= gaze_fixation_time:
                    gaze_confirmed = True
                if gaze_confirmed and detect_blink(face_landmarks):
                    print(f"Gaze position for {direction} recorded!")
                    gaze_data[direction] = [(lm.x, lm.y) for lm in face_landmarks.landmark]
                    save_log()
                    gaze_confirmed = False
                    for i in range(3, 0, -1):
                        print(f"Next location in {i} seconds...")
                        time.sleep(1)
                    return
        cv2.putText(frame, f"Tracking: {direction}", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow("Eye Tracker", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
def start_scrolling(direction):
    global scrolling
    print(f"Scrolling {direction}...")
    scrolling = True
    if direction == "Top":
        pyautogui.scroll(10)
    elif direction == "Bottom":
        pyautogui.scroll(-10)
    elif direction == "Right":
        pyautogui.hscroll(10)
    elif direction == "Left":
        pyautogui.hscroll(-10)
def stop_scrolling():
    global scrolling
    scrolling = False
    print("Scrolling stopped.")
def control_scrolling():
    global scrolling, look_start_time
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = face_mesh.process(frame_rgb)
        if results.multi_face_landmarks:
            for face_landmarks in results.multi_face_landmarks:
                for direction, gaze_positions in gaze_data.items():
                    current_position = [(lm.x, lm.y) for lm in face_landmarks.landmark]
                    if any(abs(current_position[i][0] - gaze_positions[i][0]) < 0.01 and abs(current_position[i][1] - gaze_positions[i][1]) < 0.01 for i in range(min(len(current_position), len(gaze_positions)))):
                        if not scrolling:
                            start_scrolling(direction)
                    elif detect_blink(face_landmarks):
                        stop_scrolling()
                        return
        cv2.imshow("Eye Tracker", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
# Execution sequence
load_saved_gaze()
if is_chrome_open():
    wait_for_double_blink()
    for direction in ['Top', 'Bottom', 'Right', 'Left']:
        record_gaze(direction)
    print("Gaze positions recorded. Starting scrolling detection...")
    control_scrolling()
cap.release()
cv2.destroyAllWindows()
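To narrow down why Top, Right, and Left never trigger, a minimal debugging helper like the one below could be dropped into control_scrolling() to print how far the current frame's landmarks are from each saved snapshot. This is only a sketch: the name gaze_distance and the plain mean-offset metric are assumptions for illustration, not something the script above already contains.

import numpy as np

def gaze_distance(face_landmarks, saved_positions):
    # Mean absolute offset (in normalized coordinates) between the current
    # face-mesh landmarks and one saved gaze snapshot from gaze_data.
    current = np.array([(lm.x, lm.y) for lm in face_landmarks.landmark])
    saved = np.array(saved_positions)
    n = min(len(current), len(saved))
    return float(np.mean(np.abs(current[:n] - saved[:n])))

# Inside the landmark loop of control_scrolling(), printing one value per
# saved direction each frame would look like this:
# for direction, positions in gaze_data.items():
#     print(direction, round(gaze_distance(face_landmarks, positions), 4))

If those printed values stay well above the 0.01 tolerance for three of the four directions, the full-landmark comparison is probably where detection fails, rather than the blink logic.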