last vis script on the office computer:
"""import mne
import pandas as pd
import numpy as np
import matplotlib
# Use the Qt5Agg backend for Matplotlib compatible with PyQt5
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
# Define a function to get the microvolt value of one channel at a given time
def get_microvolt_value(raw, channel_name, time_of_interest):
    ch_index = raw.ch_names.index(channel_name)
    sample_index = int(time_of_interest * raw.info['sfreq'])
    data, times = raw[:, sample_index:sample_index + 1]
    return data[ch_index, 0] * 1e6  # Convert from V to µV
# Set interactive mode for live updates if needed
plt.ion()
# EEG data processing code
file_path = '/home/ghada/test/OpenBCI-RAW-2024-02-12_08-19-15_relax_laugh.txt'
data = pd.read_csv(file_path, skiprows=7, header=None)
eeg_data = data.iloc[:, 1:17].to_numpy().T # Transpose to match MNE's data structure
sfreq = 125 # Sampling frequency in Hz
# Directly set the channel names to match the standard 10-20 system
ch_names = [
    'Fp1', 'Fp2', 'F3', 'F4',
    'C3', 'C4', 'P3', 'P4',
    'O1', 'O2', 'F7', 'F8',
    'T7', 'T8', 'P7', 'P8'
]
# All 16 channels carry EEG data
ch_types = ['eeg'] * 16
# Create an MNE Info object with the direct channel names
info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
# Create the RawArray object using the EEG data and the metadata info
raw = mne.io.RawArray(eeg_data, info)
montage = mne.channels.make_standard_montage('standard_1020')
raw.set_montage(montage)
print(raw)
print(raw.info)
# Extract data from the raw object
data, times = raw[:]
# Print the shape of the data to understand its structure
print(data.shape) # This should show (16, 3320) for your case
# Print the first 5 time points for the first 5 channels
print("First 5 time points for the first 5 channels:")
print(data[:5, :5])
# Print corresponding times
print("Corresponding times for the first 5 time points:")
print(times[:5])
raw.compute_psd(fmax=60).plot(picks="data", exclude="bads")
# Error seen with fmax=100: "Requested fmax (100 Hz) must not exceed ½ the sampling frequency of the data (62.5 Hz)."
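# A generic guard (sketch, not part of the original run): derive fmax from the
# recording's own Nyquist limit instead of hard-coding it, so the error noted
# above cannot recur at other sampling rates.
#nyquist = raw.info['sfreq'] / 2.0  # 62.5 Hz for sfreq = 125 Hz
#raw.compute_psd(fmax=min(60, nyquist)).plot(picks="data", exclude="bads")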
#raw.plot(block=True, duration=5, n_channels=30)
raw.plot(block=True, n_channels=16)
# set up and fit the ICA
ica = mne.preprocessing.ICA(n_components=16, random_state=97, max_iter=800)
ica.fit(raw)
ica.exclude = [1, 2] # details on how we picked these are omitted here
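# Sketch of one automated alternative (not what was done here): MNE's ICA can
# score components against a frontal channel used as an EOG surrogate; using
# Fp1 for this is an assumption about this montage.
#eog_indices, eog_scores = ica.find_bads_eog(raw, ch_name='Fp1')
#ica.exclude = eog_indices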
ica.plot_properties(raw, picks=ica.exclude)
orig_raw = raw.copy()
raw.load_data()
ica.apply(raw)
chs = [
    'Fp1', 'Fp2', 'F3', 'F4',
    'C3', 'C4', 'P3', 'P4',
    'O1', 'O2', 'F7', 'F8',
    'T7', 'T8', 'P7', 'P8',
]
chan_idxs = [raw.ch_names.index(ch) for ch in chs]
orig_raw.plot(order=chan_idxs, start=12, duration=4)
orig_raw.plot().savefig('1.png')
raw.plot(order=chan_idxs, start=12, duration=4)
raw.plot().savefig('2.png')
plt.show()
# Print the microvolt value for a specific channel and time, before and after ICA
print(get_microvolt_value(orig_raw, 'Fp1', 2.5))
print(get_microvolt_value(raw, 'Fp1', 2.5))"""
"""import mne
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Qt5Agg')  # Select the Qt5Agg backend before pyplot is imported
import matplotlib.pyplot as plt
def get_microvolt_value(raw, channel_name, time_of_interest):
    ch_index = raw.ch_names.index(channel_name)
    sample_index = int(time_of_interest * raw.info['sfreq'])
    data, times = raw[:, sample_index:sample_index + 1]
    return data[ch_index, 0] * 1e6  # Convert from V to µV
file_path = '/home/ghada/test/OpenBCI-RAW-2024-02-12_08-19-15_relax_laugh.txt'
data = pd.read_csv(file_path, skiprows=7, header=None)
eeg_data = data.iloc[:, 1:17].to_numpy().T # Transpose to match MNE's data structure
sfreq = 125 # Sampling frequency in Hz
ch_names = ['Fp1', 'Fp2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'F7', 'F8', 'T7', 'T8', 'P7', 'P8']
ch_types = ['eeg'] * 16
info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
raw = mne.io.RawArray(eeg_data, info)
montage = mne.channels.make_standard_montage('standard_1020')
raw.set_montage(montage)
print(raw)
print(raw.info)
# Visualizing the raw data with montage
raw.plot(duration=5, n_channels=16, title="Raw EEG Data with Montage")
# Initialize and fit ICA
ica = mne.preprocessing.ICA(n_components=16, random_state=97, max_iter=800)
ica.fit(raw)
ica.exclude = [1, 2] # Exclude components identified as artifacts
ica.apply(raw) # Apply ICA to remove excluded components
# Save the plots
raw.plot_psd(fmax=60) # PSD plot of the cleaned (post-ICA) data
raw.plot(duration=5, n_channels=16, title="Cleaned EEG Data").savefig('cleaned_data.png')
plt.show() # Ensure all plots are shown
# Print microvolt values at specific times for comparison
print("Original Fp1 at 2.5s:", get_microvolt_value(raw, 'Fp1', 2.5))"""
import mne
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Qt5Agg')  # Select the Qt5Agg backend before pyplot is imported
import matplotlib.pyplot as plt
# Function to get microvolt values from raw EEG data
def get_microvolt_value(raw, channel_name, time_of_interest):
    ch_index = raw.ch_names.index(channel_name)
    sample_index = int(time_of_interest * raw.info['sfreq'])
    data, times = raw[:, sample_index:sample_index + 1]
    return data[ch_index, 0] * 1e6  # Convert from V to µV
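# Example usage (sketch, mirroring the calls in the scripts above; needs the
# `raw` object created further below):
#print("Fp1 at 2.5 s:", get_microvolt_value(raw, 'Fp1', 2.5))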
# Ultracortex Mark IV
#file_path = '/home/ghada/test/OpenBCI-RAW-2024-02-12_08-19-15_relax_laugh.txt'
#file_path = '/home/ghada/Documents/OpenBCI_GUI/Recordings/OpenBCISession_2024-02-12_11-33-28/OpenBCI-RAW-2024-02-12_11-37-56.txt'
# GelFree cap
#file_path = '/home/ghada/Documents/OpenBCI_GUI/Recordings/OpenBCISession_2024-02-12_10-42-47/OpenBCI-RAW-2024-02-12_10-43-35.txt'
file_path = '/home/ghada/Documents/OpenBCI_GUI/Recordings/OpenBCISession_N/A/OpenBCI-RAW-2024-01-23_11-01-15.txt'
# Load the EEG data from the file, skipping the header rows
data = pd.read_csv(file_path, skiprows=7, header=None)
# Extract EEG data and transpose to match MNE's data structure (channels x samples)
eeg_data = data.iloc[:, 1:17].to_numpy().T
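# Caveat (assumption about the export format): OpenBCI GUI raw .txt files are
# typically already in microvolts, while MNE expects volts. If that holds for
# this file, scale before building the RawArray:
#eeg_data = eeg_data * 1e-6  # µV -> V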
# Define the sampling frequency
sfreq = 125 # Sampling frequency in Hz
# Define channel names according to the standard 10-20 system
ch_names = [
    'Fp1', 'Fp2', 'F3', 'F4',
    'C3', 'C4', 'P3', 'P4',
    'O1', 'O2', 'F7', 'F8',
    'T7', 'T8', 'P7', 'P8'
]
ch_types = ['eeg'] * 16 # All channels are EEG type
# Create an MNE Info object with the channel names and types
info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
# Create the RawArray object using the EEG data and the info object
raw = mne.io.RawArray(eeg_data, info)
# Apply the standard 10-20 montage to the raw data
montage = mne.channels.make_standard_montage('standard_1020')
raw.set_montage(montage)
# Visualize the raw EEG data
raw.plot(duration=5, n_channels=16, title="Raw EEG Data with Montage")
# Optionally, show the power spectral density
raw.plot_psd(fmax=60) # Set the maximum frequency to display in the PSD
plt.show()
vis script on the laptop:
import mne
import pandas as pd
import matplotlib.pyplot as plt
# The microvolt helper from the office script, needed by the prints below
def get_microvolt_value(raw, channel_name, time_of_interest):
    ch_index = raw.ch_names.index(channel_name)
    sample_index = int(time_of_interest * raw.info['sfreq'])
    data, times = raw[:, sample_index:sample_index + 1]
    return data[ch_index, 0] * 1e6  # Convert from V to µV

try:
    # EEG data processing code
    file_path = '/home/capture/Documents/OpenBCI_GUI/Recordings/OpenBCISession_2024-02-12_08-19-47/OpenBCI-RAW-2024-02-12_08-21-02_blinking.txt'
    data = pd.read_csv(file_path, skiprows=7, header=None)
    eeg_data = data.iloc[:, 1:17].to_numpy().T  # Transpose to match MNE's data structure
    sfreq = 125  # Sampling frequency in Hz
    # Directly set the channel names to match the standard 10-20 system
    ch_names = [
        'Fp1', 'Fp2', 'F3', 'F4',
        'C3', 'C4', 'P3', 'P4',
        'O1', 'O2', 'F7', 'F8',
        'T7', 'T8', 'P7', 'P8'
    ]
    # All 16 channels carry EEG data
    ch_types = ['eeg'] * 16
    # Create an MNE Info object with the direct channel names
    info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
    # Create the RawArray object using the EEG data and the metadata info
    raw = mne.io.RawArray(eeg_data, info)
    print(raw)
    print(raw.info)
    # Extract data from the raw object
    data, times = raw[:]
    # Print the shape of the data to understand its structure
    print(data.shape)  # This should show (16, 3320) for your case
    # Print the first 5 time points for the first 5 channels
    print("First 5 time points for the first 5 channels:")
    print(data[:5, :5])
    # Print the corresponding times
    print("Corresponding times for the first 5 time points:")
    print(times[:5])
    #raw.compute_psd(fmax=60).plot(picks="data", exclude="bads")
    # Error seen with fmax=100: "Requested fmax (100 Hz) must not exceed ½ the sampling frequency of the data (62.5 Hz)."
    #raw.plot(block=True, duration=5, n_channels=30)
    #raw.plot(block=True, n_channels=16)
    # Set up and fit the ICA
    ica = mne.preprocessing.ICA(n_components=16, random_state=97, max_iter=800)
    ica.fit(raw)
    ica.exclude = [1, 2]  # details on how we picked these are omitted here
    #ica.plot_properties(raw, picks=ica.exclude)
    # Keep an unmodified copy for comparison, then apply the ICA transformation
    orig_raw = raw.copy()
    ica.apply(raw)
    chan_idxs = [raw.ch_names.index(ch) for ch in ch_names]
    #orig_raw.plot(order=chan_idxs, start=12, duration=4)
    orig_raw.plot().savefig('1.png')
    #raw.plot(order=chan_idxs, start=12, duration=4)
    raw.plot().savefig('2.png')
    plt.show()
    # Print the microvolt value for a specific channel and time, before and after ICA
    print(get_microvolt_value(orig_raw, 'Fp1', 2.5))
    print(get_microvolt_value(raw, 'Fp1', 2.5))
except Exception as e:
    print(f"EEG processing failed: {e}")
zed SSVEP stimulus node:
#!/usr/bin/env python3
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
import pyglet
from pyglet import shapes
from pyglet.gl import gl, GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST
import os
import threading
import queue
import shutil
import time
import numpy as np
import message_filters
# Constants
line_thickness = 7
dash_length = 20
gap_length = 10
circle_radius = 50
class PygletROSNode:
    def __init__(self):
        rospy.init_node('pyglet_ros_node', anonymous=True)
        self.bridge = CvBridge()
        #self.subscriber = rospy.Subscriber('/zed/zed_node/left/image_rect_color', Image, self.image_callback)
        self.image_sub = message_filters.Subscriber('/zed/zed_node/left/image_rect_color', Image)
        #self.start_time = time.time()
        self.start_time = rospy.Time.now()
        self.frame_number = 0
        self.image_lock = threading.Lock()
        self.current_image = None
        self.init_pyglet()
        # With a single topic this behaves like a plain Subscriber; the
        # TimeSynchronizer becomes useful once more topics are added
        self.ts = message_filters.TimeSynchronizer([self.image_sub], 10)
        self.ts.registerCallback(self.image_callback)

    def init_pyglet(self):
        self.window = pyglet.window.Window(fullscreen=True, caption=None, resizable=False)
        display = pyglet.canvas.Display()
        screen = display.get_default_screen()
        self.screen_width, self.screen_height = screen.width, screen.height
        self.batch = pyglet.graphics.Batch()
        # Create graphical elements (dashed lines, circles, etc.)
        self.add_graphical_elements()
        # Initialize labels after creating circles
        self.init_labels()

    def add_graphical_elements(self):
        # Grid coordinates: vertical lines at 1/3 and 2/3 of the width,
        # horizontal lines at 1/3 and 2/3 of the height
        x1 = self.screen_width // 3
        x2 = 2 * self.screen_width // 3
        y1 = 0
        y2 = self.screen_height
        y3 = self.screen_height // 3
        y4 = 2 * self.screen_height // 3
        # Creating dashed lines
        self.dashed_lines = []
        for i in range(int((y2 - y1) / (dash_length + gap_length))):
            start_y = y1 + (dash_length + gap_length) * i
            end_y = start_y + dash_length
            self.dashed_lines.append(shapes.Line(x1, start_y, x1, end_y, line_thickness, color=(0, 0, 0), batch=self.batch))
            self.dashed_lines.append(shapes.Line(x2, start_y, x2, end_y, line_thickness, color=(0, 0, 0), batch=self.batch))
        for i in range(int((x2 - x1) / (dash_length + gap_length))):
            start_x = x1 + (dash_length + gap_length) * i
            end_x = start_x + dash_length
            self.dashed_lines.append(shapes.Line(start_x, y3, end_x, y3, line_thickness, color=(0, 0, 0), batch=self.batch))
            self.dashed_lines.append(shapes.Line(start_x, y4, end_x, y4, line_thickness, color=(0, 0, 0), batch=self.batch))
        # Creating circles (note pyglet's origin is bottom-left, so y2 // 6 is near the bottom)
        self.circles = [
            shapes.Circle(x1 + (x2 - x1) // 2, y2 // 6, circle_radius, color=(0, 0, 0), batch=self.batch),  # Bottom
            shapes.Circle(x1 + (x2 - x1) // 2, y2 // 2, circle_radius, color=(0, 0, 0), batch=self.batch),  # Centre
            shapes.Circle(x1 + (x2 - x1) // 2, 5 * y2 // 6, circle_radius, color=(0, 0, 0), batch=self.batch),  # Top
            shapes.Circle(x1 // 2, y3 + (y4 - y3) // 2, circle_radius, color=(0, 0, 0), batch=self.batch),  # Left
            shapes.Circle(x2 + (x2 - x1) // 2, y3 + (y4 - y3) // 2, circle_radius, color=(0, 0, 0), batch=self.batch)  # Right
        ]
        # Flickering circles setup: one timer and one target frequency per circle
        self.flicker_timers = [0.0, 0.0, 0.0, 0.0, 0.0]
        self.flicker_frequencies = [5.0, 15.0, 10.0, 20.0, 30.0]
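        # Note on feasibility (added remark): update() is scheduled at 30 FPS in
        # run() below, so visibility toggles can only land on frame boundaries;
        # the 20 and 30 Hz targets cannot actually be rendered at that rate, and
        # reliable flicker generally needs the toggle rate to divide the display
        # refresh rate (e.g. schedule at 1 / 60.0 on a 60 Hz screen).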
    def init_labels(self):
        # Time and frame labels
        self.time_label = pyglet.text.Label('Time: 0.0',
                                            font_name='Times New Roman',
                                            font_size=14,
                                            x=10, y=10,
                                            anchor_x='left', anchor_y='bottom',
                                            color=(0, 0, 0, 255), batch=self.batch)
        self.frame_label = pyglet.text.Label('Frame: 0',
                                             font_name='Times New Roman',
                                             font_size=14,
                                             x=10, y=30,
                                             anchor_x='left', anchor_y='bottom',
                                             color=(0, 0, 0, 255), batch=self.batch)
        # Frequency labels for each circle
        self.frequency_labels = [
            pyglet.text.Label(f'{freq} Hz',
                              font_name='Times New Roman',
                              font_size=12,
                              x=circle.x, y=circle.y - 50,  # Adjust position as needed
                              anchor_x='center', anchor_y='center',
                              color=(0, 0, 0, 255), batch=self.batch)
            for circle, freq in zip(self.circles, self.flicker_frequencies)
        ]
        self.frame_rate_label = pyglet.text.Label('FPS: 0',
                                                  font_name='Times New Roman',
                                                  font_size=14,
                                                  x=10, y=50,
                                                  anchor_x='left', anchor_y='bottom',
                                                  color=(0, 0, 0, 255), batch=self.batch)

    def image_callback(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)
            return
        #image_timestamp = data.header.stamp.to_sec() #if needed
        with self.image_lock:
            self.current_image = cv_image

    def update(self, dt):
        with self.image_lock:
            if self.current_image is not None:
                # Convert BGR to RGB
                rgb_image = cv2.cvtColor(self.current_image, cv2.COLOR_BGR2RGB)
                # Update flickering circles: toggle visibility twice per cycle so
                # the on/off rate matches the labelled frequency (toggling once
                # per 1/f, as before, flickers at f/2)
                for i, circle in enumerate(self.circles):
                    self.flicker_timers[i] += dt
                    half_period = 1.0 / (2.0 * self.flicker_frequencies[i])
                    if self.flicker_timers[i] >= half_period:
                        circle.visible = not circle.visible
                        self.flicker_timers[i] -= half_period  # Subtract instead of resetting to limit drift
                # Convert the OpenCV image (now in RGB format) to a format pyglet can display
                image_format = 'RGB'
                pitch = -rgb_image.shape[1] * 3  # Negative pitch: rows run top-to-bottom, 3 bytes per RGB pixel
                image_data = pyglet.image.ImageData(rgb_image.shape[1], rgb_image.shape[0],
                                                    image_format, rgb_image.tobytes(), pitch=pitch)
                texture = image_data.get_texture()
                # Update time and frame labels
                #elapsed_time = time.time() - self.start_time
                current_time = rospy.Time.now()
                elapsed_time = (current_time - self.start_time).to_sec()
                self.time_label.text = f'Time: {elapsed_time:.2f} s'
                self.frame_label.text = f'Frame: {self.frame_number}'
                # Update the frame rate label
                fps = pyglet.clock.get_fps()
                self.frame_rate_label.text = f'FPS: {fps:.2f}'
                self.frame_number += 1
                self.window.clear()
                # Stretch the image to fill the window when blitting
                texture.blit(0, 0, width=self.screen_width, height=self.screen_height)
                self.batch.draw()

    def run(self):
        pyglet.clock.schedule_interval(self.update, 1 / 30.0)  # Update at 30 FPS
        pyglet.app.run()


if __name__ == '__main__':
    node = PygletROSNode()
    node.run()