commit ee057c8c19
Merge tag 'v6.11-rc3' into trace/ring-buffer/core

The "reserve_mem" kernel command line parameter has been pulled into
v6.11. Merge the latest -rc3 to allow the persistent ring buffer memory
to be mapped at the address specified by the "reserve_mem" command line
parameter.

Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
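For context on how the two pieces fit together: "reserve_mem" takes the form
reserve_mem=size:align:label, and a persistent tracing instance can then name
that label instead of a fixed physical address. The line below is an
illustration only; the 12M size, the "trace" label, and the "boot_mapped"
instance name are example values, not taken from this commit:

    reserve_mem=12M:4096:trace trace_instance=boot_mapped@trace

Because the label resolves to the same reserved region on each boot, the ring
buffer contents can be recovered after a reboot when RAM contents are
preserved.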
251 lines · 8.9 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H

#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/poll.h>

#include <uapi/linux/trace_mmap.h>

struct trace_buffer;
struct ring_buffer_iter;

/*
 * Don't refer to this struct directly, use functions below.
 */
struct ring_buffer_event {
	u32		type_len:5, time_delta:27;

	u32		array[];
};

/**
 * enum ring_buffer_type - internal ring buffer types
 *
 * @RINGBUF_TYPE_PADDING:	Left over page padding or discarded event
 *				 If time_delta is 0:
 *				  array is ignored
 *				  size is variable depending on how much
 *				  padding is needed
 *				 If time_delta is non zero:
 *				  array[0] holds the actual length
 *				  size = 4 + length (bytes)
 *
 * @RINGBUF_TYPE_TIME_EXTEND:	Extend the time delta
 *				 array[0] = time delta (28 .. 59)
 *				 size = 8 bytes
 *
 * @RINGBUF_TYPE_TIME_STAMP:	Absolute timestamp
 *				 Same format as TIME_EXTEND except that the
 *				 value is an absolute timestamp, not a delta
 *				 event.time_delta contains bottom 27 bits
 *				 array[0] = top (28 .. 59) bits
 *				 size = 8 bytes
 *
 * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
 *				Data record
 *				 If type_len is zero:
 *				  array[0] holds the actual length
 *				  array[1..(length+3)/4] holds data
 *				  size = 4 + length (bytes)
 *				 else
 *				  length = type_len << 2
 *				  array[0..(length+3)/4-1] holds data
 *				  size = 4 + length (bytes)
 */
enum ring_buffer_type {
	RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
	RINGBUF_TYPE_PADDING,
	RINGBUF_TYPE_TIME_EXTEND,
	RINGBUF_TYPE_TIME_STAMP,
};
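/*
 * Worked example of the data-record encoding (editorial note, not part
 * of the original header): an event with type_len = 3 carries
 * length = 3 << 2 = 12 bytes of payload in array[0..2], for a total of
 * 4 + 12 = 16 bytes including the 32-bit header word.  Payloads larger
 * than RINGBUF_TYPE_DATA_TYPE_LEN_MAX << 2 = 112 bytes fall back to
 * type_len = 0, with the actual length stored in array[0].
 */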

unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event);

/*
 * ring_buffer_discard_commit will remove an event that has not
 *   been committed yet. If this is used, then ring_buffer_unlock_commit
 *   must not be called on the discarded event. This function
 *   will try to remove the event from the ring buffer completely
 *   if another event has not been written after it.
 *
 * Example use:
 *
 *  if (some_condition)
 *    ring_buffer_discard_commit(buffer, event);
 *  else
 *    ring_buffer_unlock_commit(buffer);
 */
void ring_buffer_discard_commit(struct trace_buffer *buffer,
				struct ring_buffer_event *event);

/*
 * size is in bytes for each per CPU buffer.
 */
struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);

struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
					       int order, unsigned long start,
					       unsigned long range_size,
					       struct lock_class_key *key);

bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, long *text,
				 long *data);

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc(size, flags)			\
({							\
	static struct lock_class_key __key;		\
	__ring_buffer_alloc((size), (flags), &__key);	\
})

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc_range(size, flags, order, start, range_size)	\
({									\
	static struct lock_class_key __key;				\
	__ring_buffer_alloc_range((size), (flags), (order), (start),	\
				  (range_size), &__key);		\
})

typedef bool (*ring_buffer_cond_fn)(void *data);
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
		     ring_buffer_cond_fn cond, void *data);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			  struct file *filp, poll_table *poll_table, int full);
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);

#define RING_BUFFER_ALL_CPUS -1

void ring_buffer_free(struct trace_buffer *buffer);

int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);

void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);

struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
						   unsigned long length);
int ring_buffer_unlock_commit(struct trace_buffer *buffer);
int ring_buffer_write(struct trace_buffer *buffer,
		      unsigned long length, void *data);

void ring_buffer_nest_start(struct trace_buffer *buffer);
void ring_buffer_nest_end(struct trace_buffer *buffer);

struct ring_buffer_event *
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events);
struct ring_buffer_event *
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events);

struct ring_buffer_iter *
ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);

struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);

unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer);

void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
void ring_buffer_reset(struct trace_buffer *buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
			 struct trace_buffer *buffer_b, int cpu);
#else
static inline int
ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
		     struct trace_buffer *buffer_b, int cpu)
{
	return -ENODEV;
}
#endif

bool ring_buffer_empty(struct trace_buffer *buffer);
bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);

void ring_buffer_record_disable(struct trace_buffer *buffer);
void ring_buffer_record_enable(struct trace_buffer *buffer);
void ring_buffer_record_off(struct trace_buffer *buffer);
void ring_buffer_record_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct trace_buffer *buffer);
unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_time_stamp(struct trace_buffer *buffer);
void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts);
void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void));
void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);

size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);

struct buffer_data_read_page;
struct buffer_data_read_page *
ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
				struct buffer_data_read_page *page);
int ring_buffer_read_page(struct trace_buffer *buffer,
			  struct buffer_data_read_page *data_page,
			  size_t len, int cpu, int full);
void *ring_buffer_read_page_data(struct buffer_data_read_page *page);

struct trace_seq;

int ring_buffer_print_entry_header(struct trace_seq *s);
int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s);

int ring_buffer_subbuf_order_get(struct trace_buffer *buffer);
int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order);
int ring_buffer_subbuf_size_get(struct trace_buffer *buffer);

enum ring_buffer_flags {
	RB_FL_OVERWRITE		= 1 << 0,
};

#ifdef CONFIG_RING_BUFFER
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#else
#define trace_rb_cpu_prepare	NULL
#endif

int ring_buffer_map(struct trace_buffer *buffer, int cpu,
		    struct vm_area_struct *vma);
int ring_buffer_unmap(struct trace_buffer *buffer, int cpu);
int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu);
#endif /* _LINUX_RING_BUFFER_H */
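To make the API above concrete, here is a minimal write/read round trip using
only functions declared in this header. It is a sketch, not kernel code:
example_roundtrip() is a hypothetical helper for a kernel-module context,
error handling is abbreviated, and the commit is assumed to happen on the same
CPU that made the reservation.

/* Illustrative sketch; example_roundtrip() is hypothetical. */
#include <linux/ring_buffer.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/printk.h>

static int example_roundtrip(void)
{
	struct trace_buffer *buffer;
	struct ring_buffer_event *event;
	unsigned long lost_events = 0;
	u64 ts;
	int cpu;

	/* One buffer of PAGE_SIZE bytes per CPU, overwriting when full. */
	buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	/* Reserve 8 bytes, fill the payload, then commit. */
	event = ring_buffer_lock_reserve(buffer, 8);
	if (event) {
		memcpy(ring_buffer_event_data(event), "example", 8);
		ring_buffer_unlock_commit(buffer);
	}

	/* The writer may have run on any CPU, so scan every per-CPU buffer. */
	for_each_possible_cpu(cpu) {
		event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
		if (event)
			pr_info("event: %u bytes, ts=%llu\n",
				ring_buffer_event_length(event),
				(unsigned long long)ts);
	}

	ring_buffer_free(buffer);
	return 0;
}

For a producer that may decide after reserving that the record is not needed,
ring_buffer_discard_commit() replaces ring_buffer_unlock_commit(), as the
comment above its declaration shows.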