Commit dbf23307 authored by Lubomir Bulej's avatar Lubomir Bulej

Cleanup and update to use the new buffer and buffer packing ops

parent a122f398
......@@ -4,6 +4,9 @@
#include <string.h>
#include <jvmti.h>
//
#define PROP_SHADOW_HOST "svm-agent.shadow.host"
#define CONF_SHADOW_HOST_DEFAULT "localhost"
......@@ -14,6 +17,7 @@
#define PROP_DEBUG "svm-agent.debug"
#define CONF_DEBUG_DEFAULT false
//
struct config {
char * shadow_host;
......
#include "freehandler.h"
#include "shared/buffer.h"
#include "shared/buffpack.h"
#include "shared/bytebuffer.h"
#include "shared/bufferpack.h"
#include "shared/messagetype.h"
#include "pbmanager.h"
......@@ -9,80 +9,86 @@
#include "jvmtiutil.h"
static jvmtiEnv *jvmti_env;
static jrawMonitorID obj_free_lock;
#define MAX_OBJ_FREE_EVENTS 4096
static process_buffs *obj_free_buff = NULL;
static process_buffs * obj_free_buff = NULL;
static jint obj_free_event_count = 0;
static size_t obj_free_event_count_pos = 0;
void
fh_init (jvmtiEnv * env) {
jvmti_env = env;
fh_init (jvmtiEnv * jvmti) {
jvmtiError error = (*jvmti)->CreateRawMonitor (jvmti, "obj free", &obj_free_lock);
check_jvmti_error (jvmti, error, "failed to create object free lock");
}
static inline void
__object_free (jlong tag) {
// allocate new obj free buffer
if (obj_free_buff == NULL) {
// obtain buffer
obj_free_buff = pb_utility_get ();
// reset number of events in the buffer
obj_free_event_count = 0;
// get pointer to the location where the count of requests will be stored
obj_free_event_count_pos = messager_objfree_header (
obj_free_buff->analysis_buff
);
}
jvmtiError error = (*jvmti_env)->CreateRawMonitor (jvmti_env, "obj free", &obj_free_lock);
check_jvmti_error (jvmti_env, error, "Cannot create raw monitor");
// obtain message buffer
buffer_t * buff = obj_free_buff->analysis_buff;
messager_objfree_item (buff, tag);
// update the number of free events
obj_free_event_count++;
buffer_put_jint (buff, obj_free_event_count, obj_free_event_count_pos);
if (obj_free_event_count >= MAX_OBJ_FREE_EVENTS) {
// NOTE: We can queue buffer to the sending queue. This is because
// object tagging thread is first sending the objects and then
// deallocating the global references. We cannot have here objects
// that weren't sent already
// NOTE2: It is mandatory to submit to the sending queue directly
// because GC (that is generating these events) will block the
// tagging thread. And with not working tagging thread, we can
// run out of buffers.
sender_enqueue (obj_free_buff);
// cleanup
obj_free_buff = NULL;
obj_free_event_count = 0;
obj_free_event_count_pos = 0;
}
}
void
fh_object_free (jlong tag) {
enter_critical_section (jvmti_env, obj_free_lock);
{
// allocate new obj free buffer
if (obj_free_buff == NULL) {
// obtain buffer
obj_free_buff = pb_utility_get ();
// reset number of events in the buffer
obj_free_event_count = 0;
// get pointer to the location where the count of requests will be stored
obj_free_event_count_pos = messager_objfree_header (
obj_free_buff->analysis_buff
);
}
// obtain message buffer
buffer * buff = obj_free_buff->analysis_buff;
messager_objfree_item (buff, tag);
// update the number of free events
++obj_free_event_count;
buff_put_int (buff, obj_free_event_count_pos, obj_free_event_count);
if (obj_free_event_count >= MAX_OBJ_FREE_EVENTS) {
// NOTE: We can queue buffer to the sending queue. This is because
// object tagging thread is first sending the objects and then
// deallocating the global references. We cannot have here objects
// that weren't sent already
// NOTE2: It is mandatory to submit to the sending queue directly
// because gc (that is generating these events) will block the
// tagging thread. And with not working tagging thread, we can
// run out of buffers.
sender_enqueue (obj_free_buff);
// cleanup
obj_free_buff = NULL;
obj_free_event_count = 0;
obj_free_event_count_pos = 0;
}
fh_object_free (jvmtiEnv * jvmti, jlong tag) {
enter_critical_section (jvmti, obj_free_lock);
__object_free (tag);
exit_critical_section (jvmti, obj_free_lock);
}
static inline void
__send_buffer () {
if (obj_free_buff != NULL) {
sender_enqueue (obj_free_buff);
obj_free_buff = NULL;
}
exit_critical_section (jvmti_env, obj_free_lock);
}
void
fh_send_buffer () {
fh_send_buffer (jvmtiEnv * jvmti) {
// send object free buffer - with lock
enter_critical_section (jvmti_env, obj_free_lock);
{
if (obj_free_buff != NULL) {
sender_enqueue (obj_free_buff);
obj_free_buff = NULL;
}
}
exit_critical_section (jvmti_env, obj_free_lock);
enter_critical_section (jvmti, obj_free_lock);
__send_buffer ();
exit_critical_section (jvmti, obj_free_lock);
}
......@@ -3,8 +3,10 @@
#include <jvmti.h>
void fh_init (jvmtiEnv * env);
void fh_object_free (jlong tag);
void fh_send_buffer ();
//
void fh_init (jvmtiEnv * jvmti);
void fh_object_free (jvmtiEnv * jvmti, jlong tag);
void fh_send_buffer (jvmtiEnv * jvmti);
#endif /* _FREEHANDLER_H_ */
#include "globalbuffer.h"
#include "shared/threadlocal.h"
#include "shared/buffpack.h"
#include "shared/bufferpack.h"
#include "shared/messagetype.h"
#include "pbmanager.h"
......@@ -41,25 +41,24 @@ __to_buff_alloc_buffers (to_buff_struct * tobs, tldata * tld) {
}
static inline objtag_rec *
__buffer_ptr_objtag (buffer_t * restrict buffer, const size_t position) {
	//
	// Return a typed pointer to the object tagging record located at
	// the given position. The buffer should be a command buffer
	// containing object tagging records.
	//
	void * record = buffer_ptr_block (buffer, position, sizeof (objtag_rec));
	return (objtag_rec *) record;
}
static void
__shift_tag_positions (buffer * cmd_buff, size_t shift) {
__shift_tag_positions (buffer_t * buffer, const size_t shift) {
//
// Shift data buffer position in all object tagging
// records in the given command buffer.
// Shift tag positions in the data buffer in all object tagging
// requests in the given buffer. The buffer is assumed to be a
// command buffer containing only object tagging requests.
//
const size_t len = buffer_filled (cmd_buff);
for (size_t offset = 0; offset < len; offset += sizeof (objtag_rec)) {
//
// Fetch the object tagging record from the command buffer,
// shift the position in the data buffer, and store the
// record back to the command buffer.
//
// TODO LB: Can we do this in-place?
//
objtag_rec ot_rec;
buffer_read (cmd_buff, offset, &ot_rec, sizeof (objtag_rec));
ot_rec.buff_pos += shift;
buffer_fill_at_pos (cmd_buff, offset, &ot_rec, sizeof (objtag_rec));
const size_t len = buffer_length (buffer);
for (size_t pos = 0; pos < len; pos += sizeof (objtag_rec)) {
objtag_rec * ot_record = __buffer_ptr_objtag (buffer, pos);
ot_record->buff_pos += shift;
}
}
......@@ -99,7 +98,7 @@ glbuffer_commit (jvmtiEnv * jvmti) {
// copying the local buffers to the total-order buffer, adjust
// these positions so that they are relative to the total-order.
//
size_t to_offset = buffer_filled (tobs->pb->analysis_buff);
size_t to_offset = buffer_position (tobs->pb->analysis_buff);
__shift_tag_positions (tld->local_pb->command_buff, to_offset);
//
......@@ -107,22 +106,8 @@ glbuffer_commit (jvmtiEnv * jvmti) {
// from the local analysis buffer to the total-order buffers,
// and clear the local buffers afterwards.
//
buffer_fill (
tobs->pb->analysis_buff,
// NOTE: normally access the buffer using methods
tld->local_pb->analysis_buff->buff,
tld->local_pb->analysis_buff->occupied
);
buffer_fill (
tobs->pb->command_buff,
// NOTE: normally access the buffer using methods
tld->local_pb->command_buff->buff,
tld->local_pb->command_buff->occupied
);
buffer_clean (tld->local_pb->analysis_buff);
buffer_clean (tld->local_pb->command_buff);
buffer_drain_to (tld->local_pb->analysis_buff, tobs->pb->analysis_buff);
buffer_drain_to (tld->local_pb->command_buff, tobs->pb->command_buff);
//
// Increment the number of analysis requests in the thread-order
......@@ -132,9 +117,9 @@ glbuffer_commit (jvmtiEnv * jvmti) {
//
tobs->analysis_count++;
buff_put_int (
tobs->pb->analysis_buff, tobs->analysis_count_pos,
tobs->analysis_count
buffer_put_jint (
tobs->pb->analysis_buff, tobs->analysis_count,
tobs->analysis_count_pos
);
//
......
#ifndef _GLOBALBUFFER_H_
#define _GLOBALBUFFER_H_
#include <jvmti.h>
#include "shared/procbuffs.h"
#include "shared/buffer.h"
#include <jvmti.h>
// *** buffers for total ordering ***
......
......@@ -181,6 +181,32 @@ jvmti_get_phase (jvmtiEnv * jvmti) {
}
/**
 * Returns the current thread. This method can only be
 * called during the START and LIVE phases.
 */
jthread
jvmti_get_current_thread (jvmtiEnv * jvmti) {
	jthread result = NULL;

	jvmtiError error = (*jvmti)->GetCurrentThread (jvmti, &result);
	check_jvmti_error (jvmti, error, "failed to get current thread");

	return result;
}
/**
 * Returns the size (in bytes) of the given object. This method
 * can only be called during the START and LIVE phases.
 */
jlong
jvmti_get_object_size (jvmtiEnv * jvmti, jobject object) {
	jlong result = -1;

	jvmtiError error = (*jvmti)->GetObjectSize (jvmti, object, &result);
	check_jvmti_error (jvmti, error, "failed to get object size");

	return result;
}
/**
* Gets tag from the given object. This method can only be called
* during the START and LIVE phases.
......
......@@ -27,6 +27,10 @@ char * jvmti_get_system_property_string (
jvmtiEnv * jvmti, const char * name, const char * dflval
);
jthread jvmti_get_current_thread (jvmtiEnv * jvmti);
jlong jvmti_get_object_size (jvmtiEnv * jvmti, jobject object);
jlong jvmti_get_object_tag (jvmtiEnv * jvmti, jobject object);
jlong jvmti_set_object_tag (jvmtiEnv * jvmti, jobject object, jlong tag);
......
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include "netref.h"
#include "shared/buffpack.h"
#include "shared/bufferpack.h"
#include "shared/messagetype.h"
#include "jvmtiutil.h"
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
//
// first available object id
static volatile jlong avail_object_id = 1;
......@@ -136,13 +138,13 @@ __netref_create (jlong object_id, jint class_id, bool is_class, bool has_data) {
// forward declaration
jlong get_net_reference (
JNIEnv * jni_env, jvmtiEnv * jvmti,
buffer * new_obj_buff, jobject obj
buffer_t * new_obj_buff, jobject obj
);
static int
_object_is_class (jvmtiEnv * jvmti, jobject object) {
	//
	// An object is considered a class if JVMTI can provide a class
	// signature for it. TODO: isn't there a better way?
	//
	jvmtiError error = (*jvmti)->GetClassSignature (jvmti, object, NULL, NULL);
	return (error == JVMTI_ERROR_NONE);
}
......@@ -161,18 +163,18 @@ _set_net_reference (
static void
_pack_class_info (
buffer * buff, jlong class_netref, char * class_sig, char * class_gen,
buffer_t * buff, jlong class_netref, char * class_sig, char * class_gen,
jlong class_loader_netref, jlong super_class_netref
) {
//
// Class generic signature can be NULL, so
// Class generic signature can be NULL, so
// we pack an empty string instead.
//
if (class_gen == NULL) {
class_gen = "";
}
messager_classinfo_header (
messager_classinfo (
buff, class_netref, class_sig, class_gen,
class_loader_netref, super_class_netref
);
......@@ -181,14 +183,14 @@ _pack_class_info (
static jlong
_set_net_reference_for_class (
JNIEnv * jni, jvmtiEnv * jvmti, buffer * buff, jclass klass
JNIEnv * jni, jvmtiEnv * jvmti, buffer_t * buff, jclass klass
) {
//
// Create extra space for local references. For details see:
// http://docs.oracle.com/javase/6/docs/platform/jvmti/jvmti.html#refs
//
jint result = (*jni)->PushLocalFrame (jni, 16);
check_error (result != 0, "failed to allocate more local references");
check_error (result != 0, "failed to allocate 16 more local references");
// *** set net reference for class ***
......@@ -252,7 +254,7 @@ _set_net_reference_for_class (
static jint
_get_class_id_for_class (
JNIEnv * jni, jvmtiEnv * jvmti, buffer * buff, jclass klass
JNIEnv * jni, jvmtiEnv * jvmti, buffer_t * buff, jclass klass
) {
//
// Retrieve the tag of the given class to extract the class
......@@ -272,7 +274,7 @@ _get_class_id_for_class (
static jint
_get_class_id_for_object (
JNIEnv * jni, jvmtiEnv * jvmti, buffer * buff, jobject object
JNIEnv * jni, jvmtiEnv * jvmti, buffer_t * buff, jobject object
) {
//
// Get class of this object and its class id.
......@@ -287,7 +289,7 @@ _get_class_id_for_object (
static jlong
_set_net_reference_for_object (
JNIEnv * jni, jvmtiEnv * jvmti, buffer * buff, jobject object
JNIEnv * jni, jvmtiEnv * jvmti, buffer_t * buff, jobject object
) {
//
// Resolve object class id and get the next available object
......@@ -314,7 +316,7 @@ _set_net_reference_for_object (
*/
jlong
get_net_reference (
JNIEnv * jni, jvmtiEnv * jvmti, buffer * new_obj_buff, jobject object
JNIEnv * jni, jvmtiEnv * jvmti, buffer_t * new_obj_buff, jobject object
) {
// Net reference for NULL is 0.
if (object == NULL) {
......
#ifndef _NETREF_H
#define _NETREF_H
#define _NETREF_H
#include "shared/bytebuffer.h"
#include <stdbool.h>
#include <jvmti.h>
#include <jni.h>
#include "shared/buffer.h"
//
#define NULL_NET_REF 0
......@@ -20,7 +22,7 @@ void netref_set_has_data (jlong * netref, bool has_data);
// !!! invocation of this method should be protected by lock until the reference
// is queued for sending
jlong get_net_reference (
JNIEnv * jni, jvmtiEnv * jvmti, buffer * new_obj_buff, jobject object
JNIEnv * jni, jvmtiEnv * jvmti, buffer_t * new_obj_buff, jobject object
);
// !!! invocation of this method should be protected by lock until the reference
......
......@@ -21,14 +21,14 @@ pb_init () {
bq_create (&empty_q, BQ_BUFFERS, sizeof (process_buffs *));
for (int i = 0; i < BQ_BUFFERS + BQ_UTILITY; i++) {
process_buffs *pb = &(pb_list[i]);
process_buffs * pb = &(pb_list [i]);
// allocate process_buffs
pb->analysis_buff = malloc (sizeof (buffer));
buffer_alloc (pb->analysis_buff);
pb->analysis_buff = malloc (sizeof (buffer_t));
buffer_init (pb->analysis_buff, BUFFER_INITIAL_CAPACITY);
pb->command_buff = malloc (sizeof (buffer));
buffer_alloc (pb->command_buff);
pb->command_buff = malloc (sizeof (buffer_t));
buffer_init (pb->command_buff, BUFFER_INITIAL_CAPACITY);
if (i < BQ_BUFFERS) {
// add buffer to the empty queue
......@@ -43,13 +43,11 @@ pb_init () {
void
pb_free () {
// NOTE: Buffers hold by other threads can be in inconsistent state.
// We cannot simply send them, so we at least inform the user.
// inform about all non-send buffers
// all buffers should be send except some daemon thread buffers
// - also some class loading + thread tagging buffers can be there (with 0)
// Report: .
// NOTE: Buffers held by other threads can be in inconsistent state.
// We cannot simply send them, so we at least inform the user about
// all unsent buffers. Generally, all buffers should be sent, with
// the exception of some daemon thread buffers. Also some class
// loading + thread tagging buffers can be there (with 0).
int relevant_count = 0;
int support_count = 0;
......@@ -57,51 +55,46 @@ pb_free () {
int non_marked_thread_count = 0;
for (int i = 0; i < BQ_BUFFERS; ++i) {
process_buffs * pb = &(pb_list [i]);
// buffer held by thread that performed (is still doing) analysis
// - probably analysis data
if (pb_list[i].owner_id >= STARTING_THREAD_ID) {
relevant_count += buffer_filled (pb_list[i].analysis_buff);
support_count += buffer_filled (pb_list[i].command_buff);
if (pb->owner_id >= STARTING_THREAD_ID) {
relevant_count += buffer_position (pb->analysis_buff);
support_count += buffer_position (pb->command_buff);
++marked_thread_count;
#ifdef DEBUG
printf ("Lost buffer for id %ld\n", pb_list[i].owner_id);
#endif
ldebug ("lost buffer for id %ld\n", pb->owner_id);
}
// buffer held by thread that did NOT perform analysis
// - support data
if (pb_list[i].owner_id == INVALID_THREAD_ID) {
support_count += buffer_filled (pb_list[i].analysis_buff)
+ buffer_filled (pb_list[i].command_buff);
if (pb->owner_id == INVALID_THREAD_ID) {
support_count += buffer_position (pb->analysis_buff)
+ buffer_position (pb->command_buff);
++non_marked_thread_count;
}
check_error (
pb_list[i].owner_id == PB_OBJTAG,
"Unprocessed buffers left in object tagging queue"
pb->owner_id == PB_OBJTAG,
"unprocessed buffers left in object tagging queue"
);
check_error (
pb_list[i].owner_id == PB_SEND,
"Unprocessed buffers left in sending queue"
pb->owner_id == PB_SEND,
"unprocessed buffers left in sending queue"
);
}
#ifdef DEBUG
if (relevant_count > 0 || support_count > 0) {
fprintf (
stderr, "%s%s%d%s%d%s%s%d%s%d%s",
"Warning: ",
"Due to non-terminated (daemon) threads, ",
relevant_count,
" bytes of relevant data and ",
support_count,
" bytes of support data were lost ",
"(thread count - analysis: ",
marked_thread_count,
", helper: ",
non_marked_thread_count,
").\n"
stderr, AGENT_NAME ": warning: due to non-terminated "
"(daemon) threads, %d bytes of relevant data and %d bytes "
"of support data were lost (thread count - analysis: %d, "
"helper: %d)\n",
relevant_count, support_count,
marked_thread_count, non_marked_thread_count
);
}
#endif
......@@ -124,8 +117,7 @@ pb_get (jlong thread_id) {
process_buffs *
pb_normal_get (jlong thread_id) {
// retrieves pointer to buffer
process_buffs *buffs;
process_buffs * buffs;
bq_pop (&empty_q, &buffs);
buffs->owner_id = thread_id;
return buffs;
......@@ -136,8 +128,8 @@ pb_normal_get (jlong thread_id) {
void
pb_normal_release (process_buffs * buffs) {
// empty buff
buffer_clean (buffs->analysis_buff);
buffer_clean (buffs->command_buff);
buffer_clear (buffs->analysis_buff);
buffer_clear (buffs->command_buff);
// stores pointer to buffer
buffs->owner_id = PB_FREE;
......@@ -149,7 +141,6 @@ process_buffs *
pb_utility_get () {
// retrieves pointer to buffer
process_buffs *buffs;
bq_pop (&utility_q, &buffs);
// no owner setting - it is already PB_UTILITY
......@@ -161,8 +152,8 @@ pb_utility_get () {
void
pb_utility_release (process_buffs * buffs) {
// empty buff
buffer_clean (buffs->analysis_buff);
buffer_clean (buffs->command_buff);
buffer_clear (buffs->analysis_buff);
buffer_clear (buffs->command_buff);
// stores pointer to buffer
buffs->owner_id = PB_UTILITY;
......
......@@ -6,7 +6,7 @@
#include <jvmti.h>
#include "shared/buffer.h"
#include "shared/procbuffs.h"
// Utility queue (buffer) is specifically reserved for sending different
// messages than analysis messages. The rationale behind utility buffers is that
......
#include "redispatcher.h"
#include "shared/buffpack.h"
#include "shared/bufferpack.h"
#include "shared/messagetype.h"
#include "shared/threadlocal.h"
......@@ -19,40 +19,52 @@
// ******************* Advanced packing routines *******************
static void
__fill_ot_rec (
JNIEnv * jni, buffer * cmd_buff, buffer * buff,
jobject object, unsigned char object_type
static inline objtag_rec *
__buffer_claim_objtag (buffer_t * restrict buffer) {
	//
	// Reserve space for one object tagging record in the given
	// buffer and return a typed pointer to the reserved space.
	//
	void * claimed = buffer_claim (buffer, sizeof (objtag_rec));
	return (objtag_rec *) claimed;
}
static inline void
__buffer_append_objtag (
buffer_t * buffer, jobject object_global,
unsigned char object_type, size_t tag_position
) {
//
// Create a new object tagging record. The tagging record contains the
// type of the object to be tagged, a global reference to the object
// to be tagged, and the position in the data buffer, where the object
// tag will be stored once it is assigned and the object is tagged.
// Place a new object tagging record in the given buffer. The tagging
// record contains the type of the object to be tagged, a global
// reference to the object, and the position in the data buffer, where
// the tag will be stored once it assigned to the object.
//
objtag_rec ot_rec = {
objtag_rec * ot_record = __buffer_claim_objtag (buffer);
*ot_record = (objtag_rec) {
.obj_type = object_type,
.buff_pos = buffer_filled (buff),
.obj_to_tag = (*jni)->NewGlobalRef (jni, object)
.buff_pos = tag_position,
.obj_to_tag = object_global
};
// Store the tagging record to the command buffer.
buffer_fill (cmd_buff, &ot_rec, sizeof (ot_rec));
}
static void
pack_object (
JNIEnv * jni, buffer * buff, buffer * cmd_buff,
JNIEnv * jni, buffer_t * anl_buff, buffer_t * cmd_buff,
jobject object, unsigned char object_type
) {
// create entry for object tagging thread that will replace the null ref
//
// Create a tag entry in the command buffer and store a NULL
// tag in the analysis buffer as a placeholder for the tag.
// For non-null objects, the placeholder NULL tag will be
// replaced by the actual tag assigned to the object.
//
if (object != NULL) {
__fill_ot_rec (jni, cmd_buff, buff, object, object_type);
jobject object_global = (*jni)->NewGlobalRef (jni, object);
const size_t tag_pos = buffer_position (anl_buff);
__buffer_append_objtag (cmd_buff, object_global, object_type, tag_pos);
}
// pack null net reference
pack_long (buff, NULL_NET_REF);
buffer_append_jlong (anl_buff, NULL_NET_REF);
}
// ******************* analysis helper methods *******************
......@@ -66,14 +78,13 @@ static atomic_int_least16_t next_analysis_id = ATOMIC_VAR_INIT (1);
/**
* Returns next analysis id and increments the
* the next available id counter.
* Retrieves the next analysis id and bumps the analysis id counter.
*/
static jshort
static inline jshort
__next_analysis_id () {
atomic_int_least16_t result = atomic_fetch_add (&next_analysis_id, 1);
check_error (result <= 0, "invalid next analysis id: %" PRIdLEAST16, result);
return (jshort) result;
jshort result = (jshort) atomic_fetch_add (&next_analysis_id, 1);
check_error (result <= 0, "invalid analysis id: %" PRIdLEAST16, result);
return result;
}
......@@ -119,7 +130,7 @@ __register_method (
process_buffs * buffs = pb_utility_get ();
jshort new_analysis_id = __next_analysis_id ();
messager_reganalysis_header (
messager_reganalysis (
buffs->analysis_buff, new_analysis_id, utf_str, utf_len
);
......@@ -167,83 +178,76 @@ native_analysisEnd (JNIEnv * jni, jclass this_class) {